// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	struct xdp_mem_allocator *xa;
	int type = xdp_rxq->mem.type;
	int id = xdp_rxq->mem.id;

	/* Reset mem info to defaults */
	xdp_rxq->mem.id = 0;
	xdp_rxq->mem.type = 0;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	if (id == 0)
		return;

	if (type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index, unsigned int napi_id)
{
	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;
	xdp_rxq->napi_id = napi_id;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);

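/* Example (illustrative sketch, not part of the kernel sources): a driver
 * typically registers one xdp_rxq_info per RX ring when the ring is set up
 * and unregisters it on teardown. The ring/netdev/napi names below are
 * hypothetical driver-side names.
 *
 *	static int my_rx_ring_setup(struct my_rx_ring *ring)
 *	{
 *		int err;
 *
 *		err = xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
 *				       ring->qid, ring->napi.napi_id);
 *		if (err)
 *			return err;
 *		...
 *		return 0;
 *	}
 *
 *	static void my_rx_ring_teardown(struct my_rx_ring *ring)
 *	{
 *		xdp_rxq_info_unreg(&ring->xdp_rxq);
 *	}
 */
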
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	if (!__is_supported_mem_type(type))
		return -EOPNOTSUPP;

	xdp_rxq->mem.type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return -EINVAL; /* Setup time check page_pool req */
		return 0;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ret;
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return -ENOMEM;

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	xdp_rxq->mem.id = id;
	xdp_alloc->mem = xdp_rxq->mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, xdp_rxq->mem.id);
		xdp_rxq->mem.id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect);

	mutex_unlock(&mem_id_lock);

	trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return errno;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);

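/* Example (illustrative sketch, not part of the kernel sources): after
 * xdp_rxq_info_reg() succeeds, a driver that recycles RX pages through a
 * page_pool registers that pool as the ring's memory model, and rolls the
 * registration back if this fails. pp_params/ring are hypothetical names;
 * page_pool_create()/page_pool_destroy() come from net/page_pool.h.
 *
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *	int err;
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *
 *	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, pool);
 *	if (err) {
 *		page_pool_destroy(pool);
 *		xdp_rxq_info_unreg(&ring->xdp_rxq);
 *		return err;
 *	}
 */
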
/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites, allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
			 struct xdp_buff *xdp)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		if (napi_direct && xdp_return_frame_no_direct())
			napi_direct = false;
		page_pool_put_full_page(xa->page_pool, page, napi_direct);
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

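/* Example (illustrative sketch, not part of the kernel sources): the
 * _rx_napi variant may only be used while the caller is still inside the
 * NAPI context that received the frame (e.g. dropping a frame because a
 * destination queue is full during XDP_REDIRECT); completion paths that
 * can run in another context must use plain xdp_return_frame().
 *
 *	// inside the driver's NAPI poll, on a drop path
 *	xdp_return_frame_rx_napi(xdpf);
 *
 *	// on a TX completion path that may run elsewhere
 *	xdp_return_frame(xdpf);
 */
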
/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
	struct xdp_mem_allocator *xa = bq->xa;

	if (unlikely(!xa || !bq->count))
		return;

	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
	/* bq->xa is not cleared to save lookup, if mem.id same in next bulk */
	bq->count = 0;
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);

/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct xdp_mem_allocator *xa;

	if (mem->type != MEM_TYPE_PAGE_POOL) {
		__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
		return;
	}

	xa = bq->xa;
	if (unlikely(!xa)) {
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		bq->count = 0;
		bq->xa = xa;
	}

	if (bq->count == XDP_BULK_QUEUE_SIZE)
		xdp_flush_frame_bulk(bq);

	if (unlikely(mem->id != xa->mem.id)) {
		xdp_flush_frame_bulk(bq);
		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	}

	bq->q[bq->count++] = xdpf->data;
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);

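/* Example (illustrative sketch, not part of the kernel sources): a TX
 * completion loop can batch returns through an on-stack xdp_frame_bulk.
 * xdp_frame_bulk_init() and XDP_BULK_QUEUE_SIZE are assumed to come from
 * net/xdp.h in this kernel version; frames[]/budget are hypothetical.
 *
 *	struct xdp_frame_bulk bq;
 *	int i;
 *
 *	xdp_frame_bulk_init(&bq);
 *
 *	rcu_read_lock(); // required by xdp_return_frame_bulk()
 *	for (i = 0; i < budget; i++)
 *		xdp_return_frame_bulk(frames[i], &bq);
 *	xdp_flush_frame_bulk(&bq); // return whatever is still queued
 *	rcu_read_unlock();
 */
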
void xdp_return_buff(struct xdp_buff *xdp)
{
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}

/* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);

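/* Example (illustrative sketch, not part of the kernel sources): callers
 * normally go through the XDP_WARN() macro from net/xdp.h, which supplies
 * __func__ and __LINE__ so the out-of-line WARN() above stays off the
 * fast path, e.g. when a driver forgot to reserve tailroom:
 *
 *	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
 *		XDP_WARN("Driver BUG: missing reserved tailroom");
 *		return NULL;
 *	}
 */
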
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
	n_skb = kmem_cache_alloc_bulk(skbuff_head_cache, gfp,
				      n_skb, skbs);
	if (unlikely(!n_skb))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);

struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev)
{
	unsigned int headroom, frame_size;
	void *hard_start;

	/* Part of headroom was reserved to xdpf */
	headroom = sizeof(*xdpf) + xdpf->headroom;

	/* Memory size backing xdp_frame data already has reserved
	 * room for build_skb to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	hard_start = xdpf->data - headroom;
	skb = build_skb_around(skb, hard_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, dev);

	/* Optional SKB info, currently missing:
	 * - HW checksum info (skb->ip_summed)
	 * - HW RX hash (skb_set_hash)
	 * - RX ring dev queue index (skb_record_rx_queue)
	 */

	/* Until page_pool gets an SKB return path, release DMA here */
	xdp_release_frame(xdpf);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}
EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);

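/* Example (illustrative sketch, not part of the kernel sources): the page
 * layout that makes the build_skb_around() call above safe, using the
 * local variables computed in the function:
 *
 *	hard_start                                    xdpf->data
 *	|-- struct xdp_frame --|-- xdpf->headroom --|-- xdpf->len --| tailroom
 *	|<----------------------- xdpf->frame_sz ---------------------------->|
 *
 * where the tailroom leaves space for the skb_shared_info that
 * build_skb_around() places at the end. The caller must hand in an skb
 * head cleared up to skb->tail; xdp_build_skb_from_frame() below shows
 * that minimal calling convention.
 */
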
struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __xdp_build_skb_from_frame(xdpf, skb, dev);
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);

struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
{
	unsigned int headroom, totalsize;
	struct xdp_frame *nxdpf;
	struct page *page;
	void *addr;

	headroom = xdpf->headroom + sizeof(*xdpf);
	totalsize = headroom + xdpf->len;

	if (unlikely(totalsize > PAGE_SIZE))
		return NULL;
	page = dev_alloc_page();
	if (!page)
		return NULL;
	addr = page_to_virt(page);

	memcpy(addr, xdpf, totalsize);

	nxdpf = addr;
	nxdpf->data = addr + headroom;
	nxdpf->frame_sz = PAGE_SIZE;
	nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
	nxdpf->mem.id = 0;

	return nxdpf;
}
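
/* Example (illustrative sketch, not part of the kernel sources): when one
 * frame has to reach several destinations (e.g. a redirect that fans out
 * to multiple devices), each extra destination gets its own private copy
 * via xdpf_clone() while the original is kept for the last one. The
 * transmit helper below is hypothetical.
 *
 *	struct xdp_frame *nxdpf = xdpf_clone(xdpf);
 *
 *	if (!nxdpf)
 *		return -ENOMEM;
 *	err = my_xmit_one(second_dev, nxdpf);
 */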