// SPDX-License-Identifier: GPL-2.0-only
/* bpf/cpumap.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */

/* The 'cpumap' is primarily used as a backend map for the XDP BPF helper
 * call bpf_redirect_map() and the XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap, which redirects XDP frames out another NIC device,
 * this map type redirects raw XDP frames to another CPU. The remote
 * CPU will do the SKB allocation and call the normal network stack.
 *
 * This is a scalability and isolation mechanism that allows separating
 * the early driver network XDP layer from the rest of the netstack,
 * and assigning dedicated CPUs for this stage. This basically allows
 * for 10G wirespeed pre-filtering via bpf.
 */
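
/* Illustrative sketch (not part of the original file): the BPF side of
 * cpumap usage that this file backs. The map name, SEC() name and the
 * fixed destination CPU are made-up placeholders; only
 * BPF_MAP_TYPE_CPUMAP, struct bpf_cpumap_val and bpf_redirect_map()
 * come from this file and the UAPI.
 *
 *      struct {
 *              __uint(type, BPF_MAP_TYPE_CPUMAP);
 *              __uint(key_size, sizeof(__u32));
 *              __uint(value_size, sizeof(struct bpf_cpumap_val));
 *              __uint(max_entries, 64);
 *      } cpu_map SEC(".maps");
 *
 *      SEC("xdp")
 *      int xdp_redirect_to_cpu(struct xdp_md *ctx)
 *      {
 *              __u32 cpu = 2;  // example: fixed destination CPU
 *
 *              // Lower bits of flags select the fallback action
 *              // on lookup failure (here: XDP_PASS).
 *              return bpf_redirect_map(&cpu_map, cpu, XDP_PASS);
 *      }
 */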
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/capability.h>
#include <trace/events/xdp.h>

#include <linux/netdevice.h>   /* netif_receive_skb_list */
#include <linux/etherdevice.h> /* eth_type_trans */

/* General idea: XDP packets getting XDP-redirected to another CPU will
 * be stored/queued for at most one driver ->poll() call. It is
 * guaranteed that queueing the frame and the flush operation happen on
 * the same CPU. Thus, the cpu_map_flush operation can deduce via
 * this_cpu_ptr() which queue in bpf_cpu_map_entry contains packets.
 */
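
/* Illustrative call-flow summary (added for clarity, not in the
 * original file), tying together the functions defined below:
 *
 *      NAPI driver ->poll() on the RX CPU:
 *        bpf_prog_run_xdp() -> bpf_redirect_map(cpumap)
 *          cpu_map_enqueue() -> bq_enqueue()   // per-CPU bulk queue
 *        xdp_do_flush() -> __cpu_map_flush()
 *          bq_flush_to_queue()                 // into entry's ptr_ring
 *          wake_up_process(rcpu->kthread)
 *
 *      cpumap kthread pinned to the destination CPU:
 *        cpu_map_kthread_run() -> __ptr_ring_consume_batched()
 *          cpu_map_bpf_prog_run_xdp()          // optional second prog
 *          __xdp_build_skb_from_frame() -> netif_receive_skb_list()
 */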

#define CPU_MAP_BULK_SIZE 8  /* 8 == one cacheline on 64-bit archs */
struct bpf_cpu_map_entry;
struct bpf_cpu_map;

struct xdp_bulk_queue {
        void *q[CPU_MAP_BULK_SIZE];
        struct list_head flush_node;
        struct bpf_cpu_map_entry *obj;
        unsigned int count;
};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
        u32 cpu;    /* kthread CPU and map index */
        int map_id; /* Back reference to map */

        /* XDP can run multiple RX-ring queues, need __percpu enqueue store */
        struct xdp_bulk_queue __percpu *bulkq;

        struct bpf_cpu_map *cmap;

        /* Queue with potential multi-producers, and single-consumer kthread */
        struct ptr_ring *queue;
        struct task_struct *kthread;

        struct bpf_cpumap_val value;
        struct bpf_prog *prog;

        atomic_t refcnt; /* Control when this struct can be freed */
        struct rcu_head rcu;

        struct work_struct kthread_stop_wq;
};

struct bpf_cpu_map {
        struct bpf_map map;
        /* Below members are specific to this map type */
        struct bpf_cpu_map_entry __rcu **cpu_map;
};
static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
        u32 value_size = attr->value_size;
        struct bpf_cpu_map *cmap;
        int err = -ENOMEM;

        if (!bpf_capable())
                return ERR_PTR(-EPERM);

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
             value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
            attr->map_flags & ~BPF_F_NUMA_NODE)
                return ERR_PTR(-EINVAL);

        cmap = kzalloc(sizeof(*cmap), GFP_USER | __GFP_ACCOUNT);
        if (!cmap)
                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&cmap->map, attr);

        /* Pre-limit array size based on NR_CPUS, not final CPU check */
        if (cmap->map.max_entries > NR_CPUS) {
                err = -E2BIG;
                goto free_cmap;
        }

        /* Alloc array for possible remote "destination" CPUs */
        cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
                                           sizeof(struct bpf_cpu_map_entry *),
                                           cmap->map.numa_node);
        if (!cmap->cpu_map)
                goto free_cmap;

        return &cmap->map;
free_cmap:
        kfree(cmap);
        return ERR_PTR(err);
}
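
/* Userspace sketch (illustrative, not part of this file): creating a
 * cpumap whose value carries both qsize and an optional prog fd. The
 * libbpf bpf_map_create() API is assumed here; older libbpf releases
 * spelled this bpf_create_map().
 *
 *      #include <bpf/bpf.h>
 *      #include <linux/bpf.h>
 *
 *      int create_cpumap(unsigned int max_cpus)
 *      {
 *              return bpf_map_create(BPF_MAP_TYPE_CPUMAP, "cpu_map",
 *                                    sizeof(__u32),
 *                                    sizeof(struct bpf_cpumap_val),
 *                                    max_cpus, NULL);
 *      }
 *
 * Passing value_size == offsetofend(struct bpf_cpumap_val, qsize),
 * i.e. just the 4-byte qsize, is also accepted per the value_size
 * check in cpu_map_alloc() above; such a map cannot carry a prog fd.
 */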

static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
        atomic_inc(&rcpu->refcnt);
}

/* Called from a workqueue, to work around the syscall path using
 * preempt_disable.
 */
static void cpu_map_kthread_stop(struct work_struct *work)
{
        struct bpf_cpu_map_entry *rcpu;

        rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);

        /* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
         * as it waits until all in-flight call_rcu() callbacks complete.
         */
        rcu_barrier();

        /* kthread_stop will wake_up_process and wait for it to complete */
        kthread_stop(rcpu->kthread);
}

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
        /* The tear-down procedure should have made sure that the queue is
         * empty. See __cpu_map_entry_replace() and the work-queue
         * invoked cpu_map_kthread_stop(). Catch any broken behaviour
         * gracefully and warn once.
         */
        struct xdp_frame *xdpf;

        while ((xdpf = ptr_ring_consume(ring)))
                if (WARN_ON_ONCE(xdpf))
                        xdp_return_frame(xdpf);
}

static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
        if (atomic_dec_and_test(&rcpu->refcnt)) {
                if (rcpu->prog)
                        bpf_prog_put(rcpu->prog);
                /* The queue should be empty at this point */
                __cpu_map_ring_cleanup(rcpu->queue);
                ptr_ring_cleanup(rcpu->queue, NULL);
                kfree(rcpu->queue);
                kfree(rcpu);
        }
}

static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
                                    void **frames, int n,
                                    struct xdp_cpumap_stats *stats)
{
        struct xdp_rxq_info rxq;
        struct xdp_buff xdp;
        int i, nframes = 0;

        if (!rcpu->prog)
                return n;

        rcu_read_lock_bh();

        xdp_set_return_frame_no_direct();
        xdp.rxq = &rxq;

        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
                u32 act;
                int err;

                rxq.dev = xdpf->dev_rx;
                rxq.mem = xdpf->mem;
                /* TODO: report queue_index to xdp_rxq_info */

                xdp_convert_frame_to_buff(xdpf, &xdp);

                act = bpf_prog_run_xdp(rcpu->prog, &xdp);
                switch (act) {
                case XDP_PASS:
                        err = xdp_update_frame_from_buff(&xdp, xdpf);
                        if (err < 0) {
                                xdp_return_frame(xdpf);
                                stats->drop++;
                        } else {
                                frames[nframes++] = xdpf;
                                stats->pass++;
                        }
                        break;
                case XDP_REDIRECT:
                        err = xdp_do_redirect(xdpf->dev_rx, &xdp,
                                              rcpu->prog);
                        if (unlikely(err)) {
                                xdp_return_frame(xdpf);
                                stats->drop++;
                        } else {
                                stats->redirect++;
                        }
                        break;
                default:
                        bpf_warn_invalid_xdp_action(act);
                        fallthrough;
                case XDP_DROP:
                        xdp_return_frame(xdpf);
                        stats->drop++;
                        break;
                }
        }

        if (stats->redirect)
                xdp_do_flush_map();

        xdp_clear_return_frame_no_direct();

        rcu_read_unlock_bh(); /* resched point, may call do_softirq() */

        return nframes;
}

#define CPUMAP_BATCH 8

static int cpu_map_kthread_run(void *data)
{
        struct bpf_cpu_map_entry *rcpu = data;

        set_current_state(TASK_INTERRUPTIBLE);

        /* When the kthread is given the stop order, the rcpu has already
         * been disconnected from the map, so no new packets can enter.
         * Remaining in-flight per-CPU stored packets have been flushed to
         * this queue. Honor the kthread_stop signal, but only exit once
         * the queue is empty.
         */
        while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
                struct xdp_cpumap_stats stats = {}; /* zero stats */
                unsigned int kmem_alloc_drops = 0, sched = 0;
                gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
                void *frames[CPUMAP_BATCH];
                void *skbs[CPUMAP_BATCH];
                int i, n, m, nframes;
                LIST_HEAD(list);

                /* Release the CPU via reschedule checks */
                if (__ptr_ring_empty(rcpu->queue)) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        /* Recheck to avoid lost wake-up */
                        if (__ptr_ring_empty(rcpu->queue)) {
                                schedule();
                                sched = 1;
                        } else {
                                __set_current_state(TASK_RUNNING);
                        }
                } else {
                        sched = cond_resched();
                }

                /*
                 * The bpf_cpu_map_entry is single-consumer, with this
                 * kthread pinned to one CPU. Lockless access to the
                 * ptr_ring consume side is valid, as resizing the queue
                 * is not allowed.
                 */
                n = __ptr_ring_consume_batched(rcpu->queue, frames,
                                               CPUMAP_BATCH);
                for (i = 0; i < n; i++) {
                        void *f = frames[i];
                        struct page *page = virt_to_page(f);

                        /* Bring the struct page memory area to the current
                         * CPU. It is read by build_skb_around() via
                         * page_is_pfmemalloc(), and written on free via
                         * page_frag_free().
                         */
                        prefetchw(page);
                }

                /* Support running another XDP prog on this CPU */
                nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, n, &stats);
                if (nframes) {
                        m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp,
                                                  nframes, skbs);
                        if (unlikely(m == 0)) {
                                for (i = 0; i < nframes; i++)
                                        skbs[i] = NULL; /* effect: xdp_return_frame */
                                kmem_alloc_drops += nframes;
                        }
                }

                local_bh_disable();
                for (i = 0; i < nframes; i++) {
                        struct xdp_frame *xdpf = frames[i];
                        struct sk_buff *skb = skbs[i];

                        skb = __xdp_build_skb_from_frame(xdpf, skb,
                                                         xdpf->dev_rx);
                        if (!skb) {
                                xdp_return_frame(xdpf);
                                continue;
                        }

                        list_add_tail(&skb->list, &list);
                }
                netif_receive_skb_list(&list);

                /* Feedback loop via tracepoint */
                trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,
                                         sched, &stats);

                local_bh_enable(); /* resched point, may call do_softirq() */
        }
        __set_current_state(TASK_RUNNING);

        put_cpu_map_entry(rcpu);
        return 0;
}

bool cpu_map_prog_allowed(struct bpf_map *map)
{
        return map->map_type == BPF_MAP_TYPE_CPUMAP &&
               map->value_size != offsetofend(struct bpf_cpumap_val, qsize);
}

static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
{
        struct bpf_prog *prog;

        prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
        if (IS_ERR(prog))
                return PTR_ERR(prog);

        if (prog->expected_attach_type != BPF_XDP_CPUMAP) {
                bpf_prog_put(prog);
                return -EINVAL;
        }

        rcpu->value.bpf_prog.id = prog->aux->id;
        rcpu->prog = prog;

        return 0;
}
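
/* Illustrative BPF-side sketch (not part of this file): a program meant
 * to be loaded with expected_attach_type BPF_XDP_CPUMAP, so its fd can
 * be stored in bpf_cpumap_val.bpf_prog.fd. The SEC() name that selects
 * this attach type depends on the libbpf version (e.g. an "xdp_cpumap/"
 * prefix in older releases, "xdp/cpumap" in libbpf 1.0); treat the name
 * below as an assumption.
 *
 *      SEC("xdp/cpumap")
 *      int xdp_on_remote_cpu(struct xdp_md *ctx)
 *      {
 *              // Runs on the destination CPU's kthread; see
 *              // cpu_map_bpf_prog_run_xdp() above.
 *              return XDP_PASS;
 *      }
 */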

static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
                      u32 cpu)
{
        int numa, err, i, fd = value->bpf_prog.fd;
        gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
        struct bpf_cpu_map_entry *rcpu;
        struct xdp_bulk_queue *bq;

        /* Have map->numa_node, but choose node of redirect target CPU */
        numa = cpu_to_node(cpu);

        rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
        if (!rcpu)
                return NULL;

        /* Alloc percpu bulkq */
        rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
                                           sizeof(void *), gfp);
        if (!rcpu->bulkq)
                goto free_rcu;

        for_each_possible_cpu(i) {
                bq = per_cpu_ptr(rcpu->bulkq, i);
                bq->obj = rcpu;
        }

        /* Alloc queue */
        rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
                                           numa);
        if (!rcpu->queue)
                goto free_bulkq;

        err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
        if (err)
                goto free_queue;

        rcpu->cpu = cpu;
        rcpu->map_id = map->id;
        rcpu->value.qsize = value->qsize;

        if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd))
                goto free_ptr_ring;

        /* Setup kthread */
        rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
                                               "cpumap/%d/map:%d", cpu,
                                               map->id);
        if (IS_ERR(rcpu->kthread))
                goto free_prog;

        get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
        get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */

        /* Make sure kthread runs on a single CPU */
        kthread_bind(rcpu->kthread, cpu);
        wake_up_process(rcpu->kthread);

        return rcpu;

free_prog:
        if (rcpu->prog)
                bpf_prog_put(rcpu->prog);
free_ptr_ring:
        ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
        kfree(rcpu->queue);
free_bulkq:
        free_percpu(rcpu->bulkq);
free_rcu:
        kfree(rcpu);
        return NULL;
}

static void __cpu_map_entry_free(struct rcu_head *rcu)
{
        struct bpf_cpu_map_entry *rcpu;

        /* This cpu_map_entry has been disconnected from the map and one
         * RCU grace-period has elapsed. Thus, XDP cannot queue any new
         * packets, and a pending flush operation can no longer find
         * this entry.
         */
        rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);

        free_percpu(rcpu->bulkq);
        /* Cannot kthread_stop() here; the last put frees rcpu resources */
        put_cpu_map_entry(rcpu);
}

/* After the xchg of the pointer to bpf_cpu_map_entry, use call_rcu() to
 * ensure any driver rcu critical sections have completed, but this
 * does not guarantee a flush has happened yet. This is because the
 * driver-side rcu_read_lock/unlock only protects the running XDP
 * program. The atomic xchg and NULL-ptr check in __cpu_map_flush()
 * make sure a pending flush op doesn't fail.
 *
 * The bpf_cpu_map_entry is still used by the kthread, and there can
 * still be pending packets (in queue and percpu bulkq). A refcnt
 * makes sure the last user (kthread_stop vs. call_rcu) frees the
 * memory resources.
 *
 * The rcu callback __cpu_map_entry_free flushes remaining packets in
 * the percpu bulkq to the queue. Because the caller map_delete_elem()
 * disables preemption, we cannot call kthread_stop() to make sure the
 * queue is empty. Instead a work_queue is started for stopping the
 * kthread: cpu_map_kthread_stop, which waits for an RCU grace period
 * before stopping the kthread, emptying the queue.
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
                                    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
        struct bpf_cpu_map_entry *old_rcpu;

        old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu)));
        if (old_rcpu) {
                call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
                INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
                schedule_work(&old_rcpu->kthread_stop_wq);
        }
}

static int cpu_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
        u32 key_cpu = *(u32 *)key;

        if (key_cpu >= map->max_entries)
                return -EINVAL;

        /* Notice: the caller map_delete_elem() uses preempt_disable() */
        __cpu_map_entry_replace(cmap, key_cpu, NULL);
        return 0;
}

static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
                               u64 map_flags)
{
        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
        struct bpf_cpumap_val cpumap_value = {};
        struct bpf_cpu_map_entry *rcpu;
        /* Array index key corresponds to CPU number */
        u32 key_cpu = *(u32 *)key;

        memcpy(&cpumap_value, value, map->value_size);

        if (unlikely(map_flags > BPF_EXIST))
                return -EINVAL;
        if (unlikely(key_cpu >= cmap->map.max_entries))
                return -E2BIG;
        if (unlikely(map_flags == BPF_NOEXIST))
                return -EEXIST;
        if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
                return -EOVERFLOW;

        /* Make sure CPU is a valid possible cpu */
        if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
                return -ENODEV;

        if (cpumap_value.qsize == 0) {
                rcpu = NULL; /* Same as deleting */
        } else {
                /* Updating qsize causes re-allocation of bpf_cpu_map_entry */
                rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
                if (!rcpu)
                        return -ENOMEM;
                rcpu->cmap = cmap;
        }
        rcu_read_lock();
        __cpu_map_entry_replace(cmap, key_cpu, rcpu);
        rcu_read_unlock();
        return 0;
}
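
/* Userspace sketch (illustrative, not part of this file): updating one
 * entry. The struct layout and bpf_map_update_elem() call follow the
 * UAPI; map_fd and prog_fd are placeholder values.
 *
 *      struct bpf_cpumap_val val = {
 *              .qsize = 2048,          // ptr_ring slots for this CPU
 *              .bpf_prog.fd = prog_fd, // optional; fd <= 0 means no prog
 *      };
 *      __u32 cpu = 2;
 *
 *      if (bpf_map_update_elem(map_fd, &cpu, &val, BPF_ANY))
 *              // handle error (e.g. -ENODEV for an impossible CPU)
 */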

static void cpu_map_free(struct bpf_map *map)
{
        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
        u32 i;

        /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so the bpf programs (there can be more than one that used this map)
         * were disconnected from events. Wait for outstanding critical
         * sections in these programs to complete. The rcu critical section
         * only guarantees no further "XDP/bpf-side" reads against
         * bpf_cpu_map->cpu_map. It does __not__ ensure pending flush
         * operations (if any) are complete.
         */

        synchronize_rcu();

        /* For cpu_map the remote CPUs can still be using the entries
         * (struct bpf_cpu_map_entry).
         */
        for (i = 0; i < cmap->map.max_entries; i++) {
                struct bpf_cpu_map_entry *rcpu;

                rcpu = rcu_dereference_raw(cmap->cpu_map[i]);
                if (!rcpu)
                        continue;

                /* bq flush and cleanup happens after RCU grace-period */
                __cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
        }
        bpf_map_area_free(cmap->cpu_map);
        kfree(cmap);
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
        struct bpf_cpu_map_entry *rcpu;

        if (key >= map->max_entries)
                return NULL;

        rcpu = rcu_dereference_check(cmap->cpu_map[key],
                                     rcu_read_lock_bh_held());
        return rcpu;
}

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_cpu_map_entry *rcpu =
                __cpu_map_lookup_elem(map, *(u32 *)key);

        return rcpu ? &rcpu->value : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = next_key;

        if (index >= cmap->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == cmap->map.max_entries - 1)
                return -ENOENT;
        *next = index + 1;
        return 0;
}

static int cpu_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
        return __bpf_xdp_redirect_map(map, ifindex, flags, 0,
                                      __cpu_map_lookup_elem);
}

static int cpu_map_btf_id;
const struct bpf_map_ops cpu_map_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_alloc = cpu_map_alloc,
        .map_free = cpu_map_free,
        .map_delete_elem = cpu_map_delete_elem,
        .map_update_elem = cpu_map_update_elem,
        .map_lookup_elem = cpu_map_lookup_elem,
        .map_get_next_key = cpu_map_get_next_key,
        .map_check_btf = map_check_no_btf,
        .map_btf_name = "bpf_cpu_map",
        .map_btf_id = &cpu_map_btf_id,
        .map_redirect = cpu_map_redirect,
};

static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
        struct bpf_cpu_map_entry *rcpu = bq->obj;
        unsigned int processed = 0, drops = 0;
        const int to_cpu = rcpu->cpu;
        struct ptr_ring *q;
        int i;

        if (unlikely(!bq->count))
                return;

        q = rcpu->queue;
        spin_lock(&q->producer_lock);

        for (i = 0; i < bq->count; i++) {
                struct xdp_frame *xdpf = bq->q[i];
                int err;

                err = __ptr_ring_produce(q, xdpf);
                if (err) {
                        drops++;
                        xdp_return_frame_rx_napi(xdpf);
                }
                processed++;
        }
        bq->count = 0;
        spin_unlock(&q->producer_lock);

        __list_del_clearprev(&bq->flush_node);

        /* Feedback loop via tracepoints */
        trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
}

/* Runs under RCU read-side protection, plus in softirq under NAPI protection.
 * Thus, percpu variable access is safe.
 */
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
        struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
        struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

        if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
                bq_flush_to_queue(bq);

        /* Notice, the xdp_buff/page MUST be queued here, long enough for
         * the driver code invoking us to finish, due to drivers'
         * (e.g. ixgbe) recycle tricks based on page refcount.
         *
         * Thus, the incoming xdp_frame is always queued here (else we race
         * with another CPU on page refcount and remaining driver code).
         * Queue time is very short, as the driver will invoke the flush
         * operation when completing its napi->poll call.
         */
        bq->q[bq->count++] = xdpf;

        if (!bq->flush_node.prev)
                list_add(&bq->flush_node, flush_list);
}

int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
                    struct net_device *dev_rx)
{
        struct xdp_frame *xdpf;

        xdpf = xdp_convert_buff_to_frame(xdp);
        if (unlikely(!xdpf))
                return -EOVERFLOW;

        /* Info needed when constructing SKB on remote CPU */
        xdpf->dev_rx = dev_rx;

        bq_enqueue(rcpu, xdpf);
        return 0;
}

void __cpu_map_flush(void)
{
        struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
        struct xdp_bulk_queue *bq, *tmp;

        list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
                bq_flush_to_queue(bq);

                /* If already running, costs spin_lock_irqsave + smp_mb */
                wake_up_process(bq->obj->kthread);
        }
}

static int __init cpu_map_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
        return 0;
}

subsys_initcall(cpu_map_init);