1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
3 */
4
5/* A devmap's primary use is as a backend map for the XDP BPF helper call
6 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
7 * spent some effort to ensure the datapath with redirect maps does not use
8 * any locking. This is a quick note on the details.
9 *
10 * We have three possible paths to get into the devmap control plane: bpf
11 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
12 * will invoke an update, delete, or lookup operation. To ensure updates and
13 * deletes appear atomic from the datapath side xchg() is used to modify the
14 * netdev_map array. Then because the datapath does a lookup into the netdev_map
15 * array (read-only) from an RCU critical section we use call_rcu() to wait for
16 * an rcu grace period before freeing the old data structures. This ensures the
17 * datapath always has a valid copy. However, the datapath does a "flush"
18 * operation that pushes any pending packets in the driver outside the RCU
19 * critical section. Each bpf_dtab_netdev tracks these pending operations using
20 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
21 * this list is empty, indicating outstanding flush operations have completed.
22 *
23 * BPF syscalls may race with BPF program calls on any of the update, delete
24 * or lookup operations. As noted above, the xchg() operation also keeps the
25 * netdev_map consistent in this case. From the devmap side BPF programs
26 * calling into these operations are the same as multiple user space threads
27 * making system calls.
28 *
29 * Finally, any of the above may race with a netdev_unregister notifier. The
30 * unregister notifier must search for net devices in the map structure that
31 * contain a reference to the net device and remove them. This is a two step
32 * process (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
33 * check to see if the ifindex is the same as the net_device being removed.
34 * When removing the dev a cmpxchg() is used to ensure the correct dev is
35 * removed; in the case of a concurrent update or delete operation it is
36 * possible that the initially referenced dev is no longer in the map. As the
37 * notifier hook walks the map we know that new dev references can not be
38 * added by the user because core infrastructure ensures dev_get_by_index()
39 * calls will fail at this point.
40 *
41 * The devmap_hash type is a map type which interprets keys as ifindexes and
42 * indexes these using a hashmap. This allows maps that use ifindex as key to be
43 * densely packed instead of having holes in the lookup array for unused
44 * ifindexes. The setup and packet enqueue/send code is shared between the two
45 * types of devmap; only the lookup and insertion are different.
46 */
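/*
 * For illustration, a minimal sketch of the BPF side that this map backs
 * (assuming a libbpf-style program; tx_ports and xdp_redirect_prog are
 * hypothetical names, not part of this file):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(struct bpf_devmap_val));
 *		__uint(max_entries, 64);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_prog(struct xdp_md *ctx)
 *	{
 *		__u32 key = 0;
 *
 *		return bpf_redirect_map(&tx_ports, key, 0);
 *	}
 *
 * User space populates tx_ports through the bpf(2) update syscall; the
 * datapath described above then only does lock-free lookups and flushes.
 */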
47#include <linux/bpf.h>
48#include <net/xdp.h>
49#include <linux/filter.h>
50#include <trace/events/xdp.h>
51#include <linux/btf_ids.h>
52
53#define DEV_CREATE_FLAG_MASK \
54 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
55
56struct xdp_dev_bulk_queue {
57 struct xdp_frame *q[DEV_MAP_BULK_SIZE];
58 struct list_head flush_node;
59 struct net_device *dev;
60 struct net_device *dev_rx;
61 struct bpf_prog *xdp_prog;
62 unsigned int count;
63};
64
65struct bpf_dtab_netdev {
66 struct net_device *dev; /* must be first member, due to tracepoint */
67 struct hlist_node index_hlist;
68 struct bpf_dtab *dtab;
69 struct bpf_prog *xdp_prog;
70 struct rcu_head rcu;
71 unsigned int idx;
72 struct bpf_devmap_val val;
73};
74
75struct bpf_dtab {
76 struct bpf_map map;
77 struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
78 struct list_head list;
79
80 /* these are only used for DEVMAP_HASH type maps */
81 struct hlist_head *dev_index_head;
82 spinlock_t index_lock;
83 unsigned int items;
84 u32 n_buckets;
85};
86
87static DEFINE_PER_CPU(struct list_head, dev_flush_list);
88static DEFINE_SPINLOCK(dev_map_lock);
89static LIST_HEAD(dev_map_list);
90
91static struct hlist_head *dev_map_create_hash(unsigned int entries,
92 int numa_node)
93{
94 int i;
95 struct hlist_head *hash;
96
97 hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
98 if (hash != NULL)
99 for (i = 0; i < entries; i++)
100 INIT_HLIST_HEAD(&hash[i]);
101
102 return hash;
103}
104
105static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
106 int idx)
107{
108 return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
109}
110
111static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
112{
113 u32 valsize = attr->value_size;
114
115 /* check sanity of attributes. 2 value sizes supported:
116 * 4 bytes: ifindex
117 * 8 bytes: ifindex + prog fd
118 */
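	/* For reference, these two sizes correspond to the UAPI value layout
	 * (fields used below via offsetofend()):
	 *
	 *	struct bpf_devmap_val {
	 *		__u32 ifindex;
	 *		union {
	 *			int   fd;	populated on update
	 *			__u32 id;	reported on lookup
	 *		} bpf_prog;
	 *	};
	 *
	 * A hypothetical user-space update (map_fd, key, out_ifindex and
	 * devmap_prog_fd are illustrative names) might look like:
	 *
	 *	struct bpf_devmap_val val = { .ifindex = out_ifindex,
	 *				      .bpf_prog.fd = devmap_prog_fd };
	 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
	 */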
119 if (attr->max_entries == 0 || attr->key_size != 4 ||
120 (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
121 valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
122 attr->map_flags & ~DEV_CREATE_FLAG_MASK)
123 return -EINVAL;
124
125 /* Lookup returns a pointer straight to dev->ifindex, so make sure the
126 * verifier prevents writes from the BPF side
127 */
128 attr->map_flags |= BPF_F_RDONLY_PROG;
129
130
131 bpf_map_init_from_attr(&dtab->map, attr);
132
133 if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
134 dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
135
136 if (!dtab->n_buckets) /* Overflow check */
137 return -EINVAL;
138 }
139
140 if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
141 dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
142 dtab->map.numa_node);
143 if (!dtab->dev_index_head)
144 return -ENOMEM;
145
146 spin_lock_init(&dtab->index_lock);
147 } else {
148 dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
149 sizeof(struct bpf_dtab_netdev *),
150 dtab->map.numa_node);
151 if (!dtab->netdev_map)
152 return -ENOMEM;
153 }
154
155 return 0;
156}
157
158static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
159{
160 struct bpf_dtab *dtab;
161 int err;
162
163 if (!capable(CAP_NET_ADMIN))
164 return ERR_PTR(-EPERM);
165
166 dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
167 if (!dtab)
168 return ERR_PTR(-ENOMEM);
169
170 err = dev_map_init_map(dtab, attr);
171 if (err) {
172 bpf_map_area_free(dtab);
173 return ERR_PTR(err);
174 }
175
176 spin_lock(&dev_map_lock);
177 list_add_tail_rcu(&dtab->list, &dev_map_list);
178 spin_unlock(&dev_map_lock);
179
180 return &dtab->map;
181}
182
183static void dev_map_free(struct bpf_map *map)
184{
185 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
186 int i;
187
188 /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
189 * so the programs (there can be more than one that used this map) were
190 * disconnected from events. The following synchronize_rcu() guarantees
191 * both rcu read critical sections complete and waits for
192 * preempt-disable regions (NAPI being the relevant context here) so we
193 * are certain there will be no further reads against the netdev_map and
194 * all flush operations are complete. Flush operations can only be done
195 * from NAPI context for this reason.
196 */
197
198 spin_lock(&dev_map_lock);
199 list_del_rcu(&dtab->list);
200 spin_unlock(&dev_map_lock);
201
202 bpf_clear_redirect_map(map);
203 synchronize_rcu();
204
205 /* Make sure prior __dev_map_entry_free() calls have completed. */
206 rcu_barrier();
207
208 if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
209 for (i = 0; i < dtab->n_buckets; i++) {
210 struct bpf_dtab_netdev *dev;
211 struct hlist_head *head;
212 struct hlist_node *next;
213
214 head = dev_map_index_hash(dtab, i);
215
216 hlist_for_each_entry_safe(dev, next, head, index_hlist) {
217 hlist_del_rcu(&dev->index_hlist);
218 if (dev->xdp_prog)
219 bpf_prog_put(dev->xdp_prog);
220 dev_put(dev->dev);
221 kfree(dev);
222 }
223 }
224
225 bpf_map_area_free(dtab->dev_index_head);
226 } else {
227 for (i = 0; i < dtab->map.max_entries; i++) {
228 struct bpf_dtab_netdev *dev;
229
230 dev = rcu_dereference_raw(dtab->netdev_map[i]);
231 if (!dev)
232 continue;
233
234 if (dev->xdp_prog)
235 bpf_prog_put(dev->xdp_prog);
236 dev_put(dev->dev);
237 kfree(dev);
238 }
239
240 bpf_map_area_free(dtab->netdev_map);
241 }
242
243 bpf_map_area_free(dtab);
244}
245
246static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
247{
248 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
249 u32 index = key ? *(u32 *)key : U32_MAX;
250 u32 *next = next_key;
251
252 if (index >= dtab->map.max_entries) {
253 *next = 0;
254 return 0;
255 }
256
257 if (index == dtab->map.max_entries - 1)
258 return -ENOENT;
259 *next = index + 1;
260 return 0;
261}
262
263/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
264 * by local_bh_disable() (from XDP calls inside NAPI). The
265 * rcu_read_lock_bh_held() below makes lockdep accept both.
266 */
267static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
268{
269 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
270 struct hlist_head *head = dev_map_index_hash(dtab, key);
271 struct bpf_dtab_netdev *dev;
272
273 hlist_for_each_entry_rcu(dev, head, index_hlist,
274 lockdep_is_held(&dtab->index_lock))
275 if (dev->idx == key)
276 return dev;
277
278 return NULL;
279}
280
281static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
282 void *next_key)
283{
284 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
285 u32 idx, *next = next_key;
286 struct bpf_dtab_netdev *dev, *next_dev;
287 struct hlist_head *head;
288 int i = 0;
289
290 if (!key)
291 goto find_first;
292
293 idx = *(u32 *)key;
294
295 dev = __dev_map_hash_lookup_elem(map, idx);
296 if (!dev)
297 goto find_first;
298
299 next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
300 struct bpf_dtab_netdev, index_hlist);
301
302 if (next_dev) {
303 *next = next_dev->idx;
304 return 0;
305 }
306
307 i = idx & (dtab->n_buckets - 1);
308 i++;
309
310 find_first:
311 for (; i < dtab->n_buckets; i++) {
312 head = dev_map_index_hash(dtab, i);
313
314 next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
315 struct bpf_dtab_netdev,
316 index_hlist);
317 if (next_dev) {
318 *next = next_dev->idx;
319 return 0;
320 }
321 }
322
323 return -ENOENT;
324}
325
326static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
327 struct xdp_frame **frames, int n,
328 struct net_device *dev)
329{
330 struct xdp_txq_info txq = { .dev = dev };
331 struct xdp_buff xdp;
332 int i, nframes = 0;
333
334 for (i = 0; i < n; i++) {
335 struct xdp_frame *xdpf = frames[i];
336 u32 act;
337 int err;
338
339 xdp_convert_frame_to_buff(xdpf, &xdp);
340 xdp.txq = &txq;
341
342 act = bpf_prog_run_xdp(xdp_prog, &xdp);
343 switch (act) {
344 case XDP_PASS:
345 err = xdp_update_frame_from_buff(&xdp, xdpf);
346 if (unlikely(err < 0))
347 xdp_return_frame_rx_napi(xdpf);
348 else
349 frames[nframes++] = xdpf;
350 break;
351 default:
352 bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
353 fallthrough;
354 case XDP_ABORTED:
355 trace_xdp_exception(dev, xdp_prog, act);
356 fallthrough;
357 case XDP_DROP:
358 xdp_return_frame_rx_napi(xdpf);
359 break;
360 }
361 }
362 return nframes; /* sent frames count */
363}
364
365static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
366{
367 struct net_device *dev = bq->dev;
368 unsigned int cnt = bq->count;
369 int sent = 0, err = 0;
370 int to_send = cnt;
371 int i;
372
373 if (unlikely(!cnt))
374 return;
375
376 for (i = 0; i < cnt; i++) {
377 struct xdp_frame *xdpf = bq->q[i];
378
379 prefetch(xdpf);
380 }
381
382 if (bq->xdp_prog) {
383 to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
384 if (!to_send)
385 goto out;
386 }
387
388 sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
389 if (sent < 0) {
390 /* If ndo_xdp_xmit fails with an errno, no frames have
391 * been xmit'ed.
392 */
393 err = sent;
394 sent = 0;
395 }
396
397 /* If not all frames have been transmitted, it is our
398 * responsibility to free them
399 */
400 for (i = sent; unlikely(i < to_send); i++)
401 xdp_return_frame_rx_napi(bq->q[i]);
402
403out:
404 bq->count = 0;
405 trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
406}
407
408/* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
409 * driver before returning from its napi->poll() routine. See the comment above
410 * xdp_do_flush() in filter.c.
411 */
412void __dev_flush(void)
413{
414 struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
415 struct xdp_dev_bulk_queue *bq, *tmp;
416
417 list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
418 bq_xmit_all(bq, XDP_XMIT_FLUSH);
419 bq->dev_rx = NULL;
420 bq->xdp_prog = NULL;
421 __list_del_clearprev(&bq->flush_node);
422 }
423}
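/* A hedged driver-side sketch of the contract stated above __dev_flush()
 * (mydrv_* names are hypothetical): a NAPI poll routine that may have
 * redirected frames into the per-CPU bulk queues must flush them before it
 * completes, roughly:
 *
 *	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = mydrv_clean_rx_irq(napi, budget);
 *
 *		xdp_do_flush();
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 *
 * xdp_do_flush() in turn calls __dev_flush() (and the cpumap/xskmap
 * equivalents), draining this CPU's dev_flush_list.
 */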
424
425/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
426 * by local_bh_disable() (from XDP calls inside NAPI). The
427 * rcu_read_lock_bh_held() below makes lockdep accept both.
428 */
429static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
430{
431 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
432 struct bpf_dtab_netdev *obj;
433
434 if (key >= map->max_entries)
435 return NULL;
436
437 obj = rcu_dereference_check(dtab->netdev_map[key],
438 rcu_read_lock_bh_held());
439 return obj;
440}
441
442/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu
443 * variable access, and map elements stick around. See comment above
444 * xdp_do_flush() in filter.c.
445 */
446static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
447 struct net_device *dev_rx, struct bpf_prog *xdp_prog)
448{
449 struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
450 struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
451
452 if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
453 bq_xmit_all(bq, 0);
454
455 /* Ingress dev_rx will be the same for all xdp_frame's in
456 * bulk_queue, because the bq is stored per-CPU and must be flushed
457 * at the end of the net_device driver's NAPI function.
458 *
459 * Do the same with xdp_prog and flush_list since these fields
460 * are only ever modified together.
461 */
462 if (!bq->dev_rx) {
463 bq->dev_rx = dev_rx;
464 bq->xdp_prog = xdp_prog;
465 list_add(&bq->flush_node, flush_list);
466 }
467
468 bq->q[bq->count++] = xdpf;
469}
470
471static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
472 struct net_device *dev_rx,
473 struct bpf_prog *xdp_prog)
474{
475 int err;
476
477 if (!dev->netdev_ops->ndo_xdp_xmit)
478 return -EOPNOTSUPP;
479
480 err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
481 if (unlikely(err))
482 return err;
483
484 bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
485 return 0;
486}
487
488static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
489{
490 struct xdp_txq_info txq = { .dev = dst->dev };
491 struct xdp_buff xdp;
492 u32 act;
493
494 if (!dst->xdp_prog)
495 return XDP_PASS;
496
497 __skb_pull(skb, skb->mac_len);
498 xdp.txq = &txq;
499
500 act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
501 switch (act) {
502 case XDP_PASS:
503 __skb_push(skb, skb->mac_len);
504 break;
505 default:
506 bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
507 fallthrough;
508 case XDP_ABORTED:
509 trace_xdp_exception(dst->dev, dst->xdp_prog, act);
510 fallthrough;
511 case XDP_DROP:
512 kfree_skb(skb);
513 break;
514 }
515
516 return act;
517}
518
519int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
520 struct net_device *dev_rx)
521{
522 return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
523}
524
525int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
526 struct net_device *dev_rx)
527{
528 struct net_device *dev = dst->dev;
529
530 return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
531}
532
533static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
534{
535 if (!obj ||
536 !obj->dev->netdev_ops->ndo_xdp_xmit)
537 return false;
538
539 if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
540 return false;
541
542 return true;
543}
544
545static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
546 struct net_device *dev_rx,
547 struct xdp_frame *xdpf)
548{
549 struct xdp_frame *nxdpf;
550
551 nxdpf = xdpf_clone(xdpf);
552 if (!nxdpf)
553 return -ENOMEM;
554
555 bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);
556
557 return 0;
558}
559
560static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
561{
562 while (num_excluded--) {
563 if (ifindex == excluded[num_excluded])
564 return true;
565 }
566 return false;
567}
568
569/* Get ifindex of each upper device. 'indexes' must be able to hold at
570 * least MAX_NEST_DEV elements.
571 * Returns the number of ifindexes added.
572 */
573static int get_upper_ifindexes(struct net_device *dev, int *indexes)
574{
575 struct net_device *upper;
576 struct list_head *iter;
577 int n = 0;
578
579 netdev_for_each_upper_dev_rcu(dev, upper, iter) {
580 indexes[n++] = upper->ifindex;
581 }
582 return n;
583}
584
585int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
586 struct bpf_map *map, bool exclude_ingress)
587{
588 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
589 struct bpf_dtab_netdev *dst, *last_dst = NULL;
590 int excluded_devices[1+MAX_NEST_DEV];
591 struct hlist_head *head;
592 int num_excluded = 0;
593 unsigned int i;
594 int err;
595
596 if (exclude_ingress) {
597 num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
598 excluded_devices[num_excluded++] = dev_rx->ifindex;
599 }
600
601 if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
602 for (i = 0; i < map->max_entries; i++) {
603 dst = rcu_dereference_check(dtab->netdev_map[i],
604 rcu_read_lock_bh_held());
605 if (!is_valid_dst(dst, xdpf))
606 continue;
607
608 if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
609 continue;
610
611 /* we only need n-1 clones; last_dst enqueued below */
612 if (!last_dst) {
613 last_dst = dst;
614 continue;
615 }
616
617 err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
618 if (err)
619 return err;
620
621 last_dst = dst;
622 }
623 } else { /* BPF_MAP_TYPE_DEVMAP_HASH */
624 for (i = 0; i < dtab->n_buckets; i++) {
625 head = dev_map_index_hash(dtab, i);
626 hlist_for_each_entry_rcu(dst, head, index_hlist,
627 lockdep_is_held(&dtab->index_lock)) {
628 if (!is_valid_dst(dst, xdpf))
629 continue;
630
631 if (is_ifindex_excluded(excluded_devices, num_excluded,
632 dst->dev->ifindex))
633 continue;
634
635 /* we only need n-1 clones; last_dst enqueued below */
636 if (!last_dst) {
637 last_dst = dst;
638 continue;
639 }
640
641 err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
642 if (err)
643 return err;
644
645 last_dst = dst;
646 }
647 }
648 }
649
650 /* consume the last copy of the frame */
651 if (last_dst)
652 bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
653 else
654 xdp_return_frame_rx_napi(xdpf); /* dtab is empty */
655
656 return 0;
657}
658
659int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
660 struct bpf_prog *xdp_prog)
661{
662 int err;
663
664 err = xdp_ok_fwd_dev(dst->dev, skb->len);
665 if (unlikely(err))
666 return err;
667
668 /* Redirect has already succeeded semantically at this point, so we just
669 * return 0 even if packet is dropped. Helper below takes care of
670 * freeing skb.
671 */
672 if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
673 return 0;
674
675 skb->dev = dst->dev;
676 generic_xdp_tx(skb, xdp_prog);
677
678 return 0;
679}
680
681static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
682 struct sk_buff *skb,
683 struct bpf_prog *xdp_prog)
684{
685 struct sk_buff *nskb;
686 int err;
687
688 nskb = skb_clone(skb, GFP_ATOMIC);
689 if (!nskb)
690 return -ENOMEM;
691
692 err = dev_map_generic_redirect(dst, nskb, xdp_prog);
693 if (unlikely(err)) {
694 consume_skb(nskb);
695 return err;
696 }
697
698 return 0;
699}
700
701int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
702 struct bpf_prog *xdp_prog, struct bpf_map *map,
703 bool exclude_ingress)
704{
705 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
706 struct bpf_dtab_netdev *dst, *last_dst = NULL;
707 int excluded_devices[1+MAX_NEST_DEV];
708 struct hlist_head *head;
709 struct hlist_node *next;
710 int num_excluded = 0;
711 unsigned int i;
712 int err;
713
714 if (exclude_ingress) {
715 num_excluded = get_upper_ifindexes(dev, excluded_devices);
716 excluded_devices[num_excluded++] = dev->ifindex;
717 }
718
719 if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
720 for (i = 0; i < map->max_entries; i++) {
721 dst = rcu_dereference_check(dtab->netdev_map[i],
722 rcu_read_lock_bh_held());
723 if (!dst)
724 continue;
725
726 if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
727 continue;
728
729 /* we only need n-1 clones; last_dst enqueued below */
730 if (!last_dst) {
731 last_dst = dst;
732 continue;
733 }
734
735 err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
736 if (err)
737 return err;
738
739 last_dst = dst;
740
741 }
742 } else { /* BPF_MAP_TYPE_DEVMAP_HASH */
743 for (i = 0; i < dtab->n_buckets; i++) {
744 head = dev_map_index_hash(dtab, i);
745 hlist_for_each_entry_safe(dst, next, head, index_hlist) {
746 if (!dst)
747 continue;
748
749 if (is_ifindex_excluded(excluded_devices, num_excluded,
750 dst->dev->ifindex))
751 continue;
752
753 /* we only need n-1 clones; last_dst enqueued below */
754 if (!last_dst) {
755 last_dst = dst;
756 continue;
757 }
758
759 err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
760 if (err)
761 return err;
762
763 last_dst = dst;
764 }
765 }
766 }
767
768 /* consume the first skb and return */
769 if (last_dst)
770 return dev_map_generic_redirect(last_dst, skb, xdp_prog);
771
772 /* dtab is empty */
773 consume_skb(skb);
774 return 0;
775}
776
777static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
778{
779 struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
780
781 return obj ? &obj->val : NULL;
782}
783
784static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
785{
786 struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
787 *(u32 *)key);
788 return obj ? &obj->val : NULL;
789}
790
791static void __dev_map_entry_free(struct rcu_head *rcu)
792{
793 struct bpf_dtab_netdev *dev;
794
795 dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
796 if (dev->xdp_prog)
797 bpf_prog_put(dev->xdp_prog);
798 dev_put(dev->dev);
799 kfree(dev);
800}
801
802static int dev_map_delete_elem(struct bpf_map *map, void *key)
803{
804 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
805 struct bpf_dtab_netdev *old_dev;
806 int k = *(u32 *)key;
807
808 if (k >= map->max_entries)
809 return -EINVAL;
810
811 old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
812 if (old_dev)
813 call_rcu(&old_dev->rcu, __dev_map_entry_free);
814 return 0;
815}
816
817static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
818{
819 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
820 struct bpf_dtab_netdev *old_dev;
821 int k = *(u32 *)key;
822 unsigned long flags;
823 int ret = -ENOENT;
824
825 spin_lock_irqsave(&dtab->index_lock, flags);
826
827 old_dev = __dev_map_hash_lookup_elem(map, k);
828 if (old_dev) {
829 dtab->items--;
830 hlist_del_init_rcu(&old_dev->index_hlist);
831 call_rcu(&old_dev->rcu, __dev_map_entry_free);
832 ret = 0;
833 }
834 spin_unlock_irqrestore(&dtab->index_lock, flags);
835
836 return ret;
837}
838
839static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
840 struct bpf_dtab *dtab,
841 struct bpf_devmap_val *val,
842 unsigned int idx)
843{
844 struct bpf_prog *prog = NULL;
845 struct bpf_dtab_netdev *dev;
846
847 dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
848 GFP_NOWAIT | __GFP_NOWARN,
849 dtab->map.numa_node);
850 if (!dev)
851 return ERR_PTR(-ENOMEM);
852
853 dev->dev = dev_get_by_index(net, val->ifindex);
854 if (!dev->dev)
855 goto err_out;
856
857 if (val->bpf_prog.fd > 0) {
858 prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
859 BPF_PROG_TYPE_XDP, false);
860 if (IS_ERR(prog))
861 goto err_put_dev;
862 if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
863 !bpf_prog_map_compatible(&dtab->map, prog))
864 goto err_put_prog;
865 }
866
867 dev->idx = idx;
868 dev->dtab = dtab;
869 if (prog) {
870 dev->xdp_prog = prog;
871 dev->val.bpf_prog.id = prog->aux->id;
872 } else {
873 dev->xdp_prog = NULL;
874 dev->val.bpf_prog.id = 0;
875 }
876 dev->val.ifindex = val->ifindex;
877
878 return dev;
879err_put_prog:
880 bpf_prog_put(prog);
881err_put_dev:
882 dev_put(dev->dev);
883err_out:
884 kfree(dev);
885 return ERR_PTR(-EINVAL);
886}
887
888static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
889 void *key, void *value, u64 map_flags)
890{
891 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
892 struct bpf_dtab_netdev *dev, *old_dev;
893 struct bpf_devmap_val val = {};
894 u32 i = *(u32 *)key;
895
896 if (unlikely(map_flags > BPF_EXIST))
897 return -EINVAL;
898 if (unlikely(i >= dtab->map.max_entries))
899 return -E2BIG;
900 if (unlikely(map_flags == BPF_NOEXIST))
901 return -EEXIST;
902
903 /* already verified value_size <= sizeof val */
904 memcpy(&val, value, map->value_size);
905
906 if (!val.ifindex) {
907 dev = NULL;
908 /* can not specify fd if ifindex is 0 */
909 if (val.bpf_prog.fd > 0)
910 return -EINVAL;
911 } else {
912 dev = __dev_map_alloc_node(net, dtab, &val, i);
913 if (IS_ERR(dev))
914 return PTR_ERR(dev);
915 }
916
917 /* Use call_rcu() here to ensure rcu critical sections have completed,
918 * remembering that the driver side flush operation will happen before the
919 * net device is removed.
920 */
921 old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
922 if (old_dev)
923 call_rcu(&old_dev->rcu, __dev_map_entry_free);
924
925 return 0;
926}
927
928static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
929 u64 map_flags)
930{
931 return __dev_map_update_elem(current->nsproxy->net_ns,
932 map, key, value, map_flags);
933}
934
935static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
936 void *key, void *value, u64 map_flags)
937{
938 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
939 struct bpf_dtab_netdev *dev, *old_dev;
940 struct bpf_devmap_val val = {};
941 u32 idx = *(u32 *)key;
942 unsigned long flags;
943 int err = -EEXIST;
944
945 /* already verified value_size <= sizeof val */
946 memcpy(&val, value, map->value_size);
947
948 if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
949 return -EINVAL;
950
951 spin_lock_irqsave(&dtab->index_lock, flags);
952
953 old_dev = __dev_map_hash_lookup_elem(map, idx);
954 if (old_dev && (map_flags & BPF_NOEXIST))
955 goto out_err;
956
957 dev = __dev_map_alloc_node(net, dtab, &val, idx);
958 if (IS_ERR(dev)) {
959 err = PTR_ERR(dev);
960 goto out_err;
961 }
962
963 if (old_dev) {
964 hlist_del_rcu(&old_dev->index_hlist);
965 } else {
966 if (dtab->items >= dtab->map.max_entries) {
967 spin_unlock_irqrestore(&dtab->index_lock, flags);
968 call_rcu(&dev->rcu, __dev_map_entry_free);
969 return -E2BIG;
970 }
971 dtab->items++;
972 }
973
974 hlist_add_head_rcu(&dev->index_hlist,
975 dev_map_index_hash(dtab, idx));
976 spin_unlock_irqrestore(&dtab->index_lock, flags);
977
978 if (old_dev)
979 call_rcu(&old_dev->rcu, __dev_map_entry_free);
980
981 return 0;
982
983out_err:
984 spin_unlock_irqrestore(&dtab->index_lock, flags);
985 return err;
986}
987
988static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
989 u64 map_flags)
990{
991 return __dev_map_hash_update_elem(current->nsproxy->net_ns,
992 map, key, value, map_flags);
993}
994
995static int dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
996{
997 return __bpf_xdp_redirect_map(map, ifindex, flags,
998 BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
999 __dev_map_lookup_elem);
1000}
1001
1002static int dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
1003{
1004 return __bpf_xdp_redirect_map(map, ifindex, flags,
1005 BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
1006 __dev_map_hash_lookup_elem);
1007}
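/* Both map_redirect callbacks above back the bpf_redirect_map() helper for
 * the two devmap types. The allowed flags BPF_F_BROADCAST (replicate the
 * frame to every map entry, ignoring the key) and BPF_F_EXCLUDE_INGRESS
 * (skip the receiving interface) are serviced by dev_map_enqueue_multi() and
 * dev_map_redirect_multi() above. A hypothetical BPF-side call, reusing the
 * illustrative tx_ports map sketched at the top of this file:
 *
 *	return bpf_redirect_map(&tx_ports, 0,
 *				BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
 */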
1008
1009BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
1010const struct bpf_map_ops dev_map_ops = {
1011 .map_meta_equal = bpf_map_meta_equal,
1012 .map_alloc = dev_map_alloc,
1013 .map_free = dev_map_free,
1014 .map_get_next_key = dev_map_get_next_key,
1015 .map_lookup_elem = dev_map_lookup_elem,
1016 .map_update_elem = dev_map_update_elem,
1017 .map_delete_elem = dev_map_delete_elem,
1018 .map_check_btf = map_check_no_btf,
1019 .map_btf_id = &dev_map_btf_ids[0],
1020 .map_redirect = dev_map_redirect,
1021};
1022
1023const struct bpf_map_ops dev_map_hash_ops = {
1024 .map_meta_equal = bpf_map_meta_equal,
1025 .map_alloc = dev_map_alloc,
1026 .map_free = dev_map_free,
1027 .map_get_next_key = dev_map_hash_get_next_key,
1028 .map_lookup_elem = dev_map_hash_lookup_elem,
1029 .map_update_elem = dev_map_hash_update_elem,
1030 .map_delete_elem = dev_map_hash_delete_elem,
1031 .map_check_btf = map_check_no_btf,
1032 .map_btf_id = &dev_map_btf_ids[0],
1033 .map_redirect = dev_hash_map_redirect,
1034};
1035
1036static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
1037 struct net_device *netdev)
1038{
1039 unsigned long flags;
1040 u32 i;
1041
1042 spin_lock_irqsave(&dtab->index_lock, flags);
1043 for (i = 0; i < dtab->n_buckets; i++) {
1044 struct bpf_dtab_netdev *dev;
1045 struct hlist_head *head;
1046 struct hlist_node *next;
1047
1048 head = dev_map_index_hash(dtab, i);
1049
1050 hlist_for_each_entry_safe(dev, next, head, index_hlist) {
1051 if (netdev != dev->dev)
1052 continue;
1053
1054 dtab->items--;
1055 hlist_del_rcu(&dev->index_hlist);
1056 call_rcu(&dev->rcu, __dev_map_entry_free);
1057 }
1058 }
1059 spin_unlock_irqrestore(&dtab->index_lock, flags);
1060}
1061
1062static int dev_map_notification(struct notifier_block *notifier,
1063 ulong event, void *ptr)
1064{
1065 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
1066 struct bpf_dtab *dtab;
1067 int i, cpu;
1068
1069 switch (event) {
1070 case NETDEV_REGISTER:
1071 if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
1072 break;
1073
1074 /* will be freed in free_netdev() */
1075 netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
1076 if (!netdev->xdp_bulkq)
1077 return NOTIFY_BAD;
1078
1079 for_each_possible_cpu(cpu)
1080 per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
1081 break;
1082 case NETDEV_UNREGISTER:
1083 /* This rcu_read_lock/unlock pair is needed because
1084 * dev_map_list is an RCU list AND to ensure a delete
1085 * operation does not free a netdev_map entry while we
1086 * are comparing it against the netdev being unregistered.
1087 */
1088 rcu_read_lock();
1089 list_for_each_entry_rcu(dtab, &dev_map_list, list) {
1090 if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
1091 dev_map_hash_remove_netdev(dtab, netdev);
1092 continue;
1093 }
1094
1095 for (i = 0; i < dtab->map.max_entries; i++) {
1096 struct bpf_dtab_netdev *dev, *odev;
1097
1098 dev = rcu_dereference(dtab->netdev_map[i]);
1099 if (!dev || netdev != dev->dev)
1100 continue;
1101 odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
1102 if (dev == odev)
1103 call_rcu(&dev->rcu,
1104 __dev_map_entry_free);
1105 }
1106 }
1107 rcu_read_unlock();
1108 break;
1109 default:
1110 break;
1111 }
1112 return NOTIFY_OK;
1113}
1114
1115static struct notifier_block dev_map_notifier = {
1116 .notifier_call = dev_map_notification,
1117};
1118
1119static int __init dev_map_init(void)
1120{
1121 int cpu;
1122
1123 /* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
1124 BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
1125 offsetof(struct _bpf_dtab_netdev, dev));
1126 register_netdevice_notifier(&dev_map_notifier);
1127
1128 for_each_possible_cpu(cpu)
1129 INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
1130 return 0;
1131}
1132
1133subsys_initcall(dev_map_init);