v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
   3 */
   4
   5/* A devmap's primary use is as a backend map for the XDP BPF helper call
   6 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
   7 * spent some effort to ensure the datapath with redirect maps does not use
   8 * any locking. This is a quick note on the details.
   9 *
  10 * We have three possible paths to get into the devmap control plane bpf
  11 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
  12 * will invoke an update, delete, or lookup operation. To ensure updates and
  13 * deletes appear atomic from the datapath side xchg() is used to modify the
  14 * netdev_map array. Then because the datapath does a lookup into the netdev_map
  15 * array (read-only) from an RCU critical section we use call_rcu() to wait for
  16 * an RCU grace period before freeing the old data structures. This ensures the
  17 * datapath always has a valid copy. However, the datapath does a "flush"
  18 * operation that pushes any pending packets in the driver outside the RCU
  19 * critical section. Each bpf_dtab_netdev tracks these pending operations using
  20 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
  21 * this list is empty, indicating outstanding flush operations have completed.
  22 *
  23 * BPF syscalls may race with BPF program calls on any of the update, delete
  24 * or lookup operations. As noted above, the xchg() operation also keeps the
  25 * netdev_map consistent in this case. From the devmap side BPF programs
  26 * calling into these operations are the same as multiple user space threads
  27 * making system calls.
  28 *
  29 * Finally, any of the above may race with a netdev_unregister notifier. The
  30 * unregister notifier must search for net devices in the map structure that
  31 * contain a reference to the net device and remove them. This is a two step
  32 * process (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
  33 * check to see if the ifindex is the same as the net_device being removed.
  34 * When removing the dev a cmpxchg() is used to ensure the correct dev is
  35 * removed; in the case of a concurrent update or delete operation it is
  36 * possible that the initially referenced dev is no longer in the map. As the
  37 * notifier hook walks the map we know that new dev references can not be
  38 * added by the user because core infrastructure ensures dev_get_by_index()
  39 * calls will fail at this point.
  40 *
  41 * The devmap_hash type is a map type which interprets keys as ifindexes and
  42 * indexes these using a hashmap. This allows maps that use ifindex as key to be
  43 * densely packed instead of having holes in the lookup array for unused
  44 * ifindexes. The setup and packet enqueue/send code is shared between the two
  45 * types of devmap; only the lookup and insertion is different.
  46 */
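/* Illustration, not part of the kernel source: a minimal sketch of the
 * BPF-side usage this file backs, assuming libbpf's SEC() and __uint()
 * macros from <bpf/bpf_helpers.h>; the map name tx_ports and the fixed
 * key are placeholders chosen for the example.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));	/- 4-byte value: ifindex only -/
 *		__uint(max_entries, 64);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_devmap(struct xdp_md *ctx)
 *	{
 *		__u32 key = 0;	/- slot populated from user space -/
 *
 *		return bpf_redirect_map(&tx_ports, key, 0);
 *	}
 */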
  47#include <linux/bpf.h>
  48#include <net/xdp.h>
  49#include <linux/filter.h>
  50#include <trace/events/xdp.h>
  51#include <linux/btf_ids.h>
  52
  53#define DEV_CREATE_FLAG_MASK \
  54	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
  55
   56struct xdp_dev_bulk_queue {
  57	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
  58	struct list_head flush_node;
  59	struct net_device *dev;
  60	struct net_device *dev_rx;
  61	struct bpf_prog *xdp_prog;
  62	unsigned int count;
  63};
  64
  65struct bpf_dtab_netdev {
  66	struct net_device *dev; /* must be first member, due to tracepoint */
  67	struct hlist_node index_hlist;
  68	struct bpf_dtab *dtab;
  69	struct bpf_prog *xdp_prog;
  70	struct rcu_head rcu;
  71	unsigned int idx;
  72	struct bpf_devmap_val val;
  73};
  74
  75struct bpf_dtab {
  76	struct bpf_map map;
   77	struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
  78	struct list_head list;
  79
  80	/* these are only used for DEVMAP_HASH type maps */
  81	struct hlist_head *dev_index_head;
  82	spinlock_t index_lock;
  83	unsigned int items;
  84	u32 n_buckets;
  85};
  86
  87static DEFINE_PER_CPU(struct list_head, dev_flush_list);
  88static DEFINE_SPINLOCK(dev_map_lock);
  89static LIST_HEAD(dev_map_list);
  90
  91static struct hlist_head *dev_map_create_hash(unsigned int entries,
  92					      int numa_node)
  93{
  94	int i;
  95	struct hlist_head *hash;
  96
  97	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
  98	if (hash != NULL)
  99		for (i = 0; i < entries; i++)
 100			INIT_HLIST_HEAD(&hash[i]);
 101
 102	return hash;
 103}
 104
 105static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
 106						    int idx)
 107{
 108	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
 109}
 110
 111static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 112{
  113	u32 valsize = attr->value_size;
 114
 115	/* check sanity of attributes. 2 value sizes supported:
 116	 * 4 bytes: ifindex
 117	 * 8 bytes: ifindex + prog fd
 118	 */
 119	if (attr->max_entries == 0 || attr->key_size != 4 ||
 120	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
 121	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
 122	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
 123		return -EINVAL;
 124
 125	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
 126	 * verifier prevents writes from the BPF side
 127	 */
 128	attr->map_flags |= BPF_F_RDONLY_PROG;
 129
 130
 131	bpf_map_init_from_attr(&dtab->map, attr);
  132
 133	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
 134		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
 135
 136		if (!dtab->n_buckets) /* Overflow check */
  137			return -EINVAL;
 138	}
  139
 140	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
 141		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
 142							   dtab->map.numa_node);
 143		if (!dtab->dev_index_head)
 144			return -ENOMEM;
 145
 146		spin_lock_init(&dtab->index_lock);
 147	} else {
 148		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
 149						      sizeof(struct bpf_dtab_netdev *),
 150						      dtab->map.numa_node);
 151		if (!dtab->netdev_map)
 152			return -ENOMEM;
 153	}
 154
  155	return 0;
 156}
 157
 158static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 159{
 160	struct bpf_dtab *dtab;
 161	int err;
 162
 163	if (!capable(CAP_NET_ADMIN))
 164		return ERR_PTR(-EPERM);
 165
 166	dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
 167	if (!dtab)
 168		return ERR_PTR(-ENOMEM);
 169
 170	err = dev_map_init_map(dtab, attr);
 171	if (err) {
 172		bpf_map_area_free(dtab);
 173		return ERR_PTR(err);
 174	}
 175
 176	spin_lock(&dev_map_lock);
 177	list_add_tail_rcu(&dtab->list, &dev_map_list);
 178	spin_unlock(&dev_map_lock);
 179
 180	return &dtab->map;
 181}
 182
 183static void dev_map_free(struct bpf_map *map)
 184{
 185	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 186	int i;
 187
 188	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
 189	 * so the programs (can be more than one that used this map) were
 190	 * disconnected from events. The following synchronize_rcu() guarantees
 191	 * both rcu read critical sections complete and waits for
 192	 * preempt-disable regions (NAPI being the relevant context here) so we
 193	 * are certain there will be no further reads against the netdev_map and
 194	 * all flush operations are complete. Flush operations can only be done
 195	 * from NAPI context for this reason.
 196	 */
 197
 198	spin_lock(&dev_map_lock);
 199	list_del_rcu(&dtab->list);
 200	spin_unlock(&dev_map_lock);
 201
 202	bpf_clear_redirect_map(map);
 203	synchronize_rcu();
 204
 205	/* Make sure prior __dev_map_entry_free() have completed. */
 206	rcu_barrier();
 207
 208	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
 209		for (i = 0; i < dtab->n_buckets; i++) {
 210			struct bpf_dtab_netdev *dev;
 211			struct hlist_head *head;
 212			struct hlist_node *next;
 213
 214			head = dev_map_index_hash(dtab, i);
 215
 216			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
 217				hlist_del_rcu(&dev->index_hlist);
 218				if (dev->xdp_prog)
 219					bpf_prog_put(dev->xdp_prog);
 220				dev_put(dev->dev);
 221				kfree(dev);
 222			}
 223		}
 224
 225		bpf_map_area_free(dtab->dev_index_head);
 226	} else {
 227		for (i = 0; i < dtab->map.max_entries; i++) {
 228			struct bpf_dtab_netdev *dev;
 229
 230			dev = rcu_dereference_raw(dtab->netdev_map[i]);
 231			if (!dev)
 232				continue;
 233
 234			if (dev->xdp_prog)
 235				bpf_prog_put(dev->xdp_prog);
 236			dev_put(dev->dev);
 237			kfree(dev);
 238		}
 239
  240		bpf_map_area_free(dtab->netdev_map);
 241	}
 242
  243	bpf_map_area_free(dtab);
 244}
 245
 246static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 247{
 248	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 249	u32 index = key ? *(u32 *)key : U32_MAX;
 250	u32 *next = next_key;
 251
 252	if (index >= dtab->map.max_entries) {
 253		*next = 0;
 254		return 0;
 255	}
 256
 257	if (index == dtab->map.max_entries - 1)
 258		return -ENOENT;
 259	*next = index + 1;
 260	return 0;
 261}
 262
 263/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 264 * by local_bh_disable() (from XDP calls inside NAPI). The
 265 * rcu_read_lock_bh_held() below makes lockdep accept both.
 266 */
  267static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
 268{
 269	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 270	struct hlist_head *head = dev_map_index_hash(dtab, key);
 271	struct bpf_dtab_netdev *dev;
 272
 273	hlist_for_each_entry_rcu(dev, head, index_hlist,
 274				 lockdep_is_held(&dtab->index_lock))
 275		if (dev->idx == key)
 276			return dev;
 277
 278	return NULL;
 279}
 280
 281static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
 282				    void *next_key)
 283{
 284	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 285	u32 idx, *next = next_key;
 286	struct bpf_dtab_netdev *dev, *next_dev;
 287	struct hlist_head *head;
 288	int i = 0;
 289
 290	if (!key)
 291		goto find_first;
 292
 293	idx = *(u32 *)key;
 294
 295	dev = __dev_map_hash_lookup_elem(map, idx);
 296	if (!dev)
 297		goto find_first;
 298
 299	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
 300				    struct bpf_dtab_netdev, index_hlist);
 301
 302	if (next_dev) {
 303		*next = next_dev->idx;
 304		return 0;
 305	}
 306
 307	i = idx & (dtab->n_buckets - 1);
 308	i++;
 309
 310 find_first:
 311	for (; i < dtab->n_buckets; i++) {
 312		head = dev_map_index_hash(dtab, i);
 313
 314		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
 315					    struct bpf_dtab_netdev,
 316					    index_hlist);
 317		if (next_dev) {
 318			*next = next_dev->idx;
 319			return 0;
 320		}
 321	}
 322
 323	return -ENOENT;
 324}
 325
 326static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
 327				struct xdp_frame **frames, int n,
 328				struct net_device *dev)
 329{
 330	struct xdp_txq_info txq = { .dev = dev };
 331	struct xdp_buff xdp;
 332	int i, nframes = 0;
 333
 334	for (i = 0; i < n; i++) {
 335		struct xdp_frame *xdpf = frames[i];
 336		u32 act;
 337		int err;
 338
 339		xdp_convert_frame_to_buff(xdpf, &xdp);
 340		xdp.txq = &txq;
 341
 342		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 343		switch (act) {
 344		case XDP_PASS:
 345			err = xdp_update_frame_from_buff(&xdp, xdpf);
 346			if (unlikely(err < 0))
 347				xdp_return_frame_rx_napi(xdpf);
 348			else
 349				frames[nframes++] = xdpf;
 350			break;
 351		default:
 352			bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
 353			fallthrough;
 354		case XDP_ABORTED:
 355			trace_xdp_exception(dev, xdp_prog, act);
 356			fallthrough;
 357		case XDP_DROP:
 358			xdp_return_frame_rx_napi(xdpf);
 359			break;
 360		}
 361	}
 362	return nframes; /* sent frames count */
 363}
 364
 365static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
 366{
 367	struct net_device *dev = bq->dev;
 368	unsigned int cnt = bq->count;
 369	int sent = 0, err = 0;
 370	int to_send = cnt;
 371	int i;
 372
 373	if (unlikely(!cnt))
 374		return;
 375
 376	for (i = 0; i < cnt; i++) {
 377		struct xdp_frame *xdpf = bq->q[i];
 378
 379		prefetch(xdpf);
 380	}
 381
 382	if (bq->xdp_prog) {
 383		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
 384		if (!to_send)
 385			goto out;
 386	}
 387
 388	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
 389	if (sent < 0) {
 390		/* If ndo_xdp_xmit fails with an errno, no frames have
 391		 * been xmit'ed.
 392		 */
 393		err = sent;
  394		sent = 0;
  395	}
  396
  397	/* If not all frames have been transmitted, it is our
  398	 * responsibility to free them
 399	 */
 400	for (i = sent; unlikely(i < to_send); i++)
 401		xdp_return_frame_rx_napi(bq->q[i]);
 402
 403out:
 404	bq->count = 0;
  405	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
 406}
 407
 408/* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
 409 * driver before returning from its napi->poll() routine. See the comment above
  410 * xdp_do_flush() in filter.c.
 411 */
 412void __dev_flush(void)
 413{
 414	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
  415	struct xdp_dev_bulk_queue *bq, *tmp;
 416
 417	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
 418		bq_xmit_all(bq, XDP_XMIT_FLUSH);
 419		bq->dev_rx = NULL;
 420		bq->xdp_prog = NULL;
 421		__list_del_clearprev(&bq->flush_node);
 422	}
 423}
 424
 425/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 426 * by local_bh_disable() (from XDP calls inside NAPI). The
 427 * rcu_read_lock_bh_held() below makes lockdep accept both.
 428 */
 429static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
 430{
 431	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 432	struct bpf_dtab_netdev *obj;
 433
 434	if (key >= map->max_entries)
 435		return NULL;
 436
 437	obj = rcu_dereference_check(dtab->netdev_map[key],
 438				    rcu_read_lock_bh_held());
 439	return obj;
 440}
 441
 442/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu
 443 * variable access, and map elements stick around. See comment above
 444 * xdp_do_flush() in filter.c.
 445 */
 446static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
  447		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
 448{
 449	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
 450	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
 451
 452	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
 453		bq_xmit_all(bq, 0);
 454
  455	/* Ingress dev_rx will be the same for all xdp_frames in the
  456	 * bulk_queue, because bq is stored per-CPU and must be flushed
  457	 * from the net_device driver's NAPI func end.
 458	 *
 459	 * Do the same with xdp_prog and flush_list since these fields
 460	 * are only ever modified together.
 461	 */
 462	if (!bq->dev_rx) {
 463		bq->dev_rx = dev_rx;
 464		bq->xdp_prog = xdp_prog;
 465		list_add(&bq->flush_node, flush_list);
 466	}
 467
 468	bq->q[bq->count++] = xdpf;
 469}
 470
 471static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 472				struct net_device *dev_rx,
 473				struct bpf_prog *xdp_prog)
 474{
 475	int err;
 476
 477	if (!dev->netdev_ops->ndo_xdp_xmit)
 478		return -EOPNOTSUPP;
 479
 480	err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
 481	if (unlikely(err))
 482		return err;
 483
 484	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
 485	return 0;
 486}
 487
 488static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
 489{
 490	struct xdp_txq_info txq = { .dev = dst->dev };
 491	struct xdp_buff xdp;
 492	u32 act;
 493
 494	if (!dst->xdp_prog)
 495		return XDP_PASS;
 496
 497	__skb_pull(skb, skb->mac_len);
 498	xdp.txq = &txq;
 499
 500	act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
 501	switch (act) {
 502	case XDP_PASS:
 503		__skb_push(skb, skb->mac_len);
 504		break;
 505	default:
 506		bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
 507		fallthrough;
 508	case XDP_ABORTED:
 509		trace_xdp_exception(dst->dev, dst->xdp_prog, act);
 510		fallthrough;
 511	case XDP_DROP:
 512		kfree_skb(skb);
 513		break;
 514	}
 515
 516	return act;
 517}
 518
 519int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 520		    struct net_device *dev_rx)
 521{
 522	return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
 523}
 524
 525int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
 526		    struct net_device *dev_rx)
 527{
 528	struct net_device *dev = dst->dev;
 529
 530	return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
 531}
 532
 533static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
 534{
 535	if (!obj ||
 536	    !obj->dev->netdev_ops->ndo_xdp_xmit)
 537		return false;
 538
 539	if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
 540		return false;
 541
 542	return true;
 543}
 544
 545static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
 546				 struct net_device *dev_rx,
 547				 struct xdp_frame *xdpf)
 548{
 549	struct xdp_frame *nxdpf;
 550
 551	nxdpf = xdpf_clone(xdpf);
 552	if (!nxdpf)
 553		return -ENOMEM;
 554
 555	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);
 556
 557	return 0;
 558}
 559
 560static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
 561{
 562	while (num_excluded--) {
 563		if (ifindex == excluded[num_excluded])
 564			return true;
 565	}
 566	return false;
 567}
 568
 569/* Get ifindex of each upper device. 'indexes' must be able to hold at
 570 * least MAX_NEST_DEV elements.
 571 * Returns the number of ifindexes added.
 572 */
 573static int get_upper_ifindexes(struct net_device *dev, int *indexes)
 574{
 575	struct net_device *upper;
 576	struct list_head *iter;
 577	int n = 0;
 578
 579	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
 580		indexes[n++] = upper->ifindex;
 581	}
 582	return n;
 583}
 584
 585int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
 586			  struct bpf_map *map, bool exclude_ingress)
 587{
 588	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 589	struct bpf_dtab_netdev *dst, *last_dst = NULL;
 590	int excluded_devices[1+MAX_NEST_DEV];
 591	struct hlist_head *head;
 592	int num_excluded = 0;
 593	unsigned int i;
 594	int err;
 595
 596	if (exclude_ingress) {
 597		num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
 598		excluded_devices[num_excluded++] = dev_rx->ifindex;
 599	}
 600
 601	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
 602		for (i = 0; i < map->max_entries; i++) {
 603			dst = rcu_dereference_check(dtab->netdev_map[i],
 604						    rcu_read_lock_bh_held());
 605			if (!is_valid_dst(dst, xdpf))
 606				continue;
 607
 608			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
 609				continue;
 610
 611			/* we only need n-1 clones; last_dst enqueued below */
 612			if (!last_dst) {
 613				last_dst = dst;
 614				continue;
 615			}
 616
 617			err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
 618			if (err)
 619				return err;
 620
 621			last_dst = dst;
 622		}
 623	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
 624		for (i = 0; i < dtab->n_buckets; i++) {
 625			head = dev_map_index_hash(dtab, i);
 626			hlist_for_each_entry_rcu(dst, head, index_hlist,
 627						 lockdep_is_held(&dtab->index_lock)) {
 628				if (!is_valid_dst(dst, xdpf))
 629					continue;
 630
 631				if (is_ifindex_excluded(excluded_devices, num_excluded,
 632							dst->dev->ifindex))
 633					continue;
 634
 635				/* we only need n-1 clones; last_dst enqueued below */
 636				if (!last_dst) {
 637					last_dst = dst;
 638					continue;
 639				}
 640
 641				err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
 642				if (err)
 643					return err;
 644
 645				last_dst = dst;
 646			}
 647		}
 648	}
 649
 650	/* consume the last copy of the frame */
 651	if (last_dst)
 652		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
 653	else
 654		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */
 655
 656	return 0;
 657}
 658
 659int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
 660			     struct bpf_prog *xdp_prog)
 661{
 662	int err;
 663
 664	err = xdp_ok_fwd_dev(dst->dev, skb->len);
 665	if (unlikely(err))
 666		return err;
 667
 668	/* Redirect has already succeeded semantically at this point, so we just
 669	 * return 0 even if packet is dropped. Helper below takes care of
 670	 * freeing skb.
 671	 */
 672	if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
 673		return 0;
 674
 675	skb->dev = dst->dev;
 676	generic_xdp_tx(skb, xdp_prog);
 677
 678	return 0;
 679}
 680
 681static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
 682				  struct sk_buff *skb,
 683				  struct bpf_prog *xdp_prog)
 684{
 685	struct sk_buff *nskb;
 686	int err;
 687
 688	nskb = skb_clone(skb, GFP_ATOMIC);
 689	if (!nskb)
 690		return -ENOMEM;
 691
 692	err = dev_map_generic_redirect(dst, nskb, xdp_prog);
 693	if (unlikely(err)) {
 694		consume_skb(nskb);
 695		return err;
 696	}
 697
 698	return 0;
 699}
 700
 701int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
 702			   struct bpf_prog *xdp_prog, struct bpf_map *map,
 703			   bool exclude_ingress)
 704{
 705	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 706	struct bpf_dtab_netdev *dst, *last_dst = NULL;
 707	int excluded_devices[1+MAX_NEST_DEV];
 708	struct hlist_head *head;
 709	struct hlist_node *next;
 710	int num_excluded = 0;
 711	unsigned int i;
 712	int err;
 713
 714	if (exclude_ingress) {
 715		num_excluded = get_upper_ifindexes(dev, excluded_devices);
 716		excluded_devices[num_excluded++] = dev->ifindex;
 717	}
 718
 719	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
 720		for (i = 0; i < map->max_entries; i++) {
 721			dst = rcu_dereference_check(dtab->netdev_map[i],
 722						    rcu_read_lock_bh_held());
 723			if (!dst)
 724				continue;
 725
 726			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
 727				continue;
 728
 729			/* we only need n-1 clones; last_dst enqueued below */
 730			if (!last_dst) {
 731				last_dst = dst;
 732				continue;
 733			}
 734
 735			err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
 736			if (err)
 737				return err;
 738
 739			last_dst = dst;
 740
 741		}
 742	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
 743		for (i = 0; i < dtab->n_buckets; i++) {
 744			head = dev_map_index_hash(dtab, i);
 745			hlist_for_each_entry_safe(dst, next, head, index_hlist) {
 746				if (!dst)
 747					continue;
 748
 749				if (is_ifindex_excluded(excluded_devices, num_excluded,
 750							dst->dev->ifindex))
 751					continue;
 752
 753				/* we only need n-1 clones; last_dst enqueued below */
 754				if (!last_dst) {
 755					last_dst = dst;
 756					continue;
 757				}
 758
 759				err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
 760				if (err)
 761					return err;
 762
 763				last_dst = dst;
 764			}
 765		}
 766	}
 767
 768	/* consume the first skb and return */
 769	if (last_dst)
 770		return dev_map_generic_redirect(last_dst, skb, xdp_prog);
 771
 772	/* dtab is empty */
 773	consume_skb(skb);
 774	return 0;
 775}
 776
 777static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
 778{
  779	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
 780
 781	return obj ? &obj->val : NULL;
 782}
 783
 784static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
 785{
 786	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
 787								*(u32 *)key);
  788	return obj ? &obj->val : NULL;
 789}
 790
 791static void __dev_map_entry_free(struct rcu_head *rcu)
 792{
 793	struct bpf_dtab_netdev *dev;
 794
 795	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
 796	if (dev->xdp_prog)
 797		bpf_prog_put(dev->xdp_prog);
 798	dev_put(dev->dev);
 799	kfree(dev);
 800}
 801
 802static int dev_map_delete_elem(struct bpf_map *map, void *key)
 803{
 804	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 805	struct bpf_dtab_netdev *old_dev;
 806	int k = *(u32 *)key;
 807
 808	if (k >= map->max_entries)
 809		return -EINVAL;
 810
  811	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
 812	if (old_dev)
 813		call_rcu(&old_dev->rcu, __dev_map_entry_free);
 814	return 0;
 815}
 816
 817static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
 818{
 819	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 820	struct bpf_dtab_netdev *old_dev;
 821	int k = *(u32 *)key;
 822	unsigned long flags;
 823	int ret = -ENOENT;
 824
 825	spin_lock_irqsave(&dtab->index_lock, flags);
 826
 827	old_dev = __dev_map_hash_lookup_elem(map, k);
 828	if (old_dev) {
 829		dtab->items--;
 830		hlist_del_init_rcu(&old_dev->index_hlist);
 831		call_rcu(&old_dev->rcu, __dev_map_entry_free);
 832		ret = 0;
 833	}
 834	spin_unlock_irqrestore(&dtab->index_lock, flags);
 835
 836	return ret;
 837}
 838
 839static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
 840						    struct bpf_dtab *dtab,
 841						    struct bpf_devmap_val *val,
 842						    unsigned int idx)
 843{
 844	struct bpf_prog *prog = NULL;
  845	struct bpf_dtab_netdev *dev;
 846
 847	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
 848				   GFP_NOWAIT | __GFP_NOWARN,
 849				   dtab->map.numa_node);
 850	if (!dev)
 851		return ERR_PTR(-ENOMEM);
 852
 853	dev->dev = dev_get_by_index(net, val->ifindex);
 854	if (!dev->dev)
 855		goto err_out;
 856
 857	if (val->bpf_prog.fd > 0) {
 858		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
 859					     BPF_PROG_TYPE_XDP, false);
 860		if (IS_ERR(prog))
 861			goto err_put_dev;
 862		if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
 863		    !bpf_prog_map_compatible(&dtab->map, prog))
  864			goto err_put_prog;
 865	}
 866
 867	dev->idx = idx;
 868	dev->dtab = dtab;
 869	if (prog) {
 870		dev->xdp_prog = prog;
 871		dev->val.bpf_prog.id = prog->aux->id;
 872	} else {
 873		dev->xdp_prog = NULL;
 874		dev->val.bpf_prog.id = 0;
 875	}
 876	dev->val.ifindex = val->ifindex;
 877
 878	return dev;
 879err_put_prog:
 880	bpf_prog_put(prog);
 881err_put_dev:
 882	dev_put(dev->dev);
 883err_out:
 884	kfree(dev);
 885	return ERR_PTR(-EINVAL);
 886}
 887
 888static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
 889				 void *key, void *value, u64 map_flags)
 890{
 891	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 892	struct bpf_dtab_netdev *dev, *old_dev;
 893	struct bpf_devmap_val val = {};
 894	u32 i = *(u32 *)key;
 895
 896	if (unlikely(map_flags > BPF_EXIST))
 897		return -EINVAL;
 898	if (unlikely(i >= dtab->map.max_entries))
 899		return -E2BIG;
 900	if (unlikely(map_flags == BPF_NOEXIST))
 901		return -EEXIST;
 902
 903	/* already verified value_size <= sizeof val */
 904	memcpy(&val, value, map->value_size);
 905
 906	if (!val.ifindex) {
 907		dev = NULL;
 908		/* can not specify fd if ifindex is 0 */
 909		if (val.bpf_prog.fd > 0)
 910			return -EINVAL;
 911	} else {
 912		dev = __dev_map_alloc_node(net, dtab, &val, i);
 913		if (IS_ERR(dev))
 914			return PTR_ERR(dev);
 915	}
 916
  917	/* Use call_rcu() here to ensure rcu critical sections have completed.
  918	 * Remember that the driver side flush operation will happen before the
  919	 * net device is removed.
 920	 */
 921	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
 922	if (old_dev)
 923		call_rcu(&old_dev->rcu, __dev_map_entry_free);
 924
 925	return 0;
 926}
 927
 928static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
 929			       u64 map_flags)
 930{
 931	return __dev_map_update_elem(current->nsproxy->net_ns,
 932				     map, key, value, map_flags);
 933}
 934
 935static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
 936				     void *key, void *value, u64 map_flags)
 937{
 938	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 939	struct bpf_dtab_netdev *dev, *old_dev;
 940	struct bpf_devmap_val val = {};
 941	u32 idx = *(u32 *)key;
 942	unsigned long flags;
 943	int err = -EEXIST;
 944
 945	/* already verified value_size <= sizeof val */
 946	memcpy(&val, value, map->value_size);
 947
 948	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
 949		return -EINVAL;
 950
 951	spin_lock_irqsave(&dtab->index_lock, flags);
 952
 953	old_dev = __dev_map_hash_lookup_elem(map, idx);
 954	if (old_dev && (map_flags & BPF_NOEXIST))
 955		goto out_err;
 956
 957	dev = __dev_map_alloc_node(net, dtab, &val, idx);
 958	if (IS_ERR(dev)) {
 959		err = PTR_ERR(dev);
 960		goto out_err;
 961	}
 962
 963	if (old_dev) {
 964		hlist_del_rcu(&old_dev->index_hlist);
 965	} else {
 966		if (dtab->items >= dtab->map.max_entries) {
 967			spin_unlock_irqrestore(&dtab->index_lock, flags);
 968			call_rcu(&dev->rcu, __dev_map_entry_free);
 969			return -E2BIG;
 970		}
 971		dtab->items++;
 972	}
 973
 974	hlist_add_head_rcu(&dev->index_hlist,
 975			   dev_map_index_hash(dtab, idx));
 976	spin_unlock_irqrestore(&dtab->index_lock, flags);
 977
 978	if (old_dev)
 979		call_rcu(&old_dev->rcu, __dev_map_entry_free);
 980
 981	return 0;
 982
 983out_err:
 984	spin_unlock_irqrestore(&dtab->index_lock, flags);
 985	return err;
 986}
 987
 988static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
 989				   u64 map_flags)
 990{
 991	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
 992					 map, key, value, map_flags);
 993}
 994
 995static int dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
 996{
 997	return __bpf_xdp_redirect_map(map, ifindex, flags,
 998				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
 999				      __dev_map_lookup_elem);
1000}
1001
1002static int dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
1003{
1004	return __bpf_xdp_redirect_map(map, ifindex, flags,
1005				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
1006				      __dev_map_hash_lookup_elem);
1007}
1008
1009BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
1010const struct bpf_map_ops dev_map_ops = {
1011	.map_meta_equal = bpf_map_meta_equal,
1012	.map_alloc = dev_map_alloc,
1013	.map_free = dev_map_free,
1014	.map_get_next_key = dev_map_get_next_key,
1015	.map_lookup_elem = dev_map_lookup_elem,
1016	.map_update_elem = dev_map_update_elem,
1017	.map_delete_elem = dev_map_delete_elem,
1018	.map_check_btf = map_check_no_btf,
1019	.map_btf_id = &dev_map_btf_ids[0],
1020	.map_redirect = dev_map_redirect,
1021};
1022
1023const struct bpf_map_ops dev_map_hash_ops = {
1024	.map_meta_equal = bpf_map_meta_equal,
1025	.map_alloc = dev_map_alloc,
1026	.map_free = dev_map_free,
1027	.map_get_next_key = dev_map_hash_get_next_key,
1028	.map_lookup_elem = dev_map_hash_lookup_elem,
1029	.map_update_elem = dev_map_hash_update_elem,
1030	.map_delete_elem = dev_map_hash_delete_elem,
1031	.map_check_btf = map_check_no_btf,
1032	.map_btf_id = &dev_map_btf_ids[0],
1033	.map_redirect = dev_hash_map_redirect,
1034};
1035
1036static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
1037				       struct net_device *netdev)
1038{
1039	unsigned long flags;
1040	u32 i;
1041
1042	spin_lock_irqsave(&dtab->index_lock, flags);
1043	for (i = 0; i < dtab->n_buckets; i++) {
1044		struct bpf_dtab_netdev *dev;
1045		struct hlist_head *head;
1046		struct hlist_node *next;
1047
1048		head = dev_map_index_hash(dtab, i);
1049
1050		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
1051			if (netdev != dev->dev)
1052				continue;
1053
1054			dtab->items--;
1055			hlist_del_rcu(&dev->index_hlist);
1056			call_rcu(&dev->rcu, __dev_map_entry_free);
1057		}
1058	}
1059	spin_unlock_irqrestore(&dtab->index_lock, flags);
1060}
1061
1062static int dev_map_notification(struct notifier_block *notifier,
1063				ulong event, void *ptr)
1064{
1065	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
1066	struct bpf_dtab *dtab;
1067	int i, cpu;
1068
1069	switch (event) {
1070	case NETDEV_REGISTER:
1071		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
1072			break;
1073
1074		/* will be freed in free_netdev() */
1075		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
1076		if (!netdev->xdp_bulkq)
1077			return NOTIFY_BAD;
1078
1079		for_each_possible_cpu(cpu)
1080			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
1081		break;
1082	case NETDEV_UNREGISTER:
1083		/* This rcu_read_lock/unlock pair is needed because
1084		 * dev_map_list is an RCU list AND to ensure a delete
1085		 * operation does not free a netdev_map entry while we
1086		 * are comparing it against the netdev being unregistered.
1087		 */
1088		rcu_read_lock();
1089		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
1090			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
1091				dev_map_hash_remove_netdev(dtab, netdev);
1092				continue;
1093			}
1094
1095			for (i = 0; i < dtab->map.max_entries; i++) {
1096				struct bpf_dtab_netdev *dev, *odev;
1097
1098				dev = rcu_dereference(dtab->netdev_map[i]);
1099				if (!dev || netdev != dev->dev)
1100					continue;
1101				odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
1102				if (dev == odev)
1103					call_rcu(&dev->rcu,
1104						 __dev_map_entry_free);
1105			}
1106		}
1107		rcu_read_unlock();
1108		break;
1109	default:
1110		break;
1111	}
1112	return NOTIFY_OK;
1113}
1114
1115static struct notifier_block dev_map_notifier = {
1116	.notifier_call = dev_map_notification,
1117};
1118
1119static int __init dev_map_init(void)
1120{
1121	int cpu;
1122
1123	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
1124	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
1125		     offsetof(struct _bpf_dtab_netdev, dev));
1126	register_netdevice_notifier(&dev_map_notifier);
1127
1128	for_each_possible_cpu(cpu)
1129		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
1130	return 0;
1131}
1132
1133subsys_initcall(dev_map_init);
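For reference, a minimal user-space sketch of feeding the 8-byte struct bpf_devmap_val format that dev_map_init_map() accepts above; it assumes libbpf's bpf_map_update_elem() and an already-created DEVMAP fd, and devmap_set_slot() is an illustrative helper, not part of the kernel sources:

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Install ifindex (and optionally a BPF_XDP_DEVMAP program fd) in slot 'key'
 * of a BPF_MAP_TYPE_DEVMAP created with
 * value_size == sizeof(struct bpf_devmap_val).
 */
static int devmap_set_slot(int map_fd, __u32 key, __u32 ifindex, int prog_fd)
{
	struct bpf_devmap_val val = {
		.ifindex = ifindex,
		.bpf_prog.fd = prog_fd,	/* <= 0 means no program attached */
	};

	return bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
}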
v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
  3 */
  4
  5/* A devmap's primary use is as a backend map for the XDP BPF helper call
  6 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
  7 * spent some effort to ensure the datapath with redirect maps does not use
  8 * any locking. This is a quick note on the details.
  9 *
 10 * We have three possible paths to get into the devmap control plane bpf
 11 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 12 * will invoke an update, delete, or lookup operation. To ensure updates and
 13 * deletes appear atomic from the datapath side xchg() is used to modify the
 14 * netdev_map array. Then because the datapath does a lookup into the netdev_map
 15 * array (read-only) from an RCU critical section we use call_rcu() to wait for
 16 * an RCU grace period before freeing the old data structures. This ensures the
 17 * datapath always has a valid copy. However, the datapath does a "flush"
 18 * operation that pushes any pending packets in the driver outside the RCU
 19 * critical section. Each bpf_dtab_netdev tracks these pending operations using
 20 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
 21 * this list is empty, indicating outstanding flush operations have completed.
 22 *
 23 * BPF syscalls may race with BPF program calls on any of the update, delete
 24 * or lookup operations. As noted above, the xchg() operation also keeps the
 25 * netdev_map consistent in this case. From the devmap side BPF programs
 26 * calling into these operations are the same as multiple user space threads
 27 * making system calls.
 28 *
 29 * Finally, any of the above may race with a netdev_unregister notifier. The
 30 * unregister notifier must search for net devices in the map structure that
 31 * contain a reference to the net device and remove them. This is a two step
 32 * process (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 33 * check to see if the ifindex is the same as the net_device being removed.
 34 * When removing the dev a cmpxchg() is used to ensure the correct dev is
 35 * removed; in the case of a concurrent update or delete operation it is
 36 * possible that the initially referenced dev is no longer in the map. As the
 37 * notifier hook walks the map we know that new dev references can not be
 38 * added by the user because core infrastructure ensures dev_get_by_index()
 39 * calls will fail at this point.
 40 *
 41 * The devmap_hash type is a map type which interprets keys as ifindexes and
 42 * indexes these using a hashmap. This allows maps that use ifindex as key to be
 43 * densely packed instead of having holes in the lookup array for unused
 44 * ifindexes. The setup and packet enqueue/send code is shared between the two
 45 * types of devmap; only the lookup and insertion is different.
 46 */
 47#include <linux/bpf.h>
 48#include <net/xdp.h>
 49#include <linux/filter.h>
  50#include <trace/events/xdp.h>
 51
 52#define DEV_CREATE_FLAG_MASK \
 53	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
 54
 55#define DEV_MAP_BULK_SIZE 16
 56struct bpf_dtab_netdev;
 57
 58struct xdp_bulk_queue {
 59	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
  60	struct list_head flush_node;
 61	struct net_device *dev_rx;
 62	struct bpf_dtab_netdev *obj;
 63	unsigned int count;
 64};
 65
 66struct bpf_dtab_netdev {
 67	struct net_device *dev; /* must be first member, due to tracepoint */
 68	struct hlist_node index_hlist;
 69	struct bpf_dtab *dtab;
 70	struct xdp_bulk_queue __percpu *bulkq;
 71	struct rcu_head rcu;
  72	unsigned int idx; /* keep track of map index for tracepoint */
 73};
 74
 75struct bpf_dtab {
 76	struct bpf_map map;
 77	struct bpf_dtab_netdev **netdev_map;
 78	struct list_head __percpu *flush_list;
 79	struct list_head list;
 80
 81	/* these are only used for DEVMAP_HASH type maps */
 82	struct hlist_head *dev_index_head;
 83	spinlock_t index_lock;
 84	unsigned int items;
 85	u32 n_buckets;
 86};
  87
 88static DEFINE_SPINLOCK(dev_map_lock);
 89static LIST_HEAD(dev_map_list);
 90
  91static struct hlist_head *dev_map_create_hash(unsigned int entries)
 92{
 93	int i;
 94	struct hlist_head *hash;
 95
 96	hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
 97	if (hash != NULL)
 98		for (i = 0; i < entries; i++)
 99			INIT_HLIST_HEAD(&hash[i]);
100
101	return hash;
102}
 103
104static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
105{
106	int err, cpu;
107	u64 cost;
108
 109	/* check sanity of attributes */
110	if (attr->max_entries == 0 || attr->key_size != 4 ||
 111	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
112		return -EINVAL;
113
114	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
115	 * verifier prevents writes from the BPF side
116	 */
117	attr->map_flags |= BPF_F_RDONLY_PROG;
118
119
120	bpf_map_init_from_attr(&dtab->map, attr);
121
122	/* make sure page count doesn't overflow */
123	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
124	cost += sizeof(struct list_head) * num_possible_cpus();
125
126	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
127		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
128
129		if (!dtab->n_buckets) /* Overflow check */
130			return -EINVAL;
131		cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
132	}
133
134	/* if map size is larger than memlock limit, reject it */
135	err = bpf_map_charge_init(&dtab->map.memory, cost);
136	if (err)
137		return -EINVAL;
138
139	dtab->flush_list = alloc_percpu(struct list_head);
140	if (!dtab->flush_list)
141		goto free_charge;
142
143	for_each_possible_cpu(cpu)
144		INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));
145
146	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
147					      sizeof(struct bpf_dtab_netdev *),
148					      dtab->map.numa_node);
149	if (!dtab->netdev_map)
150		goto free_percpu;
151
152	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
 153		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
154		if (!dtab->dev_index_head)
155			goto free_map_area;
156
 157		spin_lock_init(&dtab->index_lock);
158	}
159
160	return 0;
161
162free_map_area:
163	bpf_map_area_free(dtab->netdev_map);
164free_percpu:
165	free_percpu(dtab->flush_list);
166free_charge:
167	bpf_map_charge_finish(&dtab->map.memory);
168	return -ENOMEM;
169}
170
171static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
172{
173	struct bpf_dtab *dtab;
174	int err;
175
176	if (!capable(CAP_NET_ADMIN))
177		return ERR_PTR(-EPERM);
178
179	dtab = kzalloc(sizeof(*dtab), GFP_USER);
180	if (!dtab)
181		return ERR_PTR(-ENOMEM);
182
183	err = dev_map_init_map(dtab, attr);
184	if (err) {
185		kfree(dtab);
186		return ERR_PTR(err);
187	}
188
189	spin_lock(&dev_map_lock);
190	list_add_tail_rcu(&dtab->list, &dev_map_list);
191	spin_unlock(&dev_map_lock);
192
193	return &dtab->map;
194}
195
196static void dev_map_free(struct bpf_map *map)
197{
198	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
199	int i, cpu;
200
201	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
202	 * so the programs (can be more than one that used this map) were
203	 * disconnected from events. Wait for outstanding critical sections in
204	 * these programs to complete. The rcu critical section only guarantees
205	 * no further reads against netdev_map. It does __not__ ensure pending
 206	 * flush operations (if any) are complete.
207	 */
208
209	spin_lock(&dev_map_lock);
210	list_del_rcu(&dtab->list);
211	spin_unlock(&dev_map_lock);
212
213	bpf_clear_redirect_map(map);
214	synchronize_rcu();
215
216	/* Make sure prior __dev_map_entry_free() have completed. */
217	rcu_barrier();
218
219	/* To ensure all pending flush operations have completed wait for flush
220	 * list to empty on _all_ cpus.
221	 * Because the above synchronize_rcu() ensures the map is disconnected
222	 * from the program we can assume no new items will be added.
223	 */
224	for_each_online_cpu(cpu) {
 225		struct list_head *flush_list = per_cpu_ptr(dtab->flush_list, cpu);
226
227		while (!list_empty(flush_list))
228			cond_resched();
229	}
230
231	for (i = 0; i < dtab->map.max_entries; i++) {
 232		struct bpf_dtab_netdev *dev;
233
234		dev = dtab->netdev_map[i];
235		if (!dev)
236			continue;
237
238		free_percpu(dev->bulkq);
239		dev_put(dev->dev);
240		kfree(dev);
241	}
242
243	free_percpu(dtab->flush_list);
244	bpf_map_area_free(dtab->netdev_map);
245	kfree(dtab->dev_index_head);
246	kfree(dtab);
247}
248
249static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
250{
251	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
252	u32 index = key ? *(u32 *)key : U32_MAX;
253	u32 *next = next_key;
254
255	if (index >= dtab->map.max_entries) {
256		*next = 0;
257		return 0;
258	}
259
260	if (index == dtab->map.max_entries - 1)
261		return -ENOENT;
262	*next = index + 1;
263	return 0;
264}
265
266static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
267						    int idx)
268{
269	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
270}
271
272struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
273{
274	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
275	struct hlist_head *head = dev_map_index_hash(dtab, key);
276	struct bpf_dtab_netdev *dev;
277
 278	hlist_for_each_entry_rcu(dev, head, index_hlist)
279		if (dev->idx == key)
280			return dev;
281
282	return NULL;
283}
284
285static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
286				    void *next_key)
287{
288	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
289	u32 idx, *next = next_key;
290	struct bpf_dtab_netdev *dev, *next_dev;
291	struct hlist_head *head;
292	int i = 0;
293
294	if (!key)
295		goto find_first;
296
297	idx = *(u32 *)key;
298
299	dev = __dev_map_hash_lookup_elem(map, idx);
300	if (!dev)
301		goto find_first;
302
303	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
304				    struct bpf_dtab_netdev, index_hlist);
305
306	if (next_dev) {
307		*next = next_dev->idx;
308		return 0;
309	}
310
311	i = idx & (dtab->n_buckets - 1);
312	i++;
313
314 find_first:
315	for (; i < dtab->n_buckets; i++) {
316		head = dev_map_index_hash(dtab, i);
317
318		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
319					    struct bpf_dtab_netdev,
320					    index_hlist);
321		if (next_dev) {
322			*next = next_dev->idx;
323			return 0;
324		}
325	}
326
327	return -ENOENT;
328}
329
330static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
 331		       bool in_napi_ctx)
332{
333	struct bpf_dtab_netdev *obj = bq->obj;
334	struct net_device *dev = obj->dev;
 335	int sent = 0, drops = 0, err = 0;
336	int i;
337
338	if (unlikely(!bq->count))
339		return 0;
340
341	for (i = 0; i < bq->count; i++) {
342		struct xdp_frame *xdpf = bq->q[i];
343
344		prefetch(xdpf);
345	}
346
 347	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
 348	if (sent < 0) {
349		err = sent;
350		sent = 0;
351		goto error;
352	}
353	drops = bq->count - sent;
354out:
355	bq->count = 0;
356
357	trace_xdp_devmap_xmit(&obj->dtab->map, obj->idx,
358			      sent, drops, bq->dev_rx, dev, err);
359	bq->dev_rx = NULL;
360	__list_del_clearprev(&bq->flush_node);
361	return 0;
362error:
363	/* If ndo_xdp_xmit fails with an errno, no frames have been
 364	 * xmit'ed and it's our responsibility to free them all.
365	 */
366	for (i = 0; i < bq->count; i++) {
367		struct xdp_frame *xdpf = bq->q[i];
368
369		/* RX path under NAPI protection, can return frames faster */
370		if (likely(in_napi_ctx))
371			xdp_return_frame_rx_napi(xdpf);
372		else
373			xdp_return_frame(xdpf);
374		drops++;
375	}
376	goto out;
377}
378
379/* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled
380 * from the driver before returning from its napi->poll() routine. The poll()
381 * routine is called either from busy_poll context or net_rx_action signaled
382 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
383 * net device can be torn down. On devmap tear down we ensure the flush list
384 * is empty before completing to ensure all flush operations have completed.
385 */
386void __dev_map_flush(struct bpf_map *map)
387{
388	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
389	struct list_head *flush_list = this_cpu_ptr(dtab->flush_list);
390	struct xdp_bulk_queue *bq, *tmp;
391
392	rcu_read_lock();
393	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
394		bq_xmit_all(bq, XDP_XMIT_FLUSH, true);
 395	rcu_read_unlock();
396}
397
398/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
399 * update happens in parallel here a dev_put wont happen until after reading the
400 * ifindex.
401 */
402struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
403{
404	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
405	struct bpf_dtab_netdev *obj;
406
407	if (key >= map->max_entries)
408		return NULL;
409
 410	obj = READ_ONCE(dtab->netdev_map[key]);
411	return obj;
412}
413
414/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 415 * Thus, safe percpu variable access.
416 */
417static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
418		      struct net_device *dev_rx)
419
420{
421	struct list_head *flush_list = this_cpu_ptr(obj->dtab->flush_list);
422	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
423
424	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
425		bq_xmit_all(bq, 0, true);
426
 427	/* Ingress dev_rx will be the same for all xdp_frames in the
 428	 * bulk_queue, because bq is stored per-CPU and must be flushed
 429	 * from the net_device driver's NAPI func end.
430	 */
431	if (!bq->dev_rx)
 432		bq->dev_rx = dev_rx;
433
 434	bq->q[bq->count++] = xdpf;
435
436	if (!bq->flush_node.prev)
 437		list_add(&bq->flush_node, flush_list);
 438
439	return 0;
440}
441
 442int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
443		    struct net_device *dev_rx)
444{
445	struct net_device *dev = dst->dev;
 446	struct xdp_frame *xdpf;
447	int err;
448
449	if (!dev->netdev_ops->ndo_xdp_xmit)
 450		return -EOPNOTSUPP;
451
452	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
453	if (unlikely(err))
 454		return err;
455
456	xdpf = convert_to_xdp_frame(xdp);
457	if (unlikely(!xdpf))
 458		return -EOVERFLOW;
459
460	return bq_enqueue(dst, xdpf, dev_rx);
461}
462
463int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
464			     struct bpf_prog *xdp_prog)
465{
466	int err;
467
468	err = xdp_ok_fwd_dev(dst->dev, skb->len);
469	if (unlikely(err))
 470		return err;
471	skb->dev = dst->dev;
472	generic_xdp_tx(skb, xdp_prog);
473
474	return 0;
475}
 476
477static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
478{
479	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
480	struct net_device *dev = obj ? obj->dev : NULL;
481
482	return dev ? &dev->ifindex : NULL;
483}
484
485static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
486{
487	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
488								*(u32 *)key);
489	struct net_device *dev = obj ? obj->dev : NULL;
490
491	return dev ? &dev->ifindex : NULL;
492}
493
494static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
495{
496	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
497		struct xdp_bulk_queue *bq;
498		int cpu;
499
500		rcu_read_lock();
501		for_each_online_cpu(cpu) {
502			bq = per_cpu_ptr(dev->bulkq, cpu);
503			bq_xmit_all(bq, XDP_XMIT_FLUSH, false);
504		}
505		rcu_read_unlock();
506	}
507}
508
509static void __dev_map_entry_free(struct rcu_head *rcu)
510{
511	struct bpf_dtab_netdev *dev;
512
513	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
514	dev_map_flush_old(dev);
515	free_percpu(dev->bulkq);
516	dev_put(dev->dev);
517	kfree(dev);
518}
519
520static int dev_map_delete_elem(struct bpf_map *map, void *key)
521{
522	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
523	struct bpf_dtab_netdev *old_dev;
524	int k = *(u32 *)key;
525
526	if (k >= map->max_entries)
527		return -EINVAL;
528
 529	/* Use call_rcu() here to ensure any rcu critical sections have
 530	 * completed, but this does not guarantee a flush has happened
 531	 * yet, because the driver side rcu_read_lock/unlock only protects the
 532	 * running XDP program. However, for pending flush operations the
 533	 * dev and ctx are stored in another per-cpu map. Additionally,
 534	 * the driver tear down ensures all soft irqs are complete before
 535	 * removing the net device once dev_put() drops the refcount to zero.
536	 */
537	old_dev = xchg(&dtab->netdev_map[k], NULL);
538	if (old_dev)
539		call_rcu(&old_dev->rcu, __dev_map_entry_free);
540	return 0;
541}
542
543static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
544{
545	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
546	struct bpf_dtab_netdev *old_dev;
547	int k = *(u32 *)key;
548	unsigned long flags;
549	int ret = -ENOENT;
550
551	spin_lock_irqsave(&dtab->index_lock, flags);
552
553	old_dev = __dev_map_hash_lookup_elem(map, k);
554	if (old_dev) {
555		dtab->items--;
556		hlist_del_init_rcu(&old_dev->index_hlist);
557		call_rcu(&old_dev->rcu, __dev_map_entry_free);
558		ret = 0;
559	}
560	spin_unlock_irqrestore(&dtab->index_lock, flags);
561
562	return ret;
563}
564
565static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
566						    struct bpf_dtab *dtab,
567						    u32 ifindex,
568						    unsigned int idx)
569{
570	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
571	struct bpf_dtab_netdev *dev;
572	struct xdp_bulk_queue *bq;
573	int cpu;
574
 575	dev = kmalloc_node(sizeof(*dev), gfp, dtab->map.numa_node);
576	if (!dev)
577		return ERR_PTR(-ENOMEM);
578
579	dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
580					sizeof(void *), gfp);
581	if (!dev->bulkq) {
582		kfree(dev);
583		return ERR_PTR(-ENOMEM);
584	}
585
586	for_each_possible_cpu(cpu) {
587		bq = per_cpu_ptr(dev->bulkq, cpu);
588		bq->obj = dev;
589	}
590
591	dev->dev = dev_get_by_index(net, ifindex);
592	if (!dev->dev) {
593		free_percpu(dev->bulkq);
594		kfree(dev);
595		return ERR_PTR(-EINVAL);
596	}
597
598	dev->idx = idx;
 599	dev->dtab = dtab;
600
 601	return dev;
602}
603
604static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
605				 void *key, void *value, u64 map_flags)
606{
607	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
608	struct bpf_dtab_netdev *dev, *old_dev;
609	u32 ifindex = *(u32 *)value;
610	u32 i = *(u32 *)key;
611
612	if (unlikely(map_flags > BPF_EXIST))
613		return -EINVAL;
614	if (unlikely(i >= dtab->map.max_entries))
615		return -E2BIG;
616	if (unlikely(map_flags == BPF_NOEXIST))
617		return -EEXIST;
618
 619	if (!ifindex) {
 620		dev = NULL;
621	} else {
622		dev = __dev_map_alloc_node(net, dtab, ifindex, i);
623		if (IS_ERR(dev))
624			return PTR_ERR(dev);
625	}
626
 627	/* Use call_rcu() here to ensure rcu critical sections have completed.
 628	 * Remember that the driver side flush operation will happen before the
 629	 * net device is removed.
630	 */
631	old_dev = xchg(&dtab->netdev_map[i], dev);
632	if (old_dev)
633		call_rcu(&old_dev->rcu, __dev_map_entry_free);
634
635	return 0;
636}
637
638static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
639			       u64 map_flags)
640{
641	return __dev_map_update_elem(current->nsproxy->net_ns,
642				     map, key, value, map_flags);
643}
644
645static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
646				     void *key, void *value, u64 map_flags)
647{
648	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
649	struct bpf_dtab_netdev *dev, *old_dev;
650	u32 ifindex = *(u32 *)value;
651	u32 idx = *(u32 *)key;
652	unsigned long flags;
653	int err = -EEXIST;
654
 655	if (unlikely(map_flags > BPF_EXIST || !ifindex))
656		return -EINVAL;
657
658	spin_lock_irqsave(&dtab->index_lock, flags);
659
660	old_dev = __dev_map_hash_lookup_elem(map, idx);
661	if (old_dev && (map_flags & BPF_NOEXIST))
662		goto out_err;
663
664	dev = __dev_map_alloc_node(net, dtab, ifindex, idx);
665	if (IS_ERR(dev)) {
666		err = PTR_ERR(dev);
667		goto out_err;
668	}
669
670	if (old_dev) {
671		hlist_del_rcu(&old_dev->index_hlist);
672	} else {
673		if (dtab->items >= dtab->map.max_entries) {
674			spin_unlock_irqrestore(&dtab->index_lock, flags);
675			call_rcu(&dev->rcu, __dev_map_entry_free);
676			return -E2BIG;
677		}
678		dtab->items++;
679	}
680
681	hlist_add_head_rcu(&dev->index_hlist,
682			   dev_map_index_hash(dtab, idx));
683	spin_unlock_irqrestore(&dtab->index_lock, flags);
684
685	if (old_dev)
686		call_rcu(&old_dev->rcu, __dev_map_entry_free);
687
688	return 0;
689
690out_err:
691	spin_unlock_irqrestore(&dtab->index_lock, flags);
692	return err;
693}
694
695static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
696				   u64 map_flags)
697{
698	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
699					 map, key, value, map_flags);
700}
 701
 702const struct bpf_map_ops dev_map_ops = {
703	.map_alloc = dev_map_alloc,
704	.map_free = dev_map_free,
705	.map_get_next_key = dev_map_get_next_key,
706	.map_lookup_elem = dev_map_lookup_elem,
707	.map_update_elem = dev_map_update_elem,
708	.map_delete_elem = dev_map_delete_elem,
 709	.map_check_btf = map_check_no_btf,
710};
711
 712const struct bpf_map_ops dev_map_hash_ops = {
713	.map_alloc = dev_map_alloc,
714	.map_free = dev_map_free,
715	.map_get_next_key = dev_map_hash_get_next_key,
716	.map_lookup_elem = dev_map_hash_lookup_elem,
717	.map_update_elem = dev_map_hash_update_elem,
718	.map_delete_elem = dev_map_hash_delete_elem,
 719	.map_check_btf = map_check_no_btf,
720};
721
722static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
723				       struct net_device *netdev)
724{
725	unsigned long flags;
726	u32 i;
727
728	spin_lock_irqsave(&dtab->index_lock, flags);
729	for (i = 0; i < dtab->n_buckets; i++) {
730		struct bpf_dtab_netdev *dev;
731		struct hlist_head *head;
732		struct hlist_node *next;
733
734		head = dev_map_index_hash(dtab, i);
735
736		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
737			if (netdev != dev->dev)
738				continue;
739
740			dtab->items--;
741			hlist_del_rcu(&dev->index_hlist);
742			call_rcu(&dev->rcu, __dev_map_entry_free);
743		}
744	}
745	spin_unlock_irqrestore(&dtab->index_lock, flags);
746}
747
748static int dev_map_notification(struct notifier_block *notifier,
749				ulong event, void *ptr)
750{
751	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
752	struct bpf_dtab *dtab;
753	int i;
754
 755	switch (event) {
756	case NETDEV_UNREGISTER:
757		/* This rcu_read_lock/unlock pair is needed because
758		 * dev_map_list is an RCU list AND to ensure a delete
759		 * operation does not free a netdev_map entry while we
760		 * are comparing it against the netdev being unregistered.
761		 */
762		rcu_read_lock();
763		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
764			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
765				dev_map_hash_remove_netdev(dtab, netdev);
766				continue;
767			}
768
769			for (i = 0; i < dtab->map.max_entries; i++) {
770				struct bpf_dtab_netdev *dev, *odev;
771
772				dev = READ_ONCE(dtab->netdev_map[i]);
773				if (!dev || netdev != dev->dev)
774					continue;
775				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
776				if (dev == odev)
777					call_rcu(&dev->rcu,
778						 __dev_map_entry_free);
779			}
780		}
781		rcu_read_unlock();
782		break;
783	default:
784		break;
785	}
786	return NOTIFY_OK;
787}
788
789static struct notifier_block dev_map_notifier = {
790	.notifier_call = dev_map_notification,
791};
792
793static int __init dev_map_init(void)
 794{
795	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
796	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
797		     offsetof(struct _bpf_dtab_netdev, dev));
 798	register_netdevice_notifier(&dev_map_notifier);
799	return 0;
800}
801
802subsys_initcall(dev_map_init);