kernel/bpf/devmap.c (Linux v6.2)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
   3 */
   4
   5/* The devmap's primary use is as a backend map for the XDP BPF helper call
   6 * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
   7 * spent some effort to ensure the datapath with redirect maps does not use
   8 * any locking. This is a quick note on the details.
   9 *
  10 * We have three possible paths to get into the devmap control plane: bpf
  11 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
  12 * will invoke an update, delete, or lookup operation. To ensure updates and
  13 * deletes appear atomic from the datapath side xchg() is used to modify the
  14 * netdev_map array. Then because the datapath does a lookup into the netdev_map
  15 * array (read-only) from an RCU critical section we use call_rcu() to wait for
  16 * an rcu grace period before freeing the old data structures. This ensures the
  17 * datapath always has a valid copy. However, the datapath does a "flush"
  18 * operation that pushes any pending packets in the driver outside the RCU
  19 * critical section. Each bpf_dtab_netdev tracks these pending operations using
  20 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
  21 * this list is empty, indicating outstanding flush operations have completed.
  22 *
  23 * BPF syscalls may race with BPF program calls on any of the update, delete
  24 * or lookup operations. As noted above, the xchg() operation also keeps the
  25 * netdev_map consistent in this case. From the devmap side BPF programs
  26 * calling into these operations are the same as multiple user space threads
  27 * making system calls.
  28 *
  29 * Finally, any of the above may race with a netdev_unregister notifier. The
  30 * unregister notifier must search for net devices in the map structure that
  31 * contain a reference to the net device and remove them. This is a two step
  32 * process (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
  33 * check to see if the ifindex is the same as the net_device being removed.
  34 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
  35 * removed; in the case of a concurrent update or delete operation it is
  36 * possible that the initially referenced dev is no longer in the map. As the
  37 * notifier hook walks the map we know that new dev references can not be
  38 * added by the user because core infrastructure ensures dev_get_by_index()
  39 * calls will fail at this point.
  40 *
  41 * The devmap_hash type is a map type which interprets keys as ifindexes and
  42 * indexes these using a hashmap. This allows maps that use ifindex as key to be
  43 * densely packed instead of having holes in the lookup array for unused
  44 * ifindexes. The setup and packet enqueue/send code is shared between the two
  45 * types of devmap; only the lookup and insertion is different.
  46 */
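/* As an illustration of the primary use case described above, a minimal XDP
 * program that redirects through a devmap might look like the sketch below.
 * This is not part of devmap.c; the map and program names are hypothetical
 * and a libbpf-style BTF map definition is assumed.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));	// 4-byte value: ifindex only
 *		__uint(max_entries, 64);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_devmap_redirect(struct xdp_md *ctx)
 *	{
 *		// Key 0 must have been populated from user space with a valid
 *		// egress ifindex; XDP_PASS is the fallback if the lookup fails.
 *		return bpf_redirect_map(&tx_ports, 0, XDP_PASS);
 *	}
 */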
  47#include <linux/bpf.h>
  48#include <net/xdp.h>
  49#include <linux/filter.h>
  50#include <trace/events/xdp.h>
  51#include <linux/btf_ids.h>
  52
  53#define DEV_CREATE_FLAG_MASK \
  54	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
  55
  56struct xdp_dev_bulk_queue {
  57	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
  58	struct list_head flush_node;
  59	struct net_device *dev;
  60	struct net_device *dev_rx;
  61	struct bpf_prog *xdp_prog;
  62	unsigned int count;
  63};
  64
  65struct bpf_dtab_netdev {
  66	struct net_device *dev; /* must be first member, due to tracepoint */
  67	struct hlist_node index_hlist;
  68	struct bpf_dtab *dtab;
  69	struct bpf_prog *xdp_prog;
  70	struct rcu_head rcu;
  71	unsigned int idx;
  72	struct bpf_devmap_val val;
  73};
  74
  75struct bpf_dtab {
  76	struct bpf_map map;
  77	struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
  78	struct list_head list;
  79
  80	/* these are only used for DEVMAP_HASH type maps */
  81	struct hlist_head *dev_index_head;
  82	spinlock_t index_lock;
  83	unsigned int items;
  84	u32 n_buckets;
  85};
  86
  87static DEFINE_PER_CPU(struct list_head, dev_flush_list);
  88static DEFINE_SPINLOCK(dev_map_lock);
  89static LIST_HEAD(dev_map_list);
  90
  91static struct hlist_head *dev_map_create_hash(unsigned int entries,
  92					      int numa_node)
  93{
  94	int i;
  95	struct hlist_head *hash;
  96
  97	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
  98	if (hash != NULL)
  99		for (i = 0; i < entries; i++)
 100			INIT_HLIST_HEAD(&hash[i]);
 101
 102	return hash;
 103}
 104
 105static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
 106						    int idx)
 107{
 108	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
 109}
 110
 111static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 112{
 113	u32 valsize = attr->value_size;
 114
 115	/* check sanity of attributes. 2 value sizes supported:
 116	 * 4 bytes: ifindex
 117	 * 8 bytes: ifindex + prog fd
 118	 */
 119	if (attr->max_entries == 0 || attr->key_size != 4 ||
 120	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
 121	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
 122	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
 123		return -EINVAL;
 124
 125	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
 126	 * verifier prevents writes from the BPF side
 127	 */
 128	attr->map_flags |= BPF_F_RDONLY_PROG;
 129
 130
 131	bpf_map_init_from_attr(&dtab->map, attr);
 132
 133	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
 134		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
 135
 136		if (!dtab->n_buckets) /* Overflow check */
 137			return -EINVAL;
 138	}
 139
 140	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
 141		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
 142							   dtab->map.numa_node);
 143		if (!dtab->dev_index_head)
 144			return -ENOMEM;
 145
 146		spin_lock_init(&dtab->index_lock);
 147	} else {
 148		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
 149						      sizeof(struct bpf_dtab_netdev *),
 150						      dtab->map.numa_node);
 151		if (!dtab->netdev_map)
 152			return -ENOMEM;
 153	}
 154
 155	return 0;
 156}
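/* For reference, the two value layouts accepted above correspond to the UAPI
 * struct bpf_devmap_val: the 4-byte value carries only an ifindex, while the
 * 8-byte value adds a program fd to run on the egress device. A hedged
 * user-space sketch (map_fd, prog_fd, ifindex and the key are placeholders):
 *
 *	struct bpf_devmap_val val = {
 *		.ifindex	= ifindex,	// egress device
 *		.bpf_prog.fd	= prog_fd,	// optional BPF_XDP_DEVMAP program, <= 0 if unused
 *	};
 *	__u32 key = 0;
 *
 *	if (bpf_map_update_elem(map_fd, &key, &val, BPF_ANY))
 *		return -1;	// e.g. stale ifindex or wrong program type
 */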
 157
 158static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 159{
 160	struct bpf_dtab *dtab;
 161	int err;
 162
 163	if (!capable(CAP_NET_ADMIN))
 164		return ERR_PTR(-EPERM);
 165
 166	dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
 167	if (!dtab)
 168		return ERR_PTR(-ENOMEM);
 169
 170	err = dev_map_init_map(dtab, attr);
 171	if (err) {
 172		bpf_map_area_free(dtab);
 173		return ERR_PTR(err);
 174	}
 175
 176	spin_lock(&dev_map_lock);
 177	list_add_tail_rcu(&dtab->list, &dev_map_list);
 178	spin_unlock(&dev_map_lock);
 179
 180	return &dtab->map;
 181}
 182
 183static void dev_map_free(struct bpf_map *map)
 184{
 185	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 186	int i;
 187
 188	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
  189	 * so the programs (there can be more than one using this map) were
 190	 * disconnected from events. The following synchronize_rcu() guarantees
 191	 * both rcu read critical sections complete and waits for
 192	 * preempt-disable regions (NAPI being the relevant context here) so we
 193	 * are certain there will be no further reads against the netdev_map and
 194	 * all flush operations are complete. Flush operations can only be done
 195	 * from NAPI context for this reason.
 196	 */
 197
 198	spin_lock(&dev_map_lock);
 199	list_del_rcu(&dtab->list);
 200	spin_unlock(&dev_map_lock);
 201
 202	bpf_clear_redirect_map(map);
 203	synchronize_rcu();
 204
 205	/* Make sure prior __dev_map_entry_free() have completed. */
 206	rcu_barrier();
 207
 208	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
 209		for (i = 0; i < dtab->n_buckets; i++) {
 210			struct bpf_dtab_netdev *dev;
 211			struct hlist_head *head;
 212			struct hlist_node *next;
 213
 214			head = dev_map_index_hash(dtab, i);
 215
 216			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
 217				hlist_del_rcu(&dev->index_hlist);
 218				if (dev->xdp_prog)
 219					bpf_prog_put(dev->xdp_prog);
 220				dev_put(dev->dev);
 221				kfree(dev);
 222			}
 223		}
 224
 225		bpf_map_area_free(dtab->dev_index_head);
 226	} else {
 227		for (i = 0; i < dtab->map.max_entries; i++) {
 228			struct bpf_dtab_netdev *dev;
 229
 230			dev = rcu_dereference_raw(dtab->netdev_map[i]);
 231			if (!dev)
 232				continue;
 233
 234			if (dev->xdp_prog)
 235				bpf_prog_put(dev->xdp_prog);
 236			dev_put(dev->dev);
 237			kfree(dev);
 238		}
 239
 240		bpf_map_area_free(dtab->netdev_map);
 241	}
 242
 243	bpf_map_area_free(dtab);
 244}
 245
 246static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 247{
 248	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 249	u32 index = key ? *(u32 *)key : U32_MAX;
 250	u32 *next = next_key;
 251
 252	if (index >= dtab->map.max_entries) {
 253		*next = 0;
 254		return 0;
 255	}
 256
 257	if (index == dtab->map.max_entries - 1)
 258		return -ENOENT;
 259	*next = index + 1;
 260	return 0;
 261}
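/* The semantics above give the usual get_next_key iteration from user space;
 * a hedged sketch using libbpf (map_fd is a placeholder):
 *
 *	__u32 key, next;
 *	int err;
 *
 *	// A NULL (or out-of-range) key restarts the walk at index 0.
 *	for (err = bpf_map_get_next_key(map_fd, NULL, &next); !err;
 *	     key = next, err = bpf_map_get_next_key(map_fd, &key, &next)) {
 *		// look up 'next' with bpf_map_lookup_elem() as needed
 *	}
 */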
 262
 263/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 264 * by local_bh_disable() (from XDP calls inside NAPI). The
 265 * rcu_read_lock_bh_held() below makes lockdep accept both.
 266 */
 267static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
 268{
 269	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 270	struct hlist_head *head = dev_map_index_hash(dtab, key);
 271	struct bpf_dtab_netdev *dev;
 272
 273	hlist_for_each_entry_rcu(dev, head, index_hlist,
 274				 lockdep_is_held(&dtab->index_lock))
 275		if (dev->idx == key)
 276			return dev;
 277
 278	return NULL;
 279}
 280
 281static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
 282				    void *next_key)
 283{
 284	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 285	u32 idx, *next = next_key;
 286	struct bpf_dtab_netdev *dev, *next_dev;
 287	struct hlist_head *head;
 288	int i = 0;
 289
 290	if (!key)
 291		goto find_first;
 292
 293	idx = *(u32 *)key;
 294
 295	dev = __dev_map_hash_lookup_elem(map, idx);
 296	if (!dev)
 297		goto find_first;
 298
 299	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
 300				    struct bpf_dtab_netdev, index_hlist);
 301
 302	if (next_dev) {
 303		*next = next_dev->idx;
 304		return 0;
 305	}
 306
 307	i = idx & (dtab->n_buckets - 1);
 308	i++;
 309
 310 find_first:
 311	for (; i < dtab->n_buckets; i++) {
 312		head = dev_map_index_hash(dtab, i);
 313
 314		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
 315					    struct bpf_dtab_netdev,
 316					    index_hlist);
 317		if (next_dev) {
 318			*next = next_dev->idx;
 319			return 0;
 320		}
 321	}
 322
 323	return -ENOENT;
 324}
 325
 326static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
 327				struct xdp_frame **frames, int n,
 328				struct net_device *dev)
 329{
 330	struct xdp_txq_info txq = { .dev = dev };
 331	struct xdp_buff xdp;
 332	int i, nframes = 0;
 333
 334	for (i = 0; i < n; i++) {
 335		struct xdp_frame *xdpf = frames[i];
 336		u32 act;
 337		int err;
 338
 339		xdp_convert_frame_to_buff(xdpf, &xdp);
 340		xdp.txq = &txq;
 341
 342		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 343		switch (act) {
 344		case XDP_PASS:
 345			err = xdp_update_frame_from_buff(&xdp, xdpf);
 346			if (unlikely(err < 0))
 347				xdp_return_frame_rx_napi(xdpf);
 348			else
 349				frames[nframes++] = xdpf;
 350			break;
 351		default:
 352			bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
 353			fallthrough;
 354		case XDP_ABORTED:
 355			trace_xdp_exception(dev, xdp_prog, act);
 356			fallthrough;
 357		case XDP_DROP:
 358			xdp_return_frame_rx_napi(xdpf);
 359			break;
 360		}
 361	}
 362	return nframes; /* sent frames count */
 363}
 364
 365static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
 366{
 367	struct net_device *dev = bq->dev;
 368	unsigned int cnt = bq->count;
 369	int sent = 0, err = 0;
 370	int to_send = cnt;
 371	int i;
 372
 373	if (unlikely(!cnt))
 374		return;
 375
 376	for (i = 0; i < cnt; i++) {
 377		struct xdp_frame *xdpf = bq->q[i];
 378
 379		prefetch(xdpf);
 380	}
 381
 382	if (bq->xdp_prog) {
 383		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
 384		if (!to_send)
 385			goto out;
 386	}
 387
 388	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
 389	if (sent < 0) {
 390		/* If ndo_xdp_xmit fails with an errno, no frames have
 391		 * been xmit'ed.
 392		 */
 393		err = sent;
 394		sent = 0;
 395	}
 396
 397	/* If not all frames have been transmitted, it is our
 398	 * responsibility to free them
 399	 */
 400	for (i = sent; unlikely(i < to_send); i++)
 401		xdp_return_frame_rx_napi(bq->q[i]);
 402
 403out:
 404	bq->count = 0;
 405	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
 406}
 407
 408/* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
 409 * driver before returning from its napi->poll() routine. See the comment above
 410 * xdp_do_flush() in filter.c.
 411 */
 412void __dev_flush(void)
 413{
 414	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
 415	struct xdp_dev_bulk_queue *bq, *tmp;
 416
 417	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
 418		bq_xmit_all(bq, XDP_XMIT_FLUSH);
 419		bq->dev_rx = NULL;
 420		bq->xdp_prog = NULL;
 421		__list_del_clearprev(&bq->flush_node);
 422	}
 423}
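/* From the driver side this contract looks roughly like the sketch below;
 * the driver and its helpers are hypothetical, real drivers differ in detail:
 *
 *	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = mydrv_clean_rx(napi, budget);  // RX work; may redirect frames
 *
 *		// Flush frames queued to devmaps (and cpumaps) during this poll
 *		// before NAPI completes and interrupts may be re-enabled.
 *		xdp_do_flush();
 *
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 */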
 424
 425/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 426 * by local_bh_disable() (from XDP calls inside NAPI). The
 427 * rcu_read_lock_bh_held() below makes lockdep accept both.
 428 */
 429static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
 430{
 431	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 432	struct bpf_dtab_netdev *obj;
 433
 434	if (key >= map->max_entries)
 435		return NULL;
 436
 437	obj = rcu_dereference_check(dtab->netdev_map[key],
 438				    rcu_read_lock_bh_held());
 439	return obj;
 440}
 441
 442/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu
 443 * variable access, and map elements stick around. See comment above
 444 * xdp_do_flush() in filter.c.
 445 */
 446static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 447		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
 448{
 449	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
 450	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
 451
 452	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
 453		bq_xmit_all(bq, 0);
 454
  455	/* Ingress dev_rx will be the same for all xdp_frames in
  456	 * bulk_queue, because bq is stored per-CPU and must be flushed
  457	 * at the end of the net_device driver's NAPI function.
 458	 *
 459	 * Do the same with xdp_prog and flush_list since these fields
 460	 * are only ever modified together.
 461	 */
 462	if (!bq->dev_rx) {
 463		bq->dev_rx = dev_rx;
 464		bq->xdp_prog = xdp_prog;
 465		list_add(&bq->flush_node, flush_list);
 466	}
 467
 468	bq->q[bq->count++] = xdpf;
 469}
 470
 471static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 472				struct net_device *dev_rx,
 473				struct bpf_prog *xdp_prog)
 474{
 475	int err;
 476
 477	if (!dev->netdev_ops->ndo_xdp_xmit)
 478		return -EOPNOTSUPP;
 479
 480	err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
 481	if (unlikely(err))
 482		return err;
 483
 484	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
 485	return 0;
 486}
 487
 488static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
 489{
 490	struct xdp_txq_info txq = { .dev = dst->dev };
 491	struct xdp_buff xdp;
 492	u32 act;
 493
 494	if (!dst->xdp_prog)
 495		return XDP_PASS;
 496
 497	__skb_pull(skb, skb->mac_len);
 498	xdp.txq = &txq;
 499
 500	act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
 501	switch (act) {
 502	case XDP_PASS:
 503		__skb_push(skb, skb->mac_len);
 504		break;
 505	default:
 506		bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
 507		fallthrough;
 508	case XDP_ABORTED:
 509		trace_xdp_exception(dst->dev, dst->xdp_prog, act);
 510		fallthrough;
 511	case XDP_DROP:
 512		kfree_skb(skb);
 513		break;
 514	}
 515
 516	return act;
 517}
 518
 519int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 520		    struct net_device *dev_rx)
 521{
 522	return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
 523}
 524
 525int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
 526		    struct net_device *dev_rx)
 527{
 528	struct net_device *dev = dst->dev;
 529
 530	return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
 531}
 532
 533static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
 534{
 535	if (!obj ||
 536	    !obj->dev->netdev_ops->ndo_xdp_xmit)
 537		return false;
 538
 539	if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
 540		return false;
 541
 542	return true;
 543}
 544
 545static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
 546				 struct net_device *dev_rx,
 547				 struct xdp_frame *xdpf)
 548{
 549	struct xdp_frame *nxdpf;
 550
 551	nxdpf = xdpf_clone(xdpf);
 552	if (!nxdpf)
 553		return -ENOMEM;
 554
 555	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);
 556
 557	return 0;
 558}
 559
 560static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
 561{
 562	while (num_excluded--) {
 563		if (ifindex == excluded[num_excluded])
 564			return true;
 565	}
 566	return false;
 567}
 568
 569/* Get ifindex of each upper device. 'indexes' must be able to hold at
 570 * least MAX_NEST_DEV elements.
 571 * Returns the number of ifindexes added.
 572 */
 573static int get_upper_ifindexes(struct net_device *dev, int *indexes)
 574{
 575	struct net_device *upper;
 576	struct list_head *iter;
 577	int n = 0;
 578
 579	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
 580		indexes[n++] = upper->ifindex;
 581	}
 582	return n;
 583}
 584
 585int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
 586			  struct bpf_map *map, bool exclude_ingress)
 587{
 588	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 589	struct bpf_dtab_netdev *dst, *last_dst = NULL;
 590	int excluded_devices[1+MAX_NEST_DEV];
 591	struct hlist_head *head;
 592	int num_excluded = 0;
 593	unsigned int i;
 594	int err;
 595
 596	if (exclude_ingress) {
 597		num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
 598		excluded_devices[num_excluded++] = dev_rx->ifindex;
 599	}
 600
 601	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
 602		for (i = 0; i < map->max_entries; i++) {
 603			dst = rcu_dereference_check(dtab->netdev_map[i],
 604						    rcu_read_lock_bh_held());
 605			if (!is_valid_dst(dst, xdpf))
 606				continue;
 607
 608			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
 609				continue;
 610
 611			/* we only need n-1 clones; last_dst enqueued below */
 612			if (!last_dst) {
 613				last_dst = dst;
 614				continue;
 615			}
 616
 617			err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
 618			if (err)
 619				return err;
 620
 621			last_dst = dst;
 622		}
 623	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
 624		for (i = 0; i < dtab->n_buckets; i++) {
 625			head = dev_map_index_hash(dtab, i);
 626			hlist_for_each_entry_rcu(dst, head, index_hlist,
 627						 lockdep_is_held(&dtab->index_lock)) {
 628				if (!is_valid_dst(dst, xdpf))
 629					continue;
 630
 631				if (is_ifindex_excluded(excluded_devices, num_excluded,
 632							dst->dev->ifindex))
 633					continue;
 634
 635				/* we only need n-1 clones; last_dst enqueued below */
 636				if (!last_dst) {
 637					last_dst = dst;
 638					continue;
 639				}
 640
 641				err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
 642				if (err)
 643					return err;
 644
 645				last_dst = dst;
 646			}
 647		}
 648	}
 649
 650	/* consume the last copy of the frame */
 651	if (last_dst)
 652		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
 653	else
 654		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */
 655
 656	return 0;
 657}
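/* The multicast path above is reached when the BPF side passes the broadcast
 * flags to the redirect helper; an illustrative call (hypothetical map name):
 *
 *	// With BPF_F_BROADCAST the key is ignored and the frame is cloned to
 *	// every device in the map, minus the ingress device when
 *	// BPF_F_EXCLUDE_INGRESS is also set.
 *	return bpf_redirect_map(&tx_ports, 0,
 *				BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
 */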
 658
 659int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
 660			     struct bpf_prog *xdp_prog)
 661{
 662	int err;
 663
 664	err = xdp_ok_fwd_dev(dst->dev, skb->len);
 665	if (unlikely(err))
 666		return err;
 667
 668	/* Redirect has already succeeded semantically at this point, so we just
 669	 * return 0 even if packet is dropped. Helper below takes care of
 670	 * freeing skb.
 671	 */
 672	if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
 673		return 0;
 674
 675	skb->dev = dst->dev;
 676	generic_xdp_tx(skb, xdp_prog);
 677
 678	return 0;
 679}
 680
 681static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
 682				  struct sk_buff *skb,
 683				  struct bpf_prog *xdp_prog)
 684{
 685	struct sk_buff *nskb;
 686	int err;
 687
 688	nskb = skb_clone(skb, GFP_ATOMIC);
 689	if (!nskb)
 690		return -ENOMEM;
 691
 692	err = dev_map_generic_redirect(dst, nskb, xdp_prog);
 693	if (unlikely(err)) {
 694		consume_skb(nskb);
 695		return err;
 696	}
 697
 698	return 0;
 699}
 700
 701int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
 702			   struct bpf_prog *xdp_prog, struct bpf_map *map,
 703			   bool exclude_ingress)
 704{
 705	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 706	struct bpf_dtab_netdev *dst, *last_dst = NULL;
 707	int excluded_devices[1+MAX_NEST_DEV];
 708	struct hlist_head *head;
 709	struct hlist_node *next;
 710	int num_excluded = 0;
 711	unsigned int i;
 712	int err;
 713
 714	if (exclude_ingress) {
 715		num_excluded = get_upper_ifindexes(dev, excluded_devices);
 716		excluded_devices[num_excluded++] = dev->ifindex;
 717	}
 718
 719	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
 720		for (i = 0; i < map->max_entries; i++) {
 721			dst = rcu_dereference_check(dtab->netdev_map[i],
 722						    rcu_read_lock_bh_held());
 723			if (!dst)
 724				continue;
 725
 726			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
 727				continue;
 728
 729			/* we only need n-1 clones; last_dst enqueued below */
 730			if (!last_dst) {
 731				last_dst = dst;
 732				continue;
 733			}
 734
 735			err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
 736			if (err)
 737				return err;
 738
 739			last_dst = dst;
 740
 741		}
 742	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
 743		for (i = 0; i < dtab->n_buckets; i++) {
 744			head = dev_map_index_hash(dtab, i);
 745			hlist_for_each_entry_safe(dst, next, head, index_hlist) {
 746				if (!dst)
 747					continue;
 748
 749				if (is_ifindex_excluded(excluded_devices, num_excluded,
 750							dst->dev->ifindex))
 751					continue;
 752
 753				/* we only need n-1 clones; last_dst enqueued below */
 754				if (!last_dst) {
 755					last_dst = dst;
 756					continue;
 757				}
 758
 759				err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
 760				if (err)
 761					return err;
 762
 763				last_dst = dst;
 764			}
 765		}
 766	}
 767
 768	/* consume the first skb and return */
 769	if (last_dst)
 770		return dev_map_generic_redirect(last_dst, skb, xdp_prog);
 771
 772	/* dtab is empty */
 773	consume_skb(skb);
 774	return 0;
 775}
 776
 777static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
 778{
 779	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
 780
 781	return obj ? &obj->val : NULL;
 782}
 783
 784static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
 785{
 786	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
 787								*(u32 *)key);
 788	return obj ? &obj->val : NULL;
 789}
 790
 791static void __dev_map_entry_free(struct rcu_head *rcu)
 792{
 793	struct bpf_dtab_netdev *dev;
 794
 795	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
 796	if (dev->xdp_prog)
 797		bpf_prog_put(dev->xdp_prog);
 798	dev_put(dev->dev);
 799	kfree(dev);
 800}
 801
 802static int dev_map_delete_elem(struct bpf_map *map, void *key)
 803{
 804	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 805	struct bpf_dtab_netdev *old_dev;
 806	int k = *(u32 *)key;
 807
 808	if (k >= map->max_entries)
 809		return -EINVAL;
 810
 811	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
 812	if (old_dev)
 813		call_rcu(&old_dev->rcu, __dev_map_entry_free);
 814	return 0;
 815}
 816
 817static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
 818{
 819	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 820	struct bpf_dtab_netdev *old_dev;
 821	int k = *(u32 *)key;
 822	unsigned long flags;
 823	int ret = -ENOENT;
 824
 825	spin_lock_irqsave(&dtab->index_lock, flags);
 826
 827	old_dev = __dev_map_hash_lookup_elem(map, k);
 828	if (old_dev) {
 829		dtab->items--;
 830		hlist_del_init_rcu(&old_dev->index_hlist);
 831		call_rcu(&old_dev->rcu, __dev_map_entry_free);
 832		ret = 0;
 833	}
 834	spin_unlock_irqrestore(&dtab->index_lock, flags);
 835
 836	return ret;
 837}
 838
 839static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
 840						    struct bpf_dtab *dtab,
 841						    struct bpf_devmap_val *val,
 842						    unsigned int idx)
 843{
 844	struct bpf_prog *prog = NULL;
 845	struct bpf_dtab_netdev *dev;
 846
 847	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
 848				   GFP_NOWAIT | __GFP_NOWARN,
 849				   dtab->map.numa_node);
 850	if (!dev)
 851		return ERR_PTR(-ENOMEM);
 852
 853	dev->dev = dev_get_by_index(net, val->ifindex);
 854	if (!dev->dev)
 855		goto err_out;
 856
 857	if (val->bpf_prog.fd > 0) {
 858		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
 859					     BPF_PROG_TYPE_XDP, false);
 860		if (IS_ERR(prog))
 861			goto err_put_dev;
 862		if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
 863		    !bpf_prog_map_compatible(&dtab->map, prog))
 864			goto err_put_prog;
 865	}
 866
 867	dev->idx = idx;
 868	dev->dtab = dtab;
 869	if (prog) {
 870		dev->xdp_prog = prog;
 871		dev->val.bpf_prog.id = prog->aux->id;
 872	} else {
 873		dev->xdp_prog = NULL;
 874		dev->val.bpf_prog.id = 0;
 875	}
 876	dev->val.ifindex = val->ifindex;
 877
 878	return dev;
 879err_put_prog:
 880	bpf_prog_put(prog);
 881err_put_dev:
 882	dev_put(dev->dev);
 883err_out:
 884	kfree(dev);
 885	return ERR_PTR(-EINVAL);
 886}
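/* A program referenced by val->bpf_prog.fd must have been loaded with
 * expected_attach_type BPF_XDP_DEVMAP, as checked above. With libbpf this is
 * usually selected via the section name; a hedged sketch (section naming
 * varies across libbpf versions):
 *
 *	SEC("xdp/devmap")
 *	int xdp_egress_prog(struct xdp_md *ctx)
 *	{
 *		// Runs on the egress side after the redirect, with
 *		// ctx->egress_ifindex set; may return XDP_PASS, XDP_DROP or
 *		// XDP_ABORTED (see dev_map_bpf_prog_run() above).
 *		return XDP_PASS;
 *	}
 */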
 887
 888static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
 889				 void *key, void *value, u64 map_flags)
 890{
 891	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 892	struct bpf_dtab_netdev *dev, *old_dev;
 893	struct bpf_devmap_val val = {};
 894	u32 i = *(u32 *)key;
 895
 896	if (unlikely(map_flags > BPF_EXIST))
 897		return -EINVAL;
 898	if (unlikely(i >= dtab->map.max_entries))
 899		return -E2BIG;
 900	if (unlikely(map_flags == BPF_NOEXIST))
 901		return -EEXIST;
 902
 903	/* already verified value_size <= sizeof val */
 904	memcpy(&val, value, map->value_size);
 905
 906	if (!val.ifindex) {
 907		dev = NULL;
 908		/* can not specify fd if ifindex is 0 */
 909		if (val.bpf_prog.fd > 0)
 910			return -EINVAL;
 911	} else {
 912		dev = __dev_map_alloc_node(net, dtab, &val, i);
 913		if (IS_ERR(dev))
 914			return PTR_ERR(dev);
 915	}
 916
  917	/* Use call_rcu() here to ensure rcu critical sections have completed,
  918	 * remembering that the driver side flush operation will happen before the
  919	 * net device is removed.
 920	 */
 921	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
 922	if (old_dev)
 923		call_rcu(&old_dev->rcu, __dev_map_entry_free);
 924
 925	return 0;
 926}
 927
 928static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
 929			       u64 map_flags)
 930{
 931	return __dev_map_update_elem(current->nsproxy->net_ns,
 932				     map, key, value, map_flags);
 933}
 934
 935static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
 936				     void *key, void *value, u64 map_flags)
 937{
 938	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 939	struct bpf_dtab_netdev *dev, *old_dev;
 940	struct bpf_devmap_val val = {};
 941	u32 idx = *(u32 *)key;
 942	unsigned long flags;
 943	int err = -EEXIST;
 944
 945	/* already verified value_size <= sizeof val */
 946	memcpy(&val, value, map->value_size);
 947
 948	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
 949		return -EINVAL;
 950
 951	spin_lock_irqsave(&dtab->index_lock, flags);
 952
 953	old_dev = __dev_map_hash_lookup_elem(map, idx);
 954	if (old_dev && (map_flags & BPF_NOEXIST))
 955		goto out_err;
 956
 957	dev = __dev_map_alloc_node(net, dtab, &val, idx);
 958	if (IS_ERR(dev)) {
 959		err = PTR_ERR(dev);
 960		goto out_err;
 961	}
 962
 963	if (old_dev) {
 964		hlist_del_rcu(&old_dev->index_hlist);
 965	} else {
 966		if (dtab->items >= dtab->map.max_entries) {
 967			spin_unlock_irqrestore(&dtab->index_lock, flags);
 968			call_rcu(&dev->rcu, __dev_map_entry_free);
 969			return -E2BIG;
 970		}
 971		dtab->items++;
 972	}
 973
 974	hlist_add_head_rcu(&dev->index_hlist,
 975			   dev_map_index_hash(dtab, idx));
 976	spin_unlock_irqrestore(&dtab->index_lock, flags);
 977
 978	if (old_dev)
 979		call_rcu(&old_dev->rcu, __dev_map_entry_free);
 980
 981	return 0;
 982
 983out_err:
 984	spin_unlock_irqrestore(&dtab->index_lock, flags);
 985	return err;
 986}
 987
 988static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
 989				   u64 map_flags)
 990{
 991	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
 992					 map, key, value, map_flags);
 993}
 994
 995static int dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
 996{
 997	return __bpf_xdp_redirect_map(map, ifindex, flags,
 998				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
 999				      __dev_map_lookup_elem);
1000}
1001
1002static int dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
1003{
1004	return __bpf_xdp_redirect_map(map, ifindex, flags,
1005				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
1006				      __dev_map_hash_lookup_elem);
1007}
1008
1009BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
1010const struct bpf_map_ops dev_map_ops = {
1011	.map_meta_equal = bpf_map_meta_equal,
1012	.map_alloc = dev_map_alloc,
1013	.map_free = dev_map_free,
1014	.map_get_next_key = dev_map_get_next_key,
1015	.map_lookup_elem = dev_map_lookup_elem,
1016	.map_update_elem = dev_map_update_elem,
1017	.map_delete_elem = dev_map_delete_elem,
1018	.map_check_btf = map_check_no_btf,
1019	.map_btf_id = &dev_map_btf_ids[0],
1020	.map_redirect = dev_map_redirect,
1021};
1022
1023const struct bpf_map_ops dev_map_hash_ops = {
1024	.map_meta_equal = bpf_map_meta_equal,
1025	.map_alloc = dev_map_alloc,
1026	.map_free = dev_map_free,
1027	.map_get_next_key = dev_map_hash_get_next_key,
1028	.map_lookup_elem = dev_map_hash_lookup_elem,
1029	.map_update_elem = dev_map_hash_update_elem,
1030	.map_delete_elem = dev_map_hash_delete_elem,
1031	.map_check_btf = map_check_no_btf,
1032	.map_btf_id = &dev_map_btf_ids[0],
1033	.map_redirect = dev_hash_map_redirect,
1034};
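/* Either map type is created from user space in the usual way; a hedged
 * sketch using libbpf's low-level wrapper (names and sizes are placeholders):
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP_HASH, "tx_ports",
 *				    sizeof(__u32),			// key, e.g. an ifindex
 *				    sizeof(struct bpf_devmap_val),	// 8-byte value
 *				    64, NULL);
 */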
1035
1036static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
1037				       struct net_device *netdev)
1038{
1039	unsigned long flags;
1040	u32 i;
1041
1042	spin_lock_irqsave(&dtab->index_lock, flags);
1043	for (i = 0; i < dtab->n_buckets; i++) {
1044		struct bpf_dtab_netdev *dev;
1045		struct hlist_head *head;
1046		struct hlist_node *next;
1047
1048		head = dev_map_index_hash(dtab, i);
1049
1050		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
1051			if (netdev != dev->dev)
1052				continue;
1053
1054			dtab->items--;
1055			hlist_del_rcu(&dev->index_hlist);
1056			call_rcu(&dev->rcu, __dev_map_entry_free);
1057		}
1058	}
1059	spin_unlock_irqrestore(&dtab->index_lock, flags);
1060}
1061
1062static int dev_map_notification(struct notifier_block *notifier,
1063				ulong event, void *ptr)
1064{
1065	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
1066	struct bpf_dtab *dtab;
1067	int i, cpu;
1068
1069	switch (event) {
1070	case NETDEV_REGISTER:
1071		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
1072			break;
1073
1074		/* will be freed in free_netdev() */
1075		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
1076		if (!netdev->xdp_bulkq)
1077			return NOTIFY_BAD;
1078
1079		for_each_possible_cpu(cpu)
1080			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
1081		break;
1082	case NETDEV_UNREGISTER:
1083		/* This rcu_read_lock/unlock pair is needed because
1084		 * dev_map_list is an RCU list AND to ensure a delete
1085		 * operation does not free a netdev_map entry while we
1086		 * are comparing it against the netdev being unregistered.
1087		 */
1088		rcu_read_lock();
1089		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
1090			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
1091				dev_map_hash_remove_netdev(dtab, netdev);
1092				continue;
1093			}
1094
1095			for (i = 0; i < dtab->map.max_entries; i++) {
1096				struct bpf_dtab_netdev *dev, *odev;
1097
1098				dev = rcu_dereference(dtab->netdev_map[i]);
1099				if (!dev || netdev != dev->dev)
1100					continue;
1101				odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
1102				if (dev == odev)
1103					call_rcu(&dev->rcu,
1104						 __dev_map_entry_free);
1105			}
1106		}
1107		rcu_read_unlock();
1108		break;
1109	default:
1110		break;
1111	}
1112	return NOTIFY_OK;
1113}
1114
1115static struct notifier_block dev_map_notifier = {
1116	.notifier_call = dev_map_notification,
1117};
1118
1119static int __init dev_map_init(void)
1120{
1121	int cpu;
1122
 1123	/* Ensure tracepoint shadow struct _bpf_dtab_netdev is in sync */
1124	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
1125		     offsetof(struct _bpf_dtab_netdev, dev));
1126	register_netdevice_notifier(&dev_map_notifier);
1127
1128	for_each_possible_cpu(cpu)
1129		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
1130	return 0;
1131}
1132
1133subsys_initcall(dev_map_init);
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
   3 */
   4
   5/* Devmaps primary use is as a backend map for XDP BPF helper call
   6 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
   7 * spent some effort to ensure the datapath with redirect maps does not use
   8 * any locking. This is a quick note on the details.
   9 *
  10 * We have three possible paths to get into the devmap control plane bpf
  11 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
  12 * will invoke an update, delete, or lookup operation. To ensure updates and
  13 * deletes appear atomic from the datapath side xchg() is used to modify the
  14 * netdev_map array. Then because the datapath does a lookup into the netdev_map
  15 * array (read-only) from an RCU critical section we use call_rcu() to wait for
  16 * an rcu grace period before free'ing the old data structures. This ensures the
  17 * datapath always has a valid copy. However, the datapath does a "flush"
  18 * operation that pushes any pending packets in the driver outside the RCU
  19 * critical section. Each bpf_dtab_netdev tracks these pending operations using
  20 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed  until
  21 * this list is empty, indicating outstanding flush operations have completed.
  22 *
  23 * BPF syscalls may race with BPF program calls on any of the update, delete
  24 * or lookup operations. As noted above the xchg() operation also keep the
  25 * netdev_map consistent in this case. From the devmap side BPF programs
  26 * calling into these operations are the same as multiple user space threads
  27 * making system calls.
  28 *
  29 * Finally, any of the above may race with a netdev_unregister notifier. The
  30 * unregister notifier must search for net devices in the map structure that
  31 * contain a reference to the net device and remove them. This is a two step
  32 * process (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
  33 * check to see if the ifindex is the same as the net_device being removed.
  34 * When removing the dev a cmpxchg() is used to ensure the correct dev is
  35 * removed, in the case of a concurrent update or delete operation it is
  36 * possible that the initially referenced dev is no longer in the map. As the
  37 * notifier hook walks the map we know that new dev references can not be
  38 * added by the user because core infrastructure ensures dev_get_by_index()
  39 * calls will fail at this point.
  40 *
  41 * The devmap_hash type is a map type which interprets keys as ifindexes and
  42 * indexes these using a hashmap. This allows maps that use ifindex as key to be
  43 * densely packed instead of having holes in the lookup array for unused
  44 * ifindexes. The setup and packet enqueue/send code is shared between the two
  45 * types of devmap; only the lookup and insertion is different.
  46 */
  47#include <linux/bpf.h>
  48#include <net/xdp.h>
  49#include <linux/filter.h>
  50#include <trace/events/xdp.h>
 
  51
  52#define DEV_CREATE_FLAG_MASK \
  53	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
  54
  55struct xdp_dev_bulk_queue {
  56	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
  57	struct list_head flush_node;
  58	struct net_device *dev;
  59	struct net_device *dev_rx;
  60	struct bpf_prog *xdp_prog;
  61	unsigned int count;
  62};
  63
  64struct bpf_dtab_netdev {
  65	struct net_device *dev; /* must be first member, due to tracepoint */
  66	struct hlist_node index_hlist;
  67	struct bpf_dtab *dtab;
  68	struct bpf_prog *xdp_prog;
  69	struct rcu_head rcu;
  70	unsigned int idx;
  71	struct bpf_devmap_val val;
  72};
  73
  74struct bpf_dtab {
  75	struct bpf_map map;
  76	struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
  77	struct list_head list;
  78
  79	/* these are only used for DEVMAP_HASH type maps */
  80	struct hlist_head *dev_index_head;
  81	spinlock_t index_lock;
  82	unsigned int items;
  83	u32 n_buckets;
  84};
  85
  86static DEFINE_PER_CPU(struct list_head, dev_flush_list);
  87static DEFINE_SPINLOCK(dev_map_lock);
  88static LIST_HEAD(dev_map_list);
  89
  90static struct hlist_head *dev_map_create_hash(unsigned int entries,
  91					      int numa_node)
  92{
  93	int i;
  94	struct hlist_head *hash;
  95
  96	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
  97	if (hash != NULL)
  98		for (i = 0; i < entries; i++)
  99			INIT_HLIST_HEAD(&hash[i]);
 100
 101	return hash;
 102}
 103
 104static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
 105						    int idx)
 106{
 107	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
 108}
 109
 110static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 111{
 112	u32 valsize = attr->value_size;
 113
 114	/* check sanity of attributes. 2 value sizes supported:
 115	 * 4 bytes: ifindex
 116	 * 8 bytes: ifindex + prog fd
 117	 */
 118	if (attr->max_entries == 0 || attr->key_size != 4 ||
 119	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
 120	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
 121	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
 122		return -EINVAL;
 123
 124	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
 125	 * verifier prevents writes from the BPF side
 126	 */
 127	attr->map_flags |= BPF_F_RDONLY_PROG;
 128
 129
 130	bpf_map_init_from_attr(&dtab->map, attr);
 131
 132	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
 133		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
 134
 135		if (!dtab->n_buckets) /* Overflow check */
 136			return -EINVAL;
 137	}
 138
 139	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
 140		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
 141							   dtab->map.numa_node);
 142		if (!dtab->dev_index_head)
 143			return -ENOMEM;
 144
 145		spin_lock_init(&dtab->index_lock);
 146	} else {
 147		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
 148						      sizeof(struct bpf_dtab_netdev *),
 149						      dtab->map.numa_node);
 150		if (!dtab->netdev_map)
 151			return -ENOMEM;
 152	}
 153
 154	return 0;
 155}
 156
 157static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 158{
 159	struct bpf_dtab *dtab;
 160	int err;
 161
 162	if (!capable(CAP_NET_ADMIN))
 163		return ERR_PTR(-EPERM);
 164
 165	dtab = kzalloc(sizeof(*dtab), GFP_USER | __GFP_ACCOUNT);
 166	if (!dtab)
 167		return ERR_PTR(-ENOMEM);
 168
 169	err = dev_map_init_map(dtab, attr);
 170	if (err) {
 171		kfree(dtab);
 172		return ERR_PTR(err);
 173	}
 174
 175	spin_lock(&dev_map_lock);
 176	list_add_tail_rcu(&dtab->list, &dev_map_list);
 177	spin_unlock(&dev_map_lock);
 178
 179	return &dtab->map;
 180}
 181
 182static void dev_map_free(struct bpf_map *map)
 183{
 184	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 185	int i;
 186
 187	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
 188	 * so the programs (can be more than one that used this map) were
 189	 * disconnected from events. The following synchronize_rcu() guarantees
 190	 * both rcu read critical sections complete and waits for
 191	 * preempt-disable regions (NAPI being the relevant context here) so we
 192	 * are certain there will be no further reads against the netdev_map and
 193	 * all flush operations are complete. Flush operations can only be done
 194	 * from NAPI context for this reason.
 195	 */
 196
 197	spin_lock(&dev_map_lock);
 198	list_del_rcu(&dtab->list);
 199	spin_unlock(&dev_map_lock);
 200
 201	bpf_clear_redirect_map(map);
 202	synchronize_rcu();
 203
 204	/* Make sure prior __dev_map_entry_free() have completed. */
 205	rcu_barrier();
 206
 207	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
 208		for (i = 0; i < dtab->n_buckets; i++) {
 209			struct bpf_dtab_netdev *dev;
 210			struct hlist_head *head;
 211			struct hlist_node *next;
 212
 213			head = dev_map_index_hash(dtab, i);
 214
 215			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
 216				hlist_del_rcu(&dev->index_hlist);
 217				if (dev->xdp_prog)
 218					bpf_prog_put(dev->xdp_prog);
 219				dev_put(dev->dev);
 220				kfree(dev);
 221			}
 222		}
 223
 224		bpf_map_area_free(dtab->dev_index_head);
 225	} else {
 226		for (i = 0; i < dtab->map.max_entries; i++) {
 227			struct bpf_dtab_netdev *dev;
 228
 229			dev = rcu_dereference_raw(dtab->netdev_map[i]);
 230			if (!dev)
 231				continue;
 232
 233			if (dev->xdp_prog)
 234				bpf_prog_put(dev->xdp_prog);
 235			dev_put(dev->dev);
 236			kfree(dev);
 237		}
 238
 239		bpf_map_area_free(dtab->netdev_map);
 240	}
 241
 242	kfree(dtab);
 243}
 244
 245static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 246{
 247	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 248	u32 index = key ? *(u32 *)key : U32_MAX;
 249	u32 *next = next_key;
 250
 251	if (index >= dtab->map.max_entries) {
 252		*next = 0;
 253		return 0;
 254	}
 255
 256	if (index == dtab->map.max_entries - 1)
 257		return -ENOENT;
 258	*next = index + 1;
 259	return 0;
 260}
 261
 262/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 263 * by local_bh_disable() (from XDP calls inside NAPI). The
 264 * rcu_read_lock_bh_held() below makes lockdep accept both.
 265 */
 266static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
 267{
 268	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 269	struct hlist_head *head = dev_map_index_hash(dtab, key);
 270	struct bpf_dtab_netdev *dev;
 271
 272	hlist_for_each_entry_rcu(dev, head, index_hlist,
 273				 lockdep_is_held(&dtab->index_lock))
 274		if (dev->idx == key)
 275			return dev;
 276
 277	return NULL;
 278}
 279
 280static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
 281				    void *next_key)
 282{
 283	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 284	u32 idx, *next = next_key;
 285	struct bpf_dtab_netdev *dev, *next_dev;
 286	struct hlist_head *head;
 287	int i = 0;
 288
 289	if (!key)
 290		goto find_first;
 291
 292	idx = *(u32 *)key;
 293
 294	dev = __dev_map_hash_lookup_elem(map, idx);
 295	if (!dev)
 296		goto find_first;
 297
 298	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
 299				    struct bpf_dtab_netdev, index_hlist);
 300
 301	if (next_dev) {
 302		*next = next_dev->idx;
 303		return 0;
 304	}
 305
 306	i = idx & (dtab->n_buckets - 1);
 307	i++;
 308
 309 find_first:
 310	for (; i < dtab->n_buckets; i++) {
 311		head = dev_map_index_hash(dtab, i);
 312
 313		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
 314					    struct bpf_dtab_netdev,
 315					    index_hlist);
 316		if (next_dev) {
 317			*next = next_dev->idx;
 318			return 0;
 319		}
 320	}
 321
 322	return -ENOENT;
 323}
 324
 325bool dev_map_can_have_prog(struct bpf_map *map)
 326{
 327	if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
 328	     map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
 329	    map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
 330		return true;
 331
 332	return false;
 333}
 334
 335static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
 336				struct xdp_frame **frames, int n,
 337				struct net_device *dev)
 338{
 339	struct xdp_txq_info txq = { .dev = dev };
 340	struct xdp_buff xdp;
 341	int i, nframes = 0;
 342
 343	for (i = 0; i < n; i++) {
 344		struct xdp_frame *xdpf = frames[i];
 345		u32 act;
 346		int err;
 347
 348		xdp_convert_frame_to_buff(xdpf, &xdp);
 349		xdp.txq = &txq;
 350
 351		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 352		switch (act) {
 353		case XDP_PASS:
 354			err = xdp_update_frame_from_buff(&xdp, xdpf);
 355			if (unlikely(err < 0))
 356				xdp_return_frame_rx_napi(xdpf);
 357			else
 358				frames[nframes++] = xdpf;
 359			break;
 360		default:
 361			bpf_warn_invalid_xdp_action(act);
 362			fallthrough;
 363		case XDP_ABORTED:
 364			trace_xdp_exception(dev, xdp_prog, act);
 365			fallthrough;
 366		case XDP_DROP:
 367			xdp_return_frame_rx_napi(xdpf);
 368			break;
 369		}
 370	}
 371	return nframes; /* sent frames count */
 372}
 373
 374static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
 375{
 376	struct net_device *dev = bq->dev;
 377	unsigned int cnt = bq->count;
 378	int sent = 0, err = 0;
 379	int to_send = cnt;
 380	int i;
 381
 382	if (unlikely(!cnt))
 383		return;
 384
 385	for (i = 0; i < cnt; i++) {
 386		struct xdp_frame *xdpf = bq->q[i];
 387
 388		prefetch(xdpf);
 389	}
 390
 391	if (bq->xdp_prog) {
 392		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
 393		if (!to_send)
 394			goto out;
 395	}
 396
 397	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
 398	if (sent < 0) {
 399		/* If ndo_xdp_xmit fails with an errno, no frames have
 400		 * been xmit'ed.
 401		 */
 402		err = sent;
 403		sent = 0;
 404	}
 405
 406	/* If not all frames have been transmitted, it is our
 407	 * responsibility to free them
 408	 */
 409	for (i = sent; unlikely(i < to_send); i++)
 410		xdp_return_frame_rx_napi(bq->q[i]);
 411
 412out:
 413	bq->count = 0;
 414	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
 415}
 416
 417/* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
 418 * driver before returning from its napi->poll() routine. See the comment above
 419 * xdp_do_flush() in filter.c.
 420 */
 421void __dev_flush(void)
 422{
 423	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
 424	struct xdp_dev_bulk_queue *bq, *tmp;
 425
 426	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
 427		bq_xmit_all(bq, XDP_XMIT_FLUSH);
 428		bq->dev_rx = NULL;
 429		bq->xdp_prog = NULL;
 430		__list_del_clearprev(&bq->flush_node);
 431	}
 432}
 433
 434/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 435 * by local_bh_disable() (from XDP calls inside NAPI). The
 436 * rcu_read_lock_bh_held() below makes lockdep accept both.
 437 */
 438static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
 439{
 440	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 441	struct bpf_dtab_netdev *obj;
 442
 443	if (key >= map->max_entries)
 444		return NULL;
 445
 446	obj = rcu_dereference_check(dtab->netdev_map[key],
 447				    rcu_read_lock_bh_held());
 448	return obj;
 449}
 450
 451/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu
 452 * variable access, and map elements stick around. See comment above
 453 * xdp_do_flush() in filter.c.
 454 */
 455static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 456		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
 457{
 458	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
 459	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
 460
 461	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
 462		bq_xmit_all(bq, 0);
 463
 464	/* Ingress dev_rx will be the same for all xdp_frame's in
 465	 * bulk_queue, because bq stored per-CPU and must be flushed
 466	 * from net_device drivers NAPI func end.
 467	 *
 468	 * Do the same with xdp_prog and flush_list since these fields
 469	 * are only ever modified together.
 470	 */
 471	if (!bq->dev_rx) {
 472		bq->dev_rx = dev_rx;
 473		bq->xdp_prog = xdp_prog;
 474		list_add(&bq->flush_node, flush_list);
 475	}
 476
 477	bq->q[bq->count++] = xdpf;
 478}
 479
 480static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
 481				struct net_device *dev_rx,
 482				struct bpf_prog *xdp_prog)
 483{
 484	struct xdp_frame *xdpf;
 485	int err;
 486
 487	if (!dev->netdev_ops->ndo_xdp_xmit)
 488		return -EOPNOTSUPP;
 489
 490	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
 491	if (unlikely(err))
 492		return err;
 493
 494	xdpf = xdp_convert_buff_to_frame(xdp);
 495	if (unlikely(!xdpf))
 496		return -EOVERFLOW;
 497
 498	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
 499	return 0;
 500}
 501
 502int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 503		    struct net_device *dev_rx)
 504{
 505	return __xdp_enqueue(dev, xdp, dev_rx, NULL);
 506}
 507
 508int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
 509		    struct net_device *dev_rx)
 510{
 511	struct net_device *dev = dst->dev;
 512
 513	return __xdp_enqueue(dev, xdp, dev_rx, dst->xdp_prog);
 514}
 515
 516static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_buff *xdp,
 517			 int exclude_ifindex)
 518{
 519	if (!obj || obj->dev->ifindex == exclude_ifindex ||
 520	    !obj->dev->netdev_ops->ndo_xdp_xmit)
 521		return false;
 522
 523	if (xdp_ok_fwd_dev(obj->dev, xdp->data_end - xdp->data))
 524		return false;
 525
 526	return true;
 527}
 528
 529static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
 530				 struct net_device *dev_rx,
 531				 struct xdp_frame *xdpf)
 532{
 533	struct xdp_frame *nxdpf;
 534
 535	nxdpf = xdpf_clone(xdpf);
 536	if (!nxdpf)
 537		return -ENOMEM;
 538
 539	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);
 540
 541	return 0;
 542}
 543
 544int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 545			  struct bpf_map *map, bool exclude_ingress)
 546{
 547	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 548	int exclude_ifindex = exclude_ingress ? dev_rx->ifindex : 0;
 549	struct bpf_dtab_netdev *dst, *last_dst = NULL;
 
 550	struct hlist_head *head;
 551	struct xdp_frame *xdpf;
 552	unsigned int i;
 553	int err;
 554
 555	xdpf = xdp_convert_buff_to_frame(xdp);
 556	if (unlikely(!xdpf))
 557		return -EOVERFLOW;
 
 558
 559	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
 560		for (i = 0; i < map->max_entries; i++) {
 561			dst = rcu_dereference_check(dtab->netdev_map[i],
 562						    rcu_read_lock_bh_held());
 563			if (!is_valid_dst(dst, xdp, exclude_ifindex))
 
 
 
 564				continue;
 565
 566			/* we only need n-1 clones; last_dst enqueued below */
 567			if (!last_dst) {
 568				last_dst = dst;
 569				continue;
 570			}
 571
 572			err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
 573			if (err)
 574				return err;
 575
 576			last_dst = dst;
 577		}
 578	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
 579		for (i = 0; i < dtab->n_buckets; i++) {
 580			head = dev_map_index_hash(dtab, i);
 581			hlist_for_each_entry_rcu(dst, head, index_hlist,
 582						 lockdep_is_held(&dtab->index_lock)) {
 583				if (!is_valid_dst(dst, xdp, exclude_ifindex))
 
 
 
 
 584					continue;
 585
 586				/* we only need n-1 clones; last_dst enqueued below */
 587				if (!last_dst) {
 588					last_dst = dst;
 589					continue;
 590				}
 591
 592				err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
 593				if (err)
 594					return err;
 595
 596				last_dst = dst;
 597			}
 598		}
 599	}
 600
 601	/* consume the last copy of the frame */
 602	if (last_dst)
 603		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
 604	else
 605		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */
 606
 607	return 0;
 608}
 609
 610int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
 611			     struct bpf_prog *xdp_prog)
 612{
 613	int err;
 614
 615	err = xdp_ok_fwd_dev(dst->dev, skb->len);
 616	if (unlikely(err))
 617		return err;
 
 
 
 
 
 
 
 
 618	skb->dev = dst->dev;
 619	generic_xdp_tx(skb, xdp_prog);
 620
 621	return 0;
 622}
 623
 624static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
 625				  struct sk_buff *skb,
 626				  struct bpf_prog *xdp_prog)
 627{
 628	struct sk_buff *nskb;
 629	int err;
 630
 631	nskb = skb_clone(skb, GFP_ATOMIC);
 632	if (!nskb)
 633		return -ENOMEM;
 634
 635	err = dev_map_generic_redirect(dst, nskb, xdp_prog);
 636	if (unlikely(err)) {
 637		consume_skb(nskb);
 638		return err;
 639	}
 640
 641	return 0;
 642}
 643
 644int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
 645			   struct bpf_prog *xdp_prog, struct bpf_map *map,
 646			   bool exclude_ingress)
 647{
 648	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 649	int exclude_ifindex = exclude_ingress ? dev->ifindex : 0;
 650	struct bpf_dtab_netdev *dst, *last_dst = NULL;
 
 651	struct hlist_head *head;
 652	struct hlist_node *next;
 
 653	unsigned int i;
 654	int err;
 655
 
 
 
 
 
 656	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
 657		for (i = 0; i < map->max_entries; i++) {
 658			dst = rcu_dereference_check(dtab->netdev_map[i],
 659						    rcu_read_lock_bh_held());
 660			if (!dst || dst->dev->ifindex == exclude_ifindex)
 
 
 
 661				continue;
 662
 663			/* we only need n-1 clones; last_dst enqueued below */
 664			if (!last_dst) {
 665				last_dst = dst;
 666				continue;
 667			}
 668
 669			err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
 670			if (err)
 671				return err;
 672
 673			last_dst = dst;
 
 674		}
 675	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
 676		for (i = 0; i < dtab->n_buckets; i++) {
 677			head = dev_map_index_hash(dtab, i);
 678			hlist_for_each_entry_safe(dst, next, head, index_hlist) {
 679				if (!dst || dst->dev->ifindex == exclude_ifindex)
 
 
 
 
 680					continue;
 681
 682				/* we only need n-1 clones; last_dst enqueued below */
 683				if (!last_dst) {
 684					last_dst = dst;
 685					continue;
 686				}
 687
 688				err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
 689				if (err)
 690					return err;
 691
 692				last_dst = dst;
 693			}
 694		}
 695	}
 696
 697	/* consume the first skb and return */
 698	if (last_dst)
 699		return dev_map_generic_redirect(last_dst, skb, xdp_prog);
 700
 701	/* dtab is empty */
 702	consume_skb(skb);
 703	return 0;
 704}
 705
 706static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
 707{
 708	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
 709
 710	return obj ? &obj->val : NULL;
 711}
 712
 713static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
 714{
 715	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
 716								*(u32 *)key);
 717	return obj ? &obj->val : NULL;
 718}
 719
 720static void __dev_map_entry_free(struct rcu_head *rcu)
 721{
 722	struct bpf_dtab_netdev *dev;
 723
 724	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
 725	if (dev->xdp_prog)
 726		bpf_prog_put(dev->xdp_prog);
 727	dev_put(dev->dev);
 728	kfree(dev);
 729}
 730
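/* Array flavour delete: atomically clear the slot with xchg() and defer the
 * actual free until after an RCU grace period.
 */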
 731static int dev_map_delete_elem(struct bpf_map *map, void *key)
 732{
 733	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 734	struct bpf_dtab_netdev *old_dev;
 735	int k = *(u32 *)key;
 736
 737	if (k >= map->max_entries)
 738		return -EINVAL;
 739
 740	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
 741	if (old_dev)
 742		call_rcu(&old_dev->rcu, __dev_map_entry_free);
 743	return 0;
 744}
 745
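/* Hash flavour delete: unlink the entry and drop the item count under
 * index_lock, then free the entry via RCU as above.
 */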
 746static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
 747{
 748	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 749	struct bpf_dtab_netdev *old_dev;
 750	int k = *(u32 *)key;
 751	unsigned long flags;
 752	int ret = -ENOENT;
 753
 754	spin_lock_irqsave(&dtab->index_lock, flags);
 755
 756	old_dev = __dev_map_hash_lookup_elem(map, k);
 757	if (old_dev) {
 758		dtab->items--;
 759		hlist_del_init_rcu(&old_dev->index_hlist);
 760		call_rcu(&old_dev->rcu, __dev_map_entry_free);
 761		ret = 0;
 762	}
 763	spin_unlock_irqrestore(&dtab->index_lock, flags);
 764
 765	return ret;
 766}
 767
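/* Allocate a new map entry: take a reference on the target net_device and,
 * if a program fd was supplied, attach an XDP program that was loaded with
 * the BPF_XDP_DEVMAP expected attach type.
 */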
 768static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
 769						    struct bpf_dtab *dtab,
 770						    struct bpf_devmap_val *val,
 771						    unsigned int idx)
 772{
 773	struct bpf_prog *prog = NULL;
 774	struct bpf_dtab_netdev *dev;
 775
 776	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
 777				   GFP_ATOMIC | __GFP_NOWARN,
 778				   dtab->map.numa_node);
 779	if (!dev)
 780		return ERR_PTR(-ENOMEM);
 781
 782	dev->dev = dev_get_by_index(net, val->ifindex);
 783	if (!dev->dev)
 784		goto err_out;
 785
 786	if (val->bpf_prog.fd > 0) {
 787		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
 788					     BPF_PROG_TYPE_XDP, false);
 789		if (IS_ERR(prog))
 790			goto err_put_dev;
 791		if (prog->expected_attach_type != BPF_XDP_DEVMAP)
 792			goto err_put_prog;
 793	}
 794
 795	dev->idx = idx;
 796	dev->dtab = dtab;
 797	if (prog) {
 798		dev->xdp_prog = prog;
 799		dev->val.bpf_prog.id = prog->aux->id;
 800	} else {
 801		dev->xdp_prog = NULL;
 802		dev->val.bpf_prog.id = 0;
 803	}
 804	dev->val.ifindex = val->ifindex;
 805
 806	return dev;
 807err_put_prog:
 808	bpf_prog_put(prog);
 809err_put_dev:
 810	dev_put(dev->dev);
 811err_out:
 812	kfree(dev);
 813	return ERR_PTR(-EINVAL);
 814}
 815
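/* Array flavour update: build the new entry (or NULL when ifindex is 0),
 * publish it with xchg() and free any replaced entry after a grace period.
 */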
 816static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
 817				 void *key, void *value, u64 map_flags)
 818{
 819	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 820	struct bpf_dtab_netdev *dev, *old_dev;
 821	struct bpf_devmap_val val = {};
 822	u32 i = *(u32 *)key;
 823
 824	if (unlikely(map_flags > BPF_EXIST))
 825		return -EINVAL;
 826	if (unlikely(i >= dtab->map.max_entries))
 827		return -E2BIG;
 828	if (unlikely(map_flags == BPF_NOEXIST))
 829		return -EEXIST;
 830
 831	/* already verified value_size <= sizeof val */
 832	memcpy(&val, value, map->value_size);
 833
 834	if (!val.ifindex) {
 835		dev = NULL;
 836		/* can not specify fd if ifindex is 0 */
 837		if (val.bpf_prog.fd > 0)
 838			return -EINVAL;
 839	} else {
 840		dev = __dev_map_alloc_node(net, dtab, &val, i);
 841		if (IS_ERR(dev))
 842			return PTR_ERR(dev);
 843	}
 844
  845	/* Use call_rcu() here to ensure that RCU critical sections reading the
  846	 * old entry have completed. Remember that the driver side flush
  847	 * operation will happen before the net device is removed.
  848	 */
 849	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
 850	if (old_dev)
 851		call_rcu(&old_dev->rcu, __dev_map_entry_free);
 852
 853	return 0;
 854}
 855
 856static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
 857			       u64 map_flags)
 858{
 859	return __dev_map_update_elem(current->nsproxy->net_ns,
 860				     map, key, value, map_flags);
 861}
 862
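/* Hash flavour update: under index_lock, replace an existing entry for the
 * key or insert a new one, enforcing max_entries for new keys.
 */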
 863static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
 864				     void *key, void *value, u64 map_flags)
 865{
 866	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 867	struct bpf_dtab_netdev *dev, *old_dev;
 868	struct bpf_devmap_val val = {};
 869	u32 idx = *(u32 *)key;
 870	unsigned long flags;
 871	int err = -EEXIST;
 872
 873	/* already verified value_size <= sizeof val */
 874	memcpy(&val, value, map->value_size);
 875
 876	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
 877		return -EINVAL;
 878
 879	spin_lock_irqsave(&dtab->index_lock, flags);
 880
 881	old_dev = __dev_map_hash_lookup_elem(map, idx);
 882	if (old_dev && (map_flags & BPF_NOEXIST))
 883		goto out_err;
 884
 885	dev = __dev_map_alloc_node(net, dtab, &val, idx);
 886	if (IS_ERR(dev)) {
 887		err = PTR_ERR(dev);
 888		goto out_err;
 889	}
 890
 891	if (old_dev) {
 892		hlist_del_rcu(&old_dev->index_hlist);
 893	} else {
 894		if (dtab->items >= dtab->map.max_entries) {
 895			spin_unlock_irqrestore(&dtab->index_lock, flags);
 896			call_rcu(&dev->rcu, __dev_map_entry_free);
 897			return -E2BIG;
 898		}
 899		dtab->items++;
 900	}
 901
 902	hlist_add_head_rcu(&dev->index_hlist,
 903			   dev_map_index_hash(dtab, idx));
 904	spin_unlock_irqrestore(&dtab->index_lock, flags);
 905
 906	if (old_dev)
 907		call_rcu(&old_dev->rcu, __dev_map_entry_free);
 908
 909	return 0;
 910
 911out_err:
 912	spin_unlock_irqrestore(&dtab->index_lock, flags);
 913	return err;
 914}
 915
 916static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
 917				   u64 map_flags)
 918{
 919	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
 920					 map, key, value, map_flags);
 921}
 922
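/* bpf_redirect_map() helpers for the two devmap flavours; BPF_F_BROADCAST
 * and BPF_F_EXCLUDE_INGRESS are the only additional flags accepted.
 */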
 923static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
 924{
 925	return __bpf_xdp_redirect_map(map, ifindex, flags,
 926				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
 927				      __dev_map_lookup_elem);
 928}
 929
 930static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
 931{
 932	return __bpf_xdp_redirect_map(map, ifindex, flags,
 933				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
 934				      __dev_map_hash_lookup_elem);
 935}
 936
 937static int dev_map_btf_id;
 938const struct bpf_map_ops dev_map_ops = {
 939	.map_meta_equal = bpf_map_meta_equal,
 940	.map_alloc = dev_map_alloc,
 941	.map_free = dev_map_free,
 942	.map_get_next_key = dev_map_get_next_key,
 943	.map_lookup_elem = dev_map_lookup_elem,
 944	.map_update_elem = dev_map_update_elem,
 945	.map_delete_elem = dev_map_delete_elem,
 946	.map_check_btf = map_check_no_btf,
 947	.map_btf_name = "bpf_dtab",
 948	.map_btf_id = &dev_map_btf_id,
 949	.map_redirect = dev_map_redirect,
 950};
 951
 952static int dev_map_hash_map_btf_id;
 953const struct bpf_map_ops dev_map_hash_ops = {
 954	.map_meta_equal = bpf_map_meta_equal,
 955	.map_alloc = dev_map_alloc,
 956	.map_free = dev_map_free,
 957	.map_get_next_key = dev_map_hash_get_next_key,
 958	.map_lookup_elem = dev_map_hash_lookup_elem,
 959	.map_update_elem = dev_map_hash_update_elem,
 960	.map_delete_elem = dev_map_hash_delete_elem,
 961	.map_check_btf = map_check_no_btf,
 962	.map_btf_name = "bpf_dtab",
 963	.map_btf_id = &dev_map_hash_map_btf_id,
 964	.map_redirect = dev_hash_map_redirect,
 965};
 966
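/* Drop every DEVMAP_HASH entry that references an unregistering net_device,
 * holding index_lock while walking the buckets.
 */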
 967static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
 968				       struct net_device *netdev)
 969{
 970	unsigned long flags;
 971	u32 i;
 972
 973	spin_lock_irqsave(&dtab->index_lock, flags);
 974	for (i = 0; i < dtab->n_buckets; i++) {
 975		struct bpf_dtab_netdev *dev;
 976		struct hlist_head *head;
 977		struct hlist_node *next;
 978
 979		head = dev_map_index_hash(dtab, i);
 980
 981		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
 982			if (netdev != dev->dev)
 983				continue;
 984
 985			dtab->items--;
 986			hlist_del_rcu(&dev->index_hlist);
 987			call_rcu(&dev->rcu, __dev_map_entry_free);
 988		}
 989	}
 990	spin_unlock_irqrestore(&dtab->index_lock, flags);
 991}
 992
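/* Netdevice notifier: allocate the per-CPU bulk queue when a device that
 * supports ndo_xdp_xmit registers, and purge all map references to a device
 * when it unregisters.
 */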
 993static int dev_map_notification(struct notifier_block *notifier,
 994				ulong event, void *ptr)
 995{
 996	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
 997	struct bpf_dtab *dtab;
 998	int i, cpu;
 999
1000	switch (event) {
1001	case NETDEV_REGISTER:
1002		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
1003			break;
1004
1005		/* will be freed in free_netdev() */
1006		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
1007		if (!netdev->xdp_bulkq)
1008			return NOTIFY_BAD;
1009
1010		for_each_possible_cpu(cpu)
1011			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
1012		break;
1013	case NETDEV_UNREGISTER:
 1014		/* This rcu_read_lock/unlock pair is needed both because
 1015		 * dev_map_list is an RCU list and to ensure that a delete
 1016		 * operation does not free a netdev_map entry while we
 1017		 * are comparing it against the netdev being unregistered.
 1018		 */
1019		rcu_read_lock();
1020		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
1021			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
1022				dev_map_hash_remove_netdev(dtab, netdev);
1023				continue;
1024			}
1025
1026			for (i = 0; i < dtab->map.max_entries; i++) {
1027				struct bpf_dtab_netdev *dev, *odev;
1028
1029				dev = rcu_dereference(dtab->netdev_map[i]);
1030				if (!dev || netdev != dev->dev)
1031					continue;
1032				odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
1033				if (dev == odev)
1034					call_rcu(&dev->rcu,
1035						 __dev_map_entry_free);
1036			}
1037		}
1038		rcu_read_unlock();
1039		break;
1040	default:
1041		break;
1042	}
1043	return NOTIFY_OK;
1044}
1045
1046static struct notifier_block dev_map_notifier = {
1047	.notifier_call = dev_map_notification,
1048};
1049
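/* Boot-time init: verify the tracepoint shadow struct layout, register the
 * netdevice notifier and initialise the per-CPU flush lists.
 */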
1050static int __init dev_map_init(void)
1051{
1052	int cpu;
1053
 1054	/* Ensure tracepoint shadow struct _bpf_dtab_netdev is in sync */
1055	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
1056		     offsetof(struct _bpf_dtab_netdev, dev));
1057	register_netdevice_notifier(&dev_map_notifier);
1058
1059	for_each_possible_cpu(cpu)
1060		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
1061	return 0;
1062}
1063
1064subsys_initcall(dev_map_init);