v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
  3 */
  4
  5/* A devmap's primary use is as a backend map for the XDP BPF helper call
  6 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
  7 * spent some effort to ensure the datapath with redirect maps does not use
  8 * any locking. This is a quick note on the details.
  9 *
  10 * We have three possible paths to get into the devmap control plane: bpf
 11 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 12 * will invoke an update, delete, or lookup operation. To ensure updates and
 13 * deletes appear atomic from the datapath side xchg() is used to modify the
 14 * netdev_map array. Then because the datapath does a lookup into the netdev_map
 15 * array (read-only) from an RCU critical section we use call_rcu() to wait for
 16 * an rcu grace period before free'ing the old data structures. This ensures the
 17 * datapath always has a valid copy. However, the datapath does a "flush"
 18 * operation that pushes any pending packets in the driver outside the RCU
 19 * critical section. Each bpf_dtab_netdev tracks these pending operations using
 20 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed  until
 21 * this list is empty, indicating outstanding flush operations have completed.
 22 *
 23 * BPF syscalls may race with BPF program calls on any of the update, delete
  24 * or lookup operations. As noted above, the xchg() operation also keeps the
 25 * netdev_map consistent in this case. From the devmap side BPF programs
 26 * calling into these operations are the same as multiple user space threads
 27 * making system calls.
 28 *
 29 * Finally, any of the above may race with a netdev_unregister notifier. The
 30 * unregister notifier must search for net devices in the map structure that
 31 * contain a reference to the net device and remove them. This is a two step
 32 * process (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 33 * check to see if the ifindex is the same as the net_device being removed.
 34 * When removing the dev a cmpxchg() is used to ensure the correct dev is
 35 * removed, in the case of a concurrent update or delete operation it is
 36 * possible that the initially referenced dev is no longer in the map. As the
 37 * notifier hook walks the map we know that new dev references can not be
 38 * added by the user because core infrastructure ensures dev_get_by_index()
 39 * calls will fail at this point.
 40 *
 41 * The devmap_hash type is a map type which interprets keys as ifindexes and
 42 * indexes these using a hashmap. This allows maps that use ifindex as key to be
 43 * densely packed instead of having holes in the lookup array for unused
 44 * ifindexes. The setup and packet enqueue/send code is shared between the two
 45 * types of devmap; only the lookup and insertion is different.
 46 */
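A hedged, illustrative sketch (not part of this file) of the primary use described above: an XDP program, built with libbpf, that declares a DEVMAP and redirects packets through it via bpf_redirect_map(). The map name, size, and section names are assumptions for illustration only.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));   /* ifindex of the egress device */
	__uint(max_entries, 8);
} tx_ports SEC(".maps");

SEC("xdp")
int xdp_redirect_example(struct xdp_md *ctx)
{
	/* Redirect to whatever device user space stored at slot 0;
	 * with flags == 0 an empty slot yields XDP_ABORTED.
	 */
	return bpf_redirect_map(&tx_ports, 0, 0);
}

char _license[] SEC("license") = "GPL";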
 47#include <linux/bpf.h>
 48#include <net/xdp.h>
 49#include <linux/filter.h>
 50#include <trace/events/xdp.h>
 51
 52#define DEV_CREATE_FLAG_MASK \
 53	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
 54
 55#define DEV_MAP_BULK_SIZE 16
 56struct bpf_dtab_netdev;
 57
 58struct xdp_bulk_queue {
 59	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
 60	struct list_head flush_node;
 61	struct net_device *dev_rx;
 62	struct bpf_dtab_netdev *obj;
 63	unsigned int count;
 64};
 65
 66struct bpf_dtab_netdev {
 67	struct net_device *dev; /* must be first member, due to tracepoint */
 68	struct hlist_node index_hlist;
 69	struct bpf_dtab *dtab;
 70	struct xdp_bulk_queue __percpu *bulkq;
 71	struct rcu_head rcu;
 72	unsigned int idx; /* keep track of map index for tracepoint */
 73};
 74
 75struct bpf_dtab {
 76	struct bpf_map map;
 77	struct bpf_dtab_netdev **netdev_map;
 78	struct list_head __percpu *flush_list;
 79	struct list_head list;
 80
 81	/* these are only used for DEVMAP_HASH type maps */
 82	struct hlist_head *dev_index_head;
 83	spinlock_t index_lock;
 84	unsigned int items;
 85	u32 n_buckets;
 86};
 87
 88static DEFINE_SPINLOCK(dev_map_lock);
 89static LIST_HEAD(dev_map_list);
 90
 91static struct hlist_head *dev_map_create_hash(unsigned int entries)
 92{
 93	int i;
 94	struct hlist_head *hash;
 95
 96	hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
 97	if (hash != NULL)
 98		for (i = 0; i < entries; i++)
 99			INIT_HLIST_HEAD(&hash[i]);
100
101	return hash;
102}
103
104static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
105{
106	int err, cpu;
107	u64 cost;
108
109	/* check sanity of attributes */
110	if (attr->max_entries == 0 || attr->key_size != 4 ||
111	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
112		return -EINVAL;
113
114	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
115	 * verifier prevents writes from the BPF side
116	 */
117	attr->map_flags |= BPF_F_RDONLY_PROG;
118
119
120	bpf_map_init_from_attr(&dtab->map, attr);
121
122	/* make sure page count doesn't overflow */
123	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
124	cost += sizeof(struct list_head) * num_possible_cpus();
125
126	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
127		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
128
129		if (!dtab->n_buckets) /* Overflow check */
130			return -EINVAL;
131		cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
132	}
133
134	/* if map size is larger than memlock limit, reject it */
135	err = bpf_map_charge_init(&dtab->map.memory, cost);
136	if (err)
137		return -EINVAL;
138
139	dtab->flush_list = alloc_percpu(struct list_head);
140	if (!dtab->flush_list)
141		goto free_charge;
142
143	for_each_possible_cpu(cpu)
144		INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));
145
146	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
147					      sizeof(struct bpf_dtab_netdev *),
148					      dtab->map.numa_node);
149	if (!dtab->netdev_map)
150		goto free_percpu;
151
152	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
153		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
154		if (!dtab->dev_index_head)
155			goto free_map_area;
156
157		spin_lock_init(&dtab->index_lock);
158	}
159
160	return 0;
161
162free_map_area:
163	bpf_map_area_free(dtab->netdev_map);
164free_percpu:
165	free_percpu(dtab->flush_list);
166free_charge:
167	bpf_map_charge_finish(&dtab->map.memory);
168	return -ENOMEM;
169}
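As a hedged illustration of the attribute checks enforced above, user space might create such a map with the legacy libbpf wrapper; a DEVMAP needs 4-byte keys and 4-byte values and a non-zero max_entries (the 64 below is arbitrary):

#include <bpf/bpf.h>

int create_devmap(void)
{
	/* BPF_MAP_TYPE_DEVMAP: 4-byte key (array index), 4-byte value (ifindex) */
	return bpf_create_map(BPF_MAP_TYPE_DEVMAP, sizeof(__u32), sizeof(__u32),
			      64, 0);
}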
170
171static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
172{
173	struct bpf_dtab *dtab;
174	int err;
175
176	if (!capable(CAP_NET_ADMIN))
177		return ERR_PTR(-EPERM);
178
179	dtab = kzalloc(sizeof(*dtab), GFP_USER);
180	if (!dtab)
181		return ERR_PTR(-ENOMEM);
182
183	err = dev_map_init_map(dtab, attr);
184	if (err) {
185		kfree(dtab);
186		return ERR_PTR(err);
187	}
188
189	spin_lock(&dev_map_lock);
190	list_add_tail_rcu(&dtab->list, &dev_map_list);
191	spin_unlock(&dev_map_lock);
192
193	return &dtab->map;
194}
195
196static void dev_map_free(struct bpf_map *map)
197{
198	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
199	int i, cpu;
200
201	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
202	 * so the programs (can be more than one that used this map) were
203	 * disconnected from events. Wait for outstanding critical sections in
204	 * these programs to complete. The rcu critical section only guarantees
205	 * no further reads against netdev_map. It does __not__ ensure pending
206	 * flush operations (if any) are complete.
207	 */
208
209	spin_lock(&dev_map_lock);
210	list_del_rcu(&dtab->list);
211	spin_unlock(&dev_map_lock);
212
213	bpf_clear_redirect_map(map);
214	synchronize_rcu();
215
216	/* Make sure prior __dev_map_entry_free() have completed. */
217	rcu_barrier();
218
219	/* To ensure all pending flush operations have completed wait for flush
220	 * list to empty on _all_ cpus.
221	 * Because the above synchronize_rcu() ensures the map is disconnected
222	 * from the program we can assume no new items will be added.
223	 */
224	for_each_online_cpu(cpu) {
225		struct list_head *flush_list = per_cpu_ptr(dtab->flush_list, cpu);
226
227		while (!list_empty(flush_list))
228			cond_resched();
229	}
230
231	for (i = 0; i < dtab->map.max_entries; i++) {
232		struct bpf_dtab_netdev *dev;
233
234		dev = dtab->netdev_map[i];
235		if (!dev)
236			continue;
237
238		free_percpu(dev->bulkq);
239		dev_put(dev->dev);
240		kfree(dev);
241	}
242
243	free_percpu(dtab->flush_list);
244	bpf_map_area_free(dtab->netdev_map);
245	kfree(dtab->dev_index_head);
246	kfree(dtab);
247}
248
249static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
250{
251	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
252	u32 index = key ? *(u32 *)key : U32_MAX;
253	u32 *next = next_key;
254
255	if (index >= dtab->map.max_entries) {
256		*next = 0;
257		return 0;
258	}
259
260	if (index == dtab->map.max_entries - 1)
261		return -ENOENT;
262	*next = index + 1;
263	return 0;
264}
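A hedged user-space sketch of the get_next_key semantics above: an out-of-range start key (including the U32_MAX used when no key is given) restarts iteration at slot 0, and -ENOENT ends it at the last slot. The map_fd and the printing are illustrative assumptions.

#include <stdio.h>
#include <bpf/bpf.h>

static void dump_devmap(int map_fd)
{
	__u32 key = -1, next_key, ifindex;

	while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) {
		/* Empty slots return -ENOENT from lookup and are skipped */
		if (bpf_map_lookup_elem(map_fd, &next_key, &ifindex) == 0)
			printf("slot %u -> ifindex %u\n", next_key, ifindex);
		key = next_key;
	}
}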
265
266static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
267						    int idx)
268{
269	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
270}
271
272struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
273{
274	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
275	struct hlist_head *head = dev_map_index_hash(dtab, key);
276	struct bpf_dtab_netdev *dev;
277
278	hlist_for_each_entry_rcu(dev, head, index_hlist)
279		if (dev->idx == key)
280			return dev;
281
282	return NULL;
283}
284
285static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
286				    void *next_key)
287{
288	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
289	u32 idx, *next = next_key;
290	struct bpf_dtab_netdev *dev, *next_dev;
291	struct hlist_head *head;
292	int i = 0;
293
294	if (!key)
295		goto find_first;
296
297	idx = *(u32 *)key;
298
299	dev = __dev_map_hash_lookup_elem(map, idx);
300	if (!dev)
301		goto find_first;
302
303	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
304				    struct bpf_dtab_netdev, index_hlist);
305
306	if (next_dev) {
307		*next = next_dev->idx;
308		return 0;
309	}
310
311	i = idx & (dtab->n_buckets - 1);
312	i++;
313
314 find_first:
315	for (; i < dtab->n_buckets; i++) {
316		head = dev_map_index_hash(dtab, i);
317
318		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
319					    struct bpf_dtab_netdev,
320					    index_hlist);
321		if (next_dev) {
322			*next = next_dev->idx;
323			return 0;
324		}
325	}
326
327	return -ENOENT;
328}
329
330static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
331		       bool in_napi_ctx)
332{
333	struct bpf_dtab_netdev *obj = bq->obj;
334	struct net_device *dev = obj->dev;
335	int sent = 0, drops = 0, err = 0;
336	int i;
337
338	if (unlikely(!bq->count))
339		return 0;
340
341	for (i = 0; i < bq->count; i++) {
342		struct xdp_frame *xdpf = bq->q[i];
343
344		prefetch(xdpf);
345	}
346
347	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
348	if (sent < 0) {
349		err = sent;
350		sent = 0;
351		goto error;
352	}
353	drops = bq->count - sent;
354out:
355	bq->count = 0;
356
357	trace_xdp_devmap_xmit(&obj->dtab->map, obj->idx,
358			      sent, drops, bq->dev_rx, dev, err);
359	bq->dev_rx = NULL;
360	__list_del_clearprev(&bq->flush_node);
361	return 0;
362error:
363	/* If ndo_xdp_xmit fails with an errno, no frames have been
 364	 * xmit'ed and it's our responsibility to free them all.
365	 */
366	for (i = 0; i < bq->count; i++) {
367		struct xdp_frame *xdpf = bq->q[i];
368
369		/* RX path under NAPI protection, can return frames faster */
370		if (likely(in_napi_ctx))
371			xdp_return_frame_rx_napi(xdpf);
372		else
373			xdp_return_frame(xdpf);
374		drops++;
375	}
376	goto out;
377}
378
379/* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled
380 * from the driver before returning from its napi->poll() routine. The poll()
381 * routine is called either from busy_poll context or net_rx_action signaled
382 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
383 * net device can be torn down. On devmap tear down we ensure the flush list
384 * is empty before completing to ensure all flush operations have completed.
385 */
386void __dev_map_flush(struct bpf_map *map)
387{
388	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
389	struct list_head *flush_list = this_cpu_ptr(dtab->flush_list);
390	struct xdp_bulk_queue *bq, *tmp;
391
392	rcu_read_lock();
393	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
394		bq_xmit_all(bq, XDP_XMIT_FLUSH, true);
395	rcu_read_unlock();
396}
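A hedged sketch of the driver-side contract described above: the NAPI poll routine calls xdp_do_flush_map() (which reaches __dev_map_flush()) before returning, so the bulk queues filled by bpf_redirect_map() are drained in the same softirq. The driver name and mydrv_process_rx() are purely illustrative assumptions.

#include <linux/netdevice.h>
#include <linux/filter.h>

/* mydrv_process_rx() stands in for the driver's real RX loop */
static int mydrv_process_rx(struct napi_struct *napi, int budget);

static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	/* RX processing; an attached XDP program may call bpf_redirect_map() */
	int work_done = mydrv_process_rx(napi, budget);

	/* Drain any devmap bulk queues before leaving the poll routine */
	xdp_do_flush_map();

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}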
397
398/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
 399 * update happens in parallel here, a dev_put won't happen until after reading the
400 * ifindex.
401 */
402struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
403{
404	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
405	struct bpf_dtab_netdev *obj;
406
407	if (key >= map->max_entries)
408		return NULL;
409
410	obj = READ_ONCE(dtab->netdev_map[key]);
411	return obj;
412}
413
414/* Runs under RCU-read-side, plus in softirq under NAPI protection.
415 * Thus, safe percpu variable access.
416 */
417static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
418		      struct net_device *dev_rx)
419
420{
421	struct list_head *flush_list = this_cpu_ptr(obj->dtab->flush_list);
422	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
423
424	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
425		bq_xmit_all(bq, 0, true);
426
 427	/* Ingress dev_rx will be the same for all xdp_frames in the
 428	 * bulk_queue, because the bq is stored per-CPU and must be flushed
 429	 * at the end of the net_device driver's NAPI function.
430	 */
431	if (!bq->dev_rx)
432		bq->dev_rx = dev_rx;
433
434	bq->q[bq->count++] = xdpf;
435
436	if (!bq->flush_node.prev)
437		list_add(&bq->flush_node, flush_list);
438
439	return 0;
440}
441
442int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
443		    struct net_device *dev_rx)
444{
445	struct net_device *dev = dst->dev;
446	struct xdp_frame *xdpf;
447	int err;
448
449	if (!dev->netdev_ops->ndo_xdp_xmit)
450		return -EOPNOTSUPP;
451
452	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
453	if (unlikely(err))
454		return err;
455
456	xdpf = convert_to_xdp_frame(xdp);
457	if (unlikely(!xdpf))
458		return -EOVERFLOW;
459
460	return bq_enqueue(dst, xdpf, dev_rx);
461}
462
463int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
464			     struct bpf_prog *xdp_prog)
465{
466	int err;
467
468	err = xdp_ok_fwd_dev(dst->dev, skb->len);
469	if (unlikely(err))
470		return err;
471	skb->dev = dst->dev;
472	generic_xdp_tx(skb, xdp_prog);
473
474	return 0;
475}
476
477static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
478{
479	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
480	struct net_device *dev = obj ? obj->dev : NULL;
481
482	return dev ? &dev->ifindex : NULL;
483}
484
485static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
486{
487	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
488								*(u32 *)key);
489	struct net_device *dev = obj ? obj->dev : NULL;
490
491	return dev ? &dev->ifindex : NULL;
492}
493
494static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
495{
496	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
497		struct xdp_bulk_queue *bq;
498		int cpu;
499
500		rcu_read_lock();
501		for_each_online_cpu(cpu) {
502			bq = per_cpu_ptr(dev->bulkq, cpu);
503			bq_xmit_all(bq, XDP_XMIT_FLUSH, false);
504		}
505		rcu_read_unlock();
506	}
507}
508
509static void __dev_map_entry_free(struct rcu_head *rcu)
510{
511	struct bpf_dtab_netdev *dev;
512
513	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
514	dev_map_flush_old(dev);
515	free_percpu(dev->bulkq);
516	dev_put(dev->dev);
517	kfree(dev);
518}
519
520static int dev_map_delete_elem(struct bpf_map *map, void *key)
521{
522	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
523	struct bpf_dtab_netdev *old_dev;
524	int k = *(u32 *)key;
525
526	if (k >= map->max_entries)
527		return -EINVAL;
528
529	/* Use call_rcu() here to ensure any rcu critical sections have
530	 * completed, but this does not guarantee a flush has happened
531	 * yet. Because driver side rcu_read_lock/unlock only protects the
532	 * running XDP program. However, for pending flush operations the
533	 * dev and ctx are stored in another per cpu map. And additionally,
534	 * the driver tear down ensures all soft irqs are complete before
535	 * removing the net device in the case of dev_put equals zero.
536	 */
537	old_dev = xchg(&dtab->netdev_map[k], NULL);
538	if (old_dev)
539		call_rcu(&old_dev->rcu, __dev_map_entry_free);
540	return 0;
541}
542
543static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
544{
545	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
546	struct bpf_dtab_netdev *old_dev;
547	int k = *(u32 *)key;
548	unsigned long flags;
549	int ret = -ENOENT;
550
551	spin_lock_irqsave(&dtab->index_lock, flags);
552
553	old_dev = __dev_map_hash_lookup_elem(map, k);
554	if (old_dev) {
555		dtab->items--;
556		hlist_del_init_rcu(&old_dev->index_hlist);
557		call_rcu(&old_dev->rcu, __dev_map_entry_free);
558		ret = 0;
559	}
560	spin_unlock_irqrestore(&dtab->index_lock, flags);
561
562	return ret;
563}
564
565static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
566						    struct bpf_dtab *dtab,
567						    u32 ifindex,
568						    unsigned int idx)
569{
570	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
571	struct bpf_dtab_netdev *dev;
572	struct xdp_bulk_queue *bq;
573	int cpu;
574
575	dev = kmalloc_node(sizeof(*dev), gfp, dtab->map.numa_node);
576	if (!dev)
577		return ERR_PTR(-ENOMEM);
578
579	dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
580					sizeof(void *), gfp);
581	if (!dev->bulkq) {
582		kfree(dev);
583		return ERR_PTR(-ENOMEM);
584	}
585
586	for_each_possible_cpu(cpu) {
587		bq = per_cpu_ptr(dev->bulkq, cpu);
588		bq->obj = dev;
589	}
590
591	dev->dev = dev_get_by_index(net, ifindex);
592	if (!dev->dev) {
593		free_percpu(dev->bulkq);
594		kfree(dev);
595		return ERR_PTR(-EINVAL);
596	}
597
598	dev->idx = idx;
599	dev->dtab = dtab;
600
601	return dev;
602}
603
604static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
605				 void *key, void *value, u64 map_flags)
606{
607	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
608	struct bpf_dtab_netdev *dev, *old_dev;
609	u32 ifindex = *(u32 *)value;
610	u32 i = *(u32 *)key;
611
612	if (unlikely(map_flags > BPF_EXIST))
613		return -EINVAL;
614	if (unlikely(i >= dtab->map.max_entries))
615		return -E2BIG;
616	if (unlikely(map_flags == BPF_NOEXIST))
617		return -EEXIST;
618
619	if (!ifindex) {
620		dev = NULL;
621	} else {
622		dev = __dev_map_alloc_node(net, dtab, ifindex, i);
623		if (IS_ERR(dev))
624			return PTR_ERR(dev);
625	}
626
 627	/* Use call_rcu() here to ensure rcu critical sections have completed.
 628	 * Remember that the driver side flush operation will happen before the
629	 * net device is removed.
630	 */
631	old_dev = xchg(&dtab->netdev_map[i], dev);
632	if (old_dev)
633		call_rcu(&old_dev->rcu, __dev_map_entry_free);
634
635	return 0;
636}
637
638static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
639			       u64 map_flags)
640{
641	return __dev_map_update_elem(current->nsproxy->net_ns,
642				     map, key, value, map_flags);
643}
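From user space the update path above is reached through the ordinary map-update syscall; the 4-byte value is the ifindex of the egress device (writing 0 clears the slot). A hedged libbpf sketch, with map_fd, the slot number, and the interface name as placeholders:

#include <net/if.h>
#include <bpf/bpf.h>

static int set_tx_port(int map_fd, __u32 slot, const char *ifname)
{
	__u32 ifindex = if_nametoindex(ifname);

	if (!ifindex)
		return -1;
	return bpf_map_update_elem(map_fd, &slot, &ifindex, BPF_ANY);
}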
644
645static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
646				     void *key, void *value, u64 map_flags)
647{
648	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
649	struct bpf_dtab_netdev *dev, *old_dev;
650	u32 ifindex = *(u32 *)value;
651	u32 idx = *(u32 *)key;
652	unsigned long flags;
653	int err = -EEXIST;
654
655	if (unlikely(map_flags > BPF_EXIST || !ifindex))
656		return -EINVAL;
657
658	spin_lock_irqsave(&dtab->index_lock, flags);
659
660	old_dev = __dev_map_hash_lookup_elem(map, idx);
661	if (old_dev && (map_flags & BPF_NOEXIST))
662		goto out_err;
663
664	dev = __dev_map_alloc_node(net, dtab, ifindex, idx);
665	if (IS_ERR(dev)) {
666		err = PTR_ERR(dev);
667		goto out_err;
668	}
669
670	if (old_dev) {
671		hlist_del_rcu(&old_dev->index_hlist);
672	} else {
673		if (dtab->items >= dtab->map.max_entries) {
674			spin_unlock_irqrestore(&dtab->index_lock, flags);
675			call_rcu(&dev->rcu, __dev_map_entry_free);
676			return -E2BIG;
677		}
678		dtab->items++;
679	}
680
681	hlist_add_head_rcu(&dev->index_hlist,
682			   dev_map_index_hash(dtab, idx));
683	spin_unlock_irqrestore(&dtab->index_lock, flags);
684
685	if (old_dev)
686		call_rcu(&old_dev->rcu, __dev_map_entry_free);
687
688	return 0;
689
690out_err:
691	spin_unlock_irqrestore(&dtab->index_lock, flags);
692	return err;
693}
694
695static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
696				   u64 map_flags)
697{
698	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
699					 map, key, value, map_flags);
700}
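To illustrate the DEVMAP_HASH case described in the header comment, here is a hedged BPF-side sketch that keys the map directly by ifindex and bounces packets back out their ingress device when an entry exists; map and program names are assumptions for illustration.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
	__uint(key_size, sizeof(__u32));     /* keyed directly by ifindex */
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, 64);
} tx_by_ifindex SEC(".maps");

SEC("xdp")
int xdp_hairpin(struct xdp_md *ctx)
{
	/* Redirect back out the ingress device if it is present in the map */
	return bpf_redirect_map(&tx_by_ifindex, ctx->ingress_ifindex, 0);
}

char _license[] SEC("license") = "GPL";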
701
702const struct bpf_map_ops dev_map_ops = {
703	.map_alloc = dev_map_alloc,
704	.map_free = dev_map_free,
705	.map_get_next_key = dev_map_get_next_key,
706	.map_lookup_elem = dev_map_lookup_elem,
707	.map_update_elem = dev_map_update_elem,
708	.map_delete_elem = dev_map_delete_elem,
709	.map_check_btf = map_check_no_btf,
710};
711
712const struct bpf_map_ops dev_map_hash_ops = {
713	.map_alloc = dev_map_alloc,
714	.map_free = dev_map_free,
715	.map_get_next_key = dev_map_hash_get_next_key,
716	.map_lookup_elem = dev_map_hash_lookup_elem,
717	.map_update_elem = dev_map_hash_update_elem,
718	.map_delete_elem = dev_map_hash_delete_elem,
719	.map_check_btf = map_check_no_btf,
720};
721
722static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
723				       struct net_device *netdev)
724{
725	unsigned long flags;
726	u32 i;
727
728	spin_lock_irqsave(&dtab->index_lock, flags);
729	for (i = 0; i < dtab->n_buckets; i++) {
730		struct bpf_dtab_netdev *dev;
731		struct hlist_head *head;
732		struct hlist_node *next;
733
734		head = dev_map_index_hash(dtab, i);
735
736		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
737			if (netdev != dev->dev)
738				continue;
739
740			dtab->items--;
741			hlist_del_rcu(&dev->index_hlist);
742			call_rcu(&dev->rcu, __dev_map_entry_free);
743		}
744	}
745	spin_unlock_irqrestore(&dtab->index_lock, flags);
746}
747
748static int dev_map_notification(struct notifier_block *notifier,
749				ulong event, void *ptr)
750{
751	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
752	struct bpf_dtab *dtab;
753	int i;
754
755	switch (event) {
756	case NETDEV_UNREGISTER:
757		/* This rcu_read_lock/unlock pair is needed because
758		 * dev_map_list is an RCU list AND to ensure a delete
759		 * operation does not free a netdev_map entry while we
760		 * are comparing it against the netdev being unregistered.
761		 */
762		rcu_read_lock();
763		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
764			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
765				dev_map_hash_remove_netdev(dtab, netdev);
766				continue;
767			}
768
769			for (i = 0; i < dtab->map.max_entries; i++) {
770				struct bpf_dtab_netdev *dev, *odev;
771
772				dev = READ_ONCE(dtab->netdev_map[i]);
773				if (!dev || netdev != dev->dev)
774					continue;
775				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
776				if (dev == odev)
777					call_rcu(&dev->rcu,
778						 __dev_map_entry_free);
779			}
780		}
781		rcu_read_unlock();
782		break;
783	default:
784		break;
785	}
786	return NOTIFY_OK;
787}
788
789static struct notifier_block dev_map_notifier = {
790	.notifier_call = dev_map_notification,
791};
792
793static int __init dev_map_init(void)
794{
795	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
796	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
797		     offsetof(struct _bpf_dtab_netdev, dev));
798	register_netdevice_notifier(&dev_map_notifier);
799	return 0;
800}
801
802subsys_initcall(dev_map_init);
v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
  3 */
  4
  5/* A devmap's primary use is as a backend map for the XDP BPF helper call
  6 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
  7 * spent some effort to ensure the datapath with redirect maps does not use
  8 * any locking. This is a quick note on the details.
  9 *
  10 * We have three possible paths to get into the devmap control plane: bpf
 11 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 12 * will invoke an update, delete, or lookup operation. To ensure updates and
 13 * deletes appear atomic from the datapath side xchg() is used to modify the
 14 * netdev_map array. Then because the datapath does a lookup into the netdev_map
 15 * array (read-only) from an RCU critical section we use call_rcu() to wait for
 16 * an rcu grace period before free'ing the old data structures. This ensures the
 17 * datapath always has a valid copy. However, the datapath does a "flush"
 18 * operation that pushes any pending packets in the driver outside the RCU
 19 * critical section. Each bpf_dtab_netdev tracks these pending operations using
 20 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed  until
 21 * this list is empty, indicating outstanding flush operations have completed.
 22 *
 23 * BPF syscalls may race with BPF program calls on any of the update, delete
  24 * or lookup operations. As noted above, the xchg() operation also keeps the
 25 * netdev_map consistent in this case. From the devmap side BPF programs
 26 * calling into these operations are the same as multiple user space threads
 27 * making system calls.
 28 *
 29 * Finally, any of the above may race with a netdev_unregister notifier. The
 30 * unregister notifier must search for net devices in the map structure that
 31 * contain a reference to the net device and remove them. This is a two step
 32 * process (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 33 * check to see if the ifindex is the same as the net_device being removed.
 34 * When removing the dev a cmpxchg() is used to ensure the correct dev is
 35 * removed, in the case of a concurrent update or delete operation it is
 36 * possible that the initially referenced dev is no longer in the map. As the
 37 * notifier hook walks the map we know that new dev references can not be
 38 * added by the user because core infrastructure ensures dev_get_by_index()
 39 * calls will fail at this point.
 40 *
 41 * The devmap_hash type is a map type which interprets keys as ifindexes and
 42 * indexes these using a hashmap. This allows maps that use ifindex as key to be
 43 * densely packed instead of having holes in the lookup array for unused
 44 * ifindexes. The setup and packet enqueue/send code is shared between the two
 45 * types of devmap; only the lookup and insertion is different.
 46 */
 47#include <linux/bpf.h>
 48#include <net/xdp.h>
 49#include <linux/filter.h>
 50#include <trace/events/xdp.h>
 51
 52#define DEV_CREATE_FLAG_MASK \
 53	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
 54
 55struct xdp_dev_bulk_queue {
 56	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
 57	struct list_head flush_node;
 58	struct net_device *dev;
 59	struct net_device *dev_rx;
 60	unsigned int count;
 61};
 62
 63struct bpf_dtab_netdev {
 64	struct net_device *dev; /* must be first member, due to tracepoint */
 65	struct hlist_node index_hlist;
 66	struct bpf_dtab *dtab;
 67	struct bpf_prog *xdp_prog;
 68	struct rcu_head rcu;
 69	unsigned int idx;
 70	struct bpf_devmap_val val;
 71};
 72
 73struct bpf_dtab {
 74	struct bpf_map map;
 75	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
 76	struct list_head list;
 77
 78	/* these are only used for DEVMAP_HASH type maps */
 79	struct hlist_head *dev_index_head;
 80	spinlock_t index_lock;
 81	unsigned int items;
 82	u32 n_buckets;
 83};
 84
 85static DEFINE_PER_CPU(struct list_head, dev_flush_list);
 86static DEFINE_SPINLOCK(dev_map_lock);
 87static LIST_HEAD(dev_map_list);
 88
 89static struct hlist_head *dev_map_create_hash(unsigned int entries,
 90					      int numa_node)
 91{
 92	int i;
 93	struct hlist_head *hash;
 94
 95	hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
 96	if (hash != NULL)
 97		for (i = 0; i < entries; i++)
 98			INIT_HLIST_HEAD(&hash[i]);
 99
100	return hash;
101}
102
103static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
104						    int idx)
105{
106	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
107}
108
109static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
110{
111	u32 valsize = attr->value_size;
112	u64 cost = 0;
113	int err;
114
115	/* check sanity of attributes. 2 value sizes supported:
116	 * 4 bytes: ifindex
117	 * 8 bytes: ifindex + prog fd
118	 */
119	if (attr->max_entries == 0 || attr->key_size != 4 ||
120	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
121	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
122	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
123		return -EINVAL;
124
125	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
126	 * verifier prevents writes from the BPF side
127	 */
128	attr->map_flags |= BPF_F_RDONLY_PROG;
129
130
131	bpf_map_init_from_attr(&dtab->map, attr);
132
133	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
134		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
135
136		if (!dtab->n_buckets) /* Overflow check */
137			return -EINVAL;
138		cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
139	} else {
140		cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
141	}
142
143	/* if map size is larger than memlock limit, reject it */
144	err = bpf_map_charge_init(&dtab->map.memory, cost);
145	if (err)
146		return -EINVAL;
147
148	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
149		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
150							   dtab->map.numa_node);
151		if (!dtab->dev_index_head)
152			goto free_charge;
153
154		spin_lock_init(&dtab->index_lock);
155	} else {
156		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
157						      sizeof(struct bpf_dtab_netdev *),
158						      dtab->map.numa_node);
159		if (!dtab->netdev_map)
160			goto free_charge;
161	}
162
163	return 0;
164
165free_charge:
166	bpf_map_charge_finish(&dtab->map.memory);
167	return -ENOMEM;
168}
169
170static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
171{
172	struct bpf_dtab *dtab;
173	int err;
174
175	if (!capable(CAP_NET_ADMIN))
176		return ERR_PTR(-EPERM);
177
178	dtab = kzalloc(sizeof(*dtab), GFP_USER);
179	if (!dtab)
180		return ERR_PTR(-ENOMEM);
181
182	err = dev_map_init_map(dtab, attr);
183	if (err) {
184		kfree(dtab);
185		return ERR_PTR(err);
186	}
187
188	spin_lock(&dev_map_lock);
189	list_add_tail_rcu(&dtab->list, &dev_map_list);
190	spin_unlock(&dev_map_lock);
191
192	return &dtab->map;
193}
194
195static void dev_map_free(struct bpf_map *map)
196{
197	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
198	int i;
199
200	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
201	 * so the programs (can be more than one that used this map) were
202	 * disconnected from events. The following synchronize_rcu() guarantees
203	 * both rcu read critical sections complete and waits for
204	 * preempt-disable regions (NAPI being the relevant context here) so we
205	 * are certain there will be no further reads against the netdev_map and
206	 * all flush operations are complete. Flush operations can only be done
207	 * from NAPI context for this reason.
208	 */
209
210	spin_lock(&dev_map_lock);
211	list_del_rcu(&dtab->list);
212	spin_unlock(&dev_map_lock);
213
214	bpf_clear_redirect_map(map);
215	synchronize_rcu();
216
217	/* Make sure prior __dev_map_entry_free() have completed. */
218	rcu_barrier();
219
220	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
221		for (i = 0; i < dtab->n_buckets; i++) {
222			struct bpf_dtab_netdev *dev;
223			struct hlist_head *head;
224			struct hlist_node *next;
225
226			head = dev_map_index_hash(dtab, i);
227
228			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
229				hlist_del_rcu(&dev->index_hlist);
230				if (dev->xdp_prog)
231					bpf_prog_put(dev->xdp_prog);
232				dev_put(dev->dev);
233				kfree(dev);
234			}
235		}
236
237		bpf_map_area_free(dtab->dev_index_head);
238	} else {
239		for (i = 0; i < dtab->map.max_entries; i++) {
240			struct bpf_dtab_netdev *dev;
241
242			dev = dtab->netdev_map[i];
243			if (!dev)
244				continue;
245
246			if (dev->xdp_prog)
247				bpf_prog_put(dev->xdp_prog);
248			dev_put(dev->dev);
249			kfree(dev);
250		}
251
252		bpf_map_area_free(dtab->netdev_map);
253	}
254
255	kfree(dtab);
256}
257
258static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
259{
260	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
261	u32 index = key ? *(u32 *)key : U32_MAX;
262	u32 *next = next_key;
263
264	if (index >= dtab->map.max_entries) {
265		*next = 0;
266		return 0;
267	}
268
269	if (index == dtab->map.max_entries - 1)
270		return -ENOENT;
271	*next = index + 1;
272	return 0;
273}
274
275struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
276{
277	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
278	struct hlist_head *head = dev_map_index_hash(dtab, key);
279	struct bpf_dtab_netdev *dev;
280
281	hlist_for_each_entry_rcu(dev, head, index_hlist,
282				 lockdep_is_held(&dtab->index_lock))
283		if (dev->idx == key)
284			return dev;
285
286	return NULL;
287}
288
289static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
290				    void *next_key)
291{
292	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
293	u32 idx, *next = next_key;
294	struct bpf_dtab_netdev *dev, *next_dev;
295	struct hlist_head *head;
296	int i = 0;
297
298	if (!key)
299		goto find_first;
300
301	idx = *(u32 *)key;
302
303	dev = __dev_map_hash_lookup_elem(map, idx);
304	if (!dev)
305		goto find_first;
306
307	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
308				    struct bpf_dtab_netdev, index_hlist);
309
310	if (next_dev) {
311		*next = next_dev->idx;
312		return 0;
313	}
314
315	i = idx & (dtab->n_buckets - 1);
316	i++;
317
318 find_first:
319	for (; i < dtab->n_buckets; i++) {
320		head = dev_map_index_hash(dtab, i);
321
322		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
323					    struct bpf_dtab_netdev,
324					    index_hlist);
325		if (next_dev) {
326			*next = next_dev->idx;
327			return 0;
328		}
329	}
330
331	return -ENOENT;
332}
333
334bool dev_map_can_have_prog(struct bpf_map *map)
335{
336	if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
337	     map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
338	    map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
339		return true;
340
341	return false;
342}
343
344static int bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
345{
346	struct net_device *dev = bq->dev;
347	int sent = 0, drops = 0, err = 0;
348	int i;
349
350	if (unlikely(!bq->count))
351		return 0;
352
353	for (i = 0; i < bq->count; i++) {
354		struct xdp_frame *xdpf = bq->q[i];
355
356		prefetch(xdpf);
357	}
358
359	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
360	if (sent < 0) {
361		err = sent;
362		sent = 0;
363		goto error;
364	}
365	drops = bq->count - sent;
366out:
367	bq->count = 0;
368
369	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
370	bq->dev_rx = NULL;
371	__list_del_clearprev(&bq->flush_node);
372	return 0;
373error:
374	/* If ndo_xdp_xmit fails with an errno, no frames have been
 375	 * xmit'ed and it's our responsibility to free them all.
376	 */
377	for (i = 0; i < bq->count; i++) {
378		struct xdp_frame *xdpf = bq->q[i];
379
380		xdp_return_frame_rx_napi(xdpf);
381		drops++;
382	}
383	goto out;
384}
385
386/* __dev_flush is called from xdp_do_flush() which _must_ be signaled
387 * from the driver before returning from its napi->poll() routine. The poll()
388 * routine is called either from busy_poll context or net_rx_action signaled
389 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
390 * net device can be torn down. On devmap tear down we ensure the flush list
391 * is empty before completing to ensure all flush operations have completed.
392 * When drivers update the bpf program they may need to ensure any flush ops
393 * are also complete. Using synchronize_rcu or call_rcu will suffice for this
394 * because both wait for napi context to exit.
395 */
396void __dev_flush(void)
397{
398	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
399	struct xdp_dev_bulk_queue *bq, *tmp;
400
401	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
402		bq_xmit_all(bq, XDP_XMIT_FLUSH);
403}
404
405/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
 406 * update happens in parallel here, a dev_put won't happen until after reading the
407 * ifindex.
408 */
409struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
410{
411	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
412	struct bpf_dtab_netdev *obj;
413
414	if (key >= map->max_entries)
415		return NULL;
416
417	obj = READ_ONCE(dtab->netdev_map[key]);
418	return obj;
419}
420
421/* Runs under RCU-read-side, plus in softirq under NAPI protection.
422 * Thus, safe percpu variable access.
423 */
424static int bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
425		      struct net_device *dev_rx)
426{
427	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
428	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
429
430	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
431		bq_xmit_all(bq, 0);
432
 433	/* Ingress dev_rx will be the same for all xdp_frames in the
 434	 * bulk_queue, because the bq is stored per-CPU and must be flushed
 435	 * at the end of the net_device driver's NAPI function.
436	 */
437	if (!bq->dev_rx)
438		bq->dev_rx = dev_rx;
439
440	bq->q[bq->count++] = xdpf;
441
442	if (!bq->flush_node.prev)
443		list_add(&bq->flush_node, flush_list);
444
445	return 0;
446}
447
448static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
449			       struct net_device *dev_rx)
450{
451	struct xdp_frame *xdpf;
452	int err;
453
454	if (!dev->netdev_ops->ndo_xdp_xmit)
455		return -EOPNOTSUPP;
456
457	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
458	if (unlikely(err))
459		return err;
460
461	xdpf = xdp_convert_buff_to_frame(xdp);
462	if (unlikely(!xdpf))
463		return -EOVERFLOW;
464
465	return bq_enqueue(dev, xdpf, dev_rx);
466}
467
468static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
469					 struct xdp_buff *xdp,
470					 struct bpf_prog *xdp_prog)
471{
472	struct xdp_txq_info txq = { .dev = dev };
473	u32 act;
474
475	xdp_set_data_meta_invalid(xdp);
476	xdp->txq = &txq;
477
478	act = bpf_prog_run_xdp(xdp_prog, xdp);
479	switch (act) {
480	case XDP_PASS:
481		return xdp;
482	case XDP_DROP:
483		break;
484	default:
485		bpf_warn_invalid_xdp_action(act);
486		fallthrough;
487	case XDP_ABORTED:
488		trace_xdp_exception(dev, xdp_prog, act);
489		break;
490	}
491
492	xdp_return_buff(xdp);
493	return NULL;
494}
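dev_map_run_prog() above runs a second, per-entry XDP program before transmission. Below is a hedged sketch of such a program; the "xdp_devmap/" section prefix is what libbpf of this era uses to set expected_attach_type to BPF_XDP_DEVMAP, and the filtering logic is only an illustrative assumption.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp_devmap/egress_filter")
int egress_filter(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* Drop frames too short to hold an Ethernet header, pass the rest on to xmit */
	if (data + 14 > data_end)
		return XDP_DROP;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";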
495
496int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
497		    struct net_device *dev_rx)
498{
499	return __xdp_enqueue(dev, xdp, dev_rx);
500}
501
502int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
503		    struct net_device *dev_rx)
504{
505	struct net_device *dev = dst->dev;
506
507	if (dst->xdp_prog) {
508		xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);
509		if (!xdp)
510			return 0;
511	}
512	return __xdp_enqueue(dev, xdp, dev_rx);
513}
514
515int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
516			     struct bpf_prog *xdp_prog)
517{
518	int err;
519
520	err = xdp_ok_fwd_dev(dst->dev, skb->len);
521	if (unlikely(err))
522		return err;
523	skb->dev = dst->dev;
524	generic_xdp_tx(skb, xdp_prog);
525
526	return 0;
527}
528
529static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
530{
531	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
532
533	return obj ? &obj->val : NULL;
534}
535
536static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
537{
538	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
539								*(u32 *)key);
540	return obj ? &obj->val : NULL;
541}
542
543static void __dev_map_entry_free(struct rcu_head *rcu)
544{
545	struct bpf_dtab_netdev *dev;
546
547	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
548	if (dev->xdp_prog)
549		bpf_prog_put(dev->xdp_prog);
550	dev_put(dev->dev);
551	kfree(dev);
552}
553
554static int dev_map_delete_elem(struct bpf_map *map, void *key)
555{
556	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
557	struct bpf_dtab_netdev *old_dev;
558	int k = *(u32 *)key;
559
560	if (k >= map->max_entries)
561		return -EINVAL;
562
563	/* Use call_rcu() here to ensure any rcu critical sections have
564	 * completed as well as any flush operations because call_rcu
565	 * will wait for preempt-disable region to complete, NAPI in this
566	 * context.  And additionally, the driver tear down ensures all
567	 * soft irqs are complete before removing the net device in the
568	 * case of dev_put equals zero.
569	 */
570	old_dev = xchg(&dtab->netdev_map[k], NULL);
571	if (old_dev)
572		call_rcu(&old_dev->rcu, __dev_map_entry_free);
573	return 0;
574}
575
576static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
577{
578	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
579	struct bpf_dtab_netdev *old_dev;
580	int k = *(u32 *)key;
581	unsigned long flags;
582	int ret = -ENOENT;
583
584	spin_lock_irqsave(&dtab->index_lock, flags);
585
586	old_dev = __dev_map_hash_lookup_elem(map, k);
587	if (old_dev) {
588		dtab->items--;
589		hlist_del_init_rcu(&old_dev->index_hlist);
590		call_rcu(&old_dev->rcu, __dev_map_entry_free);
591		ret = 0;
592	}
593	spin_unlock_irqrestore(&dtab->index_lock, flags);
594
595	return ret;
596}
597
598static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
599						    struct bpf_dtab *dtab,
600						    struct bpf_devmap_val *val,
601						    unsigned int idx)
602{
603	struct bpf_prog *prog = NULL;
604	struct bpf_dtab_netdev *dev;
605
606	dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN,
607			   dtab->map.numa_node);
608	if (!dev)
609		return ERR_PTR(-ENOMEM);
610
611	dev->dev = dev_get_by_index(net, val->ifindex);
612	if (!dev->dev)
613		goto err_out;
614
615	if (val->bpf_prog.fd > 0) {
616		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
617					     BPF_PROG_TYPE_XDP, false);
618		if (IS_ERR(prog))
619			goto err_put_dev;
620		if (prog->expected_attach_type != BPF_XDP_DEVMAP)
621			goto err_put_prog;
622	}
623
624	dev->idx = idx;
625	dev->dtab = dtab;
626	if (prog) {
627		dev->xdp_prog = prog;
628		dev->val.bpf_prog.id = prog->aux->id;
629	} else {
630		dev->xdp_prog = NULL;
631		dev->val.bpf_prog.id = 0;
632	}
633	dev->val.ifindex = val->ifindex;
634
635	return dev;
636err_put_prog:
637	bpf_prog_put(prog);
638err_put_dev:
639	dev_put(dev->dev);
640err_out:
641	kfree(dev);
642	return ERR_PTR(-EINVAL);
643}
644
645static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
646				 void *key, void *value, u64 map_flags)
647{
648	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
649	struct bpf_dtab_netdev *dev, *old_dev;
650	struct bpf_devmap_val val = {};
651	u32 i = *(u32 *)key;
652
653	if (unlikely(map_flags > BPF_EXIST))
654		return -EINVAL;
655	if (unlikely(i >= dtab->map.max_entries))
656		return -E2BIG;
657	if (unlikely(map_flags == BPF_NOEXIST))
658		return -EEXIST;
659
660	/* already verified value_size <= sizeof val */
661	memcpy(&val, value, map->value_size);
662
663	if (!val.ifindex) {
664		dev = NULL;
665		/* can not specify fd if ifindex is 0 */
666		if (val.bpf_prog.fd > 0)
667			return -EINVAL;
668	} else {
669		dev = __dev_map_alloc_node(net, dtab, &val, i);
670		if (IS_ERR(dev))
671			return PTR_ERR(dev);
672	}
673
 674	/* Use call_rcu() here to ensure rcu critical sections have completed.
 675	 * Remember that the driver side flush operation will happen before the
676	 * net device is removed.
677	 */
678	old_dev = xchg(&dtab->netdev_map[i], dev);
679	if (old_dev)
680		call_rcu(&old_dev->rcu, __dev_map_entry_free);
681
682	return 0;
683}
684
685static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
686			       u64 map_flags)
687{
688	return __dev_map_update_elem(current->nsproxy->net_ns,
689				     map, key, value, map_flags);
690}
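With the 8-byte value format accepted by dev_map_init_map() above, user space can install an egress device together with the fd of a BPF_XDP_DEVMAP program in a single update. A hedged sketch; map_fd, prog_fd, and the slot number are placeholders:

#include <linux/bpf.h>
#include <bpf/bpf.h>

static int set_port_with_prog(int map_fd, __u32 slot, __u32 ifindex, int prog_fd)
{
	struct bpf_devmap_val val = {
		.ifindex = ifindex,		/* egress device */
		.bpf_prog.fd = prog_fd,		/* run before ndo_xdp_xmit() */
	};

	return bpf_map_update_elem(map_fd, &slot, &val, BPF_ANY);
}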
691
692static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
693				     void *key, void *value, u64 map_flags)
694{
695	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
696	struct bpf_dtab_netdev *dev, *old_dev;
697	struct bpf_devmap_val val = {};
698	u32 idx = *(u32 *)key;
699	unsigned long flags;
700	int err = -EEXIST;
701
702	/* already verified value_size <= sizeof val */
703	memcpy(&val, value, map->value_size);
704
705	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
706		return -EINVAL;
707
708	spin_lock_irqsave(&dtab->index_lock, flags);
709
710	old_dev = __dev_map_hash_lookup_elem(map, idx);
711	if (old_dev && (map_flags & BPF_NOEXIST))
712		goto out_err;
713
714	dev = __dev_map_alloc_node(net, dtab, &val, idx);
715	if (IS_ERR(dev)) {
716		err = PTR_ERR(dev);
717		goto out_err;
718	}
719
720	if (old_dev) {
721		hlist_del_rcu(&old_dev->index_hlist);
722	} else {
723		if (dtab->items >= dtab->map.max_entries) {
724			spin_unlock_irqrestore(&dtab->index_lock, flags);
725			call_rcu(&dev->rcu, __dev_map_entry_free);
726			return -E2BIG;
727		}
728		dtab->items++;
729	}
730
731	hlist_add_head_rcu(&dev->index_hlist,
732			   dev_map_index_hash(dtab, idx));
733	spin_unlock_irqrestore(&dtab->index_lock, flags);
734
735	if (old_dev)
736		call_rcu(&old_dev->rcu, __dev_map_entry_free);
737
738	return 0;
739
740out_err:
741	spin_unlock_irqrestore(&dtab->index_lock, flags);
742	return err;
743}
744
745static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
746				   u64 map_flags)
747{
748	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
749					 map, key, value, map_flags);
750}
751
752static int dev_map_btf_id;
753const struct bpf_map_ops dev_map_ops = {
754	.map_alloc = dev_map_alloc,
755	.map_free = dev_map_free,
756	.map_get_next_key = dev_map_get_next_key,
757	.map_lookup_elem = dev_map_lookup_elem,
758	.map_update_elem = dev_map_update_elem,
759	.map_delete_elem = dev_map_delete_elem,
760	.map_check_btf = map_check_no_btf,
761	.map_btf_name = "bpf_dtab",
762	.map_btf_id = &dev_map_btf_id,
763};
764
765static int dev_map_hash_map_btf_id;
766const struct bpf_map_ops dev_map_hash_ops = {
767	.map_alloc = dev_map_alloc,
768	.map_free = dev_map_free,
769	.map_get_next_key = dev_map_hash_get_next_key,
770	.map_lookup_elem = dev_map_hash_lookup_elem,
771	.map_update_elem = dev_map_hash_update_elem,
772	.map_delete_elem = dev_map_hash_delete_elem,
773	.map_check_btf = map_check_no_btf,
774	.map_btf_name = "bpf_dtab",
775	.map_btf_id = &dev_map_hash_map_btf_id,
776};
777
778static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
779				       struct net_device *netdev)
780{
781	unsigned long flags;
782	u32 i;
783
784	spin_lock_irqsave(&dtab->index_lock, flags);
785	for (i = 0; i < dtab->n_buckets; i++) {
786		struct bpf_dtab_netdev *dev;
787		struct hlist_head *head;
788		struct hlist_node *next;
789
790		head = dev_map_index_hash(dtab, i);
791
792		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
793			if (netdev != dev->dev)
794				continue;
795
796			dtab->items--;
797			hlist_del_rcu(&dev->index_hlist);
798			call_rcu(&dev->rcu, __dev_map_entry_free);
799		}
800	}
801	spin_unlock_irqrestore(&dtab->index_lock, flags);
802}
803
804static int dev_map_notification(struct notifier_block *notifier,
805				ulong event, void *ptr)
806{
807	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
808	struct bpf_dtab *dtab;
809	int i, cpu;
810
811	switch (event) {
812	case NETDEV_REGISTER:
813		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
814			break;
815
816		/* will be freed in free_netdev() */
817		netdev->xdp_bulkq =
818			__alloc_percpu_gfp(sizeof(struct xdp_dev_bulk_queue),
819					   sizeof(void *), GFP_ATOMIC);
820		if (!netdev->xdp_bulkq)
821			return NOTIFY_BAD;
822
823		for_each_possible_cpu(cpu)
824			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
825		break;
826	case NETDEV_UNREGISTER:
827		/* This rcu_read_lock/unlock pair is needed because
828		 * dev_map_list is an RCU list AND to ensure a delete
829		 * operation does not free a netdev_map entry while we
830		 * are comparing it against the netdev being unregistered.
831		 */
832		rcu_read_lock();
833		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
834			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
835				dev_map_hash_remove_netdev(dtab, netdev);
836				continue;
837			}
838
839			for (i = 0; i < dtab->map.max_entries; i++) {
840				struct bpf_dtab_netdev *dev, *odev;
841
842				dev = READ_ONCE(dtab->netdev_map[i]);
843				if (!dev || netdev != dev->dev)
844					continue;
845				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
846				if (dev == odev)
847					call_rcu(&dev->rcu,
848						 __dev_map_entry_free);
849			}
850		}
851		rcu_read_unlock();
852		break;
853	default:
854		break;
855	}
856	return NOTIFY_OK;
857}
858
859static struct notifier_block dev_map_notifier = {
860	.notifier_call = dev_map_notification,
861};
862
863static int __init dev_map_init(void)
864{
865	int cpu;
866
867	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
868	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
869		     offsetof(struct _bpf_dtab_netdev, dev));
870	register_netdevice_notifier(&dev_map_notifier);
871
872	for_each_possible_cpu(cpu)
873		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
874	return 0;
875}
876
877subsys_initcall(dev_map_init);