v4.17
 
  1/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
  2 *
  3 * This program is free software; you can redistribute it and/or
  4 * modify it under the terms of version 2 of the GNU General Public
  5 * License as published by the Free Software Foundation.
  6 *
  7 * This program is distributed in the hope that it will be useful, but
  8 * WITHOUT ANY WARRANTY; without even the implied warranty of
  9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 10 * General Public License for more details.
 11 */
 12
 13/* Devmap's primary use is as a backend map for the XDP BPF helper call
 14 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 15 * spent some effort to ensure the datapath with redirect maps does not use
 16 * any locking. This is a quick note on the details.
 17 *
 18 * We have three possible paths to get into the devmap control plane: bpf
 19 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 20 * will invoke an update, delete, or lookup operation. To ensure updates and
 21 * deletes appear atomic from the datapath side xchg() is used to modify the
 22 * netdev_map array. Then because the datapath does a lookup into the netdev_map
 23 * array (read-only) from an RCU critical section we use call_rcu() to wait for
 24 * an rcu grace period before free'ing the old data structures. This ensures the
 25 * datapath always has a valid copy. However, the datapath does a "flush"
 26 * operation that pushes any pending packets in the driver outside the RCU
 27 * critical section. Each bpf_dtab_netdev tracks these pending operations using
 28 * an atomic per-cpu bitmap. The bpf_dtab_netdev object will not be destroyed
 29 * until all bits are cleared indicating outstanding flush operations have
 30 * completed.
 31 *
 32 * BPF syscalls may race with BPF program calls on any of the update, delete
 33 * or lookup operations. As noted above, the xchg() operation also keeps the
 34 * netdev_map consistent in this case. From the devmap side BPF programs
 35 * calling into these operations are the same as multiple user space threads
 36 * making system calls.
 37 *
 38 * Finally, any of the above may race with a netdev_unregister notifier. The
 39 * unregister notifier must search for net devices in the map structure that
 40 * contain a reference to the net device and remove them. This is a two step
 41 * process (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 42 * check to see if the ifindex is the same as the net_device being removed.
 43 * When removing the dev a cmpxchg() is used to ensure the correct dev is
 44 * removed, in the case of a concurrent update or delete operation it is
 45 * possible that the initially referenced dev is no longer in the map. As the
 46 * notifier hook walks the map we know that new dev references can not be
 47 * added by the user because core infrastructure ensures dev_get_by_index()
 48 * calls will fail at this point.
 49 */
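As a BPF-side illustration of the scheme described above, the sketch below (not part of devmap.c; it assumes the legacy bpf_map_def/SEC() conventions from the kernel samples of this era, and the map and program names are made up) shows a minimal XDP program that uses a DEVMAP as the backend for bpf_redirect_map():

#include <linux/bpf.h>
#include "bpf_helpers.h"

/* DEVMAP: 4-byte key (array index), 4-byte value (ifindex). */
struct bpf_map_def SEC("maps") tx_ports = {
        .type           = BPF_MAP_TYPE_DEVMAP,
        .key_size       = sizeof(__u32),
        .value_size     = sizeof(__u32),
        .max_entries    = 64,
};

SEC("xdp")
int xdp_redirect_slot0(struct xdp_md *ctx)
{
        /* Transmit the frame out of the device stored in slot 0;
         * returns XDP_REDIRECT on success, XDP_ABORTED otherwise.
         */
        return bpf_redirect_map(&tx_ports, 0, 0);
}

char _license[] SEC("license") = "GPL";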
 50#include <linux/bpf.h>
 51#include <linux/filter.h>
 52
 53#define DEV_CREATE_FLAG_MASK \
 54	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
 55
 56struct bpf_dtab_netdev {
 57	struct net_device *dev;
 58	struct bpf_dtab *dtab;
 59	unsigned int bit;
 60	struct rcu_head rcu;
 61};
 62
 63struct bpf_dtab {
 64	struct bpf_map map;
 65	struct bpf_dtab_netdev **netdev_map;
 66	unsigned long __percpu *flush_needed;
 67	struct list_head list;
 68};
 69
 70static DEFINE_SPINLOCK(dev_map_lock);
 71static LIST_HEAD(dev_map_list);
 72
 73static u64 dev_map_bitmap_size(const union bpf_attr *attr)
 74{
 75	return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
 76}
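A quick worked example of the sizing: with max_entries = 128 on a 64-bit machine, BITS_TO_LONGS(128) = 2, so each possible CPU gets a 16-byte bitmap; dev_map_alloc() below charges this amount once per possible CPU on top of the netdev_map pointer array.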
 77
 78static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 79{
 80	struct bpf_dtab *dtab;
 81	int err = -EINVAL;
 82	u64 cost;
 83
 84	if (!capable(CAP_NET_ADMIN))
 85		return ERR_PTR(-EPERM);
 86
 87	/* check sanity of attributes */
 88	if (attr->max_entries == 0 || attr->key_size != 4 ||
 89	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
 90		return ERR_PTR(-EINVAL);
 91
 92	dtab = kzalloc(sizeof(*dtab), GFP_USER);
 93	if (!dtab)
 94		return ERR_PTR(-ENOMEM);
 95
 96	bpf_map_init_from_attr(&dtab->map, attr);
 97
 98	/* make sure page count doesn't overflow */
 99	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
100	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
101	if (cost >= U32_MAX - PAGE_SIZE)
102		goto free_dtab;
103
104	dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
105
106	/* if map size is larger than memlock limit, reject it early */
107	err = bpf_map_precharge_memlock(dtab->map.pages);
108	if (err)
109		goto free_dtab;
110
111	err = -ENOMEM;
112
113	/* A per cpu bitfield with a bit per possible net device */
114	dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
115						__alignof__(unsigned long),
116						GFP_KERNEL | __GFP_NOWARN);
117	if (!dtab->flush_needed)
118		goto free_dtab;
119
120	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
121					      sizeof(struct bpf_dtab_netdev *),
122					      dtab->map.numa_node);
123	if (!dtab->netdev_map)
124		goto free_dtab;
125
126	spin_lock(&dev_map_lock);
127	list_add_tail_rcu(&dtab->list, &dev_map_list);
128	spin_unlock(&dev_map_lock);
129
130	return &dtab->map;
131free_dtab:
132	free_percpu(dtab->flush_needed);
133	kfree(dtab);
134	return ERR_PTR(err);
135}
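Seen from user space, the sanity checks above mean a devmap is created with 4-byte keys and values (and CAP_NET_ADMIN); a hedged sketch assuming libbpf's bpf_create_map() wrapper, with illustrative names:

#include <stdio.h>
#include <bpf/bpf.h>

int create_devmap(void)
{
        /* key_size and value_size must both be 4, per dev_map_alloc() */
        int fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP, sizeof(__u32),
                                sizeof(__u32), 64, 0);

        if (fd < 0)
                perror("bpf_create_map");
        return fd;
}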
136
137static void dev_map_free(struct bpf_map *map)
138{
139	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
140	int i, cpu;
141
142	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
143	 * so the programs (can be more than one that used this map) were
144	 * disconnected from events. Wait for outstanding critical sections in
145	 * these programs to complete. The rcu critical section only guarantees
146	 * no further reads against netdev_map. It does __not__ ensure pending
147	 * flush operations (if any) are complete.
148	 */
149
150	spin_lock(&dev_map_lock);
151	list_del_rcu(&dtab->list);
152	spin_unlock(&dev_map_lock);
153
154	synchronize_rcu();
155
156	/* To ensure all pending flush operations have completed wait for flush
157	 * bitmap to indicate all flush_needed bits to be zero on _all_ cpus.
158	 * Because the above synchronize_rcu() ensures the map is disconnected
159	 * from the program we can assume no new bits will be set.
160	 */
161	for_each_online_cpu(cpu) {
162		unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);
163
164		while (!bitmap_empty(bitmap, dtab->map.max_entries))
165			cond_resched();
166	}
167
168	for (i = 0; i < dtab->map.max_entries; i++) {
169		struct bpf_dtab_netdev *dev;
170
171		dev = dtab->netdev_map[i];
172		if (!dev)
173			continue;
174
175		dev_put(dev->dev);
176		kfree(dev);
177	}
178
179	free_percpu(dtab->flush_needed);
180	bpf_map_area_free(dtab->netdev_map);
181	kfree(dtab);
182}
183
184static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
185{
186	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
187	u32 index = key ? *(u32 *)key : U32_MAX;
188	u32 *next = next_key;
189
190	if (index >= dtab->map.max_entries) {
191		*next = 0;
192		return 0;
193	}
194
195	if (index == dtab->map.max_entries - 1)
196		return -ENOENT;
197	*next = index + 1;
198	return 0;
199}
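Because the walk above simply steps through array indexes (wrapping an out-of-range start key back to slot 0), user space can dump a devmap with the usual get-next-key loop; a hedged sketch, assuming map_fd was obtained as shown earlier:

#include <stdio.h>
#include <bpf/bpf.h>

static void dump_devmap(int map_fd)
{
        __u32 key, next, ifindex;
        int err;

        /* A NULL key asks for the first slot; -ENOENT ends the walk. */
        for (err = bpf_map_get_next_key(map_fd, NULL, &key); !err;
             err = bpf_map_get_next_key(map_fd, &key, &next), key = next) {
                if (!bpf_map_lookup_elem(map_fd, &key, &ifindex))
                        printf("slot %u -> ifindex %u\n", key, ifindex);
        }
}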
200
201void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
202{
203	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
204	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
205
206	__set_bit(bit, bitmap);
207}
208
209/* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled
210 * from the driver before returning from its napi->poll() routine. The poll()
211 * routine is called either from busy_poll context or net_rx_action signaled
212 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
213 * net device can be torn down. On devmap tear down we ensure the ctx bitmap
214 * is zeroed before completing to ensure all flush operations have completed.
215 */
216void __dev_map_flush(struct bpf_map *map)
217{
218	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
219	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
220	u32 bit;
221
222	for_each_set_bit(bit, bitmap, map->max_entries) {
223		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
224		struct net_device *netdev;
225
226		/* This is possible if the dev entry is removed by user space
227		 * between xdp redirect and flush op.
228		 */
229		if (unlikely(!dev))
230			continue;
231
232		__clear_bit(bit, bitmap);
233		netdev = dev->dev;
234		if (likely(netdev->netdev_ops->ndo_xdp_flush))
235			netdev->netdev_ops->ndo_xdp_flush(netdev);
236	}
237}
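In practice this means a driver's poll() routine performs all of its xdp_do_redirect() calls first and issues a single xdp_do_flush_map() before returning; that call is what lands here, walking the per-cpu bitmap and invoking ndo_xdp_flush() once for each device with pending frames.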
238
239/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
 240 * update happens in parallel here, a dev_put won't happen until after reading the
241 * ifindex.
242 */
243struct net_device  *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
244{
245	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
246	struct bpf_dtab_netdev *dev;
247
248	if (key >= map->max_entries)
249		return NULL;
250
251	dev = READ_ONCE(dtab->netdev_map[key]);
252	return dev ? dev->dev : NULL;
253}
254
255static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
256{
257	struct net_device *dev = __dev_map_lookup_elem(map, *(u32 *)key);
258
259	return dev ? &dev->ifindex : NULL;
260}
261
262static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
263{
264	if (dev->dev->netdev_ops->ndo_xdp_flush) {
265		struct net_device *fl = dev->dev;
266		unsigned long *bitmap;
267		int cpu;
268
269		for_each_online_cpu(cpu) {
270			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
271			__clear_bit(dev->bit, bitmap);
272
273			fl->netdev_ops->ndo_xdp_flush(dev->dev);
274		}
275	}
276}
277
278static void __dev_map_entry_free(struct rcu_head *rcu)
279{
280	struct bpf_dtab_netdev *dev;
281
282	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
283	dev_map_flush_old(dev);
284	dev_put(dev->dev);
285	kfree(dev);
286}
287
288static int dev_map_delete_elem(struct bpf_map *map, void *key)
289{
290	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
291	struct bpf_dtab_netdev *old_dev;
292	int k = *(u32 *)key;
293
294	if (k >= map->max_entries)
295		return -EINVAL;
296
297	/* Use call_rcu() here to ensure any rcu critical sections have
298	 * completed, but this does not guarantee a flush has happened
299	 * yet. Because driver side rcu_read_lock/unlock only protects the
300	 * running XDP program. However, for pending flush operations the
301	 * dev and ctx are stored in another per cpu map. And additionally,
302	 * the driver tear down ensures all soft irqs are complete before
303	 * removing the net device in the case of dev_put equals zero.
304	 */
305	old_dev = xchg(&dtab->netdev_map[k], NULL);
306	if (old_dev)
307		call_rcu(&old_dev->rcu, __dev_map_entry_free);
308	return 0;
309}
310
311static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
312				u64 map_flags)
313{
314	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
315	struct net *net = current->nsproxy->net_ns;
316	struct bpf_dtab_netdev *dev, *old_dev;
317	u32 i = *(u32 *)key;
318	u32 ifindex = *(u32 *)value;
319
320	if (unlikely(map_flags > BPF_EXIST))
321		return -EINVAL;
322	if (unlikely(i >= dtab->map.max_entries))
323		return -E2BIG;
324	if (unlikely(map_flags == BPF_NOEXIST))
325		return -EEXIST;
326
327	if (!ifindex) {
328		dev = NULL;
329	} else {
330		dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN,
331				   map->numa_node);
332		if (!dev)
333			return -ENOMEM;
334
335		dev->dev = dev_get_by_index(net, ifindex);
336		if (!dev->dev) {
337			kfree(dev);
338			return -EINVAL;
339		}
340
341		dev->bit = i;
342		dev->dtab = dtab;
343	}
344
345	/* Use call_rcu() here to ensure rcu critical sections have completed
346	 * Remembering the driver side flush operation will happen before the
347	 * net device is removed.
348	 */
349	old_dev = xchg(&dtab->netdev_map[i], dev);
350	if (old_dev)
351		call_rcu(&old_dev->rcu, __dev_map_entry_free);
352
353	return 0;
354}
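From user space, populating or clearing a slot is an ordinary map update; a hedged sketch using the libbpf wrappers, with map_fd and the interface name purely illustrative:

#include <net/if.h>
#include <bpf/bpf.h>

static int set_slot(int map_fd, __u32 slot, const char *name)
{
        __u32 ifindex = if_nametoindex(name);   /* e.g. "eth0" */

        if (!ifindex)
                return -1;
        /* A non-zero ifindex takes a reference on the device; writing 0
         * (or deleting the key) clears the slot via call_rcu() as above.
         */
        return bpf_map_update_elem(map_fd, &slot, &ifindex, 0);
}

static int clear_slot(int map_fd, __u32 slot)
{
        return bpf_map_delete_elem(map_fd, &slot);
}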
355
356const struct bpf_map_ops dev_map_ops = {
357	.map_alloc = dev_map_alloc,
358	.map_free = dev_map_free,
359	.map_get_next_key = dev_map_get_next_key,
360	.map_lookup_elem = dev_map_lookup_elem,
361	.map_update_elem = dev_map_update_elem,
362	.map_delete_elem = dev_map_delete_elem,
363};
364
365static int dev_map_notification(struct notifier_block *notifier,
366				ulong event, void *ptr)
367{
368	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
369	struct bpf_dtab *dtab;
370	int i;
371
372	switch (event) {
373	case NETDEV_UNREGISTER:
374		/* This rcu_read_lock/unlock pair is needed because
375		 * dev_map_list is an RCU list AND to ensure a delete
376		 * operation does not free a netdev_map entry while we
377		 * are comparing it against the netdev being unregistered.
378		 */
379		rcu_read_lock();
380		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
381			for (i = 0; i < dtab->map.max_entries; i++) {
382				struct bpf_dtab_netdev *dev, *odev;
383
384				dev = READ_ONCE(dtab->netdev_map[i]);
385				if (!dev ||
386				    dev->dev->ifindex != netdev->ifindex)
387					continue;
388				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
389				if (dev == odev)
390					call_rcu(&dev->rcu,
391						 __dev_map_entry_free);
392			}
393		}
394		rcu_read_unlock();
395		break;
396	default:
397		break;
398	}
399	return NOTIFY_OK;
400}
401
402static struct notifier_block dev_map_notifier = {
403	.notifier_call = dev_map_notification,
404};
405
406static int __init dev_map_init(void)
407{
408	register_netdevice_notifier(&dev_map_notifier);
409	return 0;
410}
411
412subsys_initcall(dev_map_init);
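The v5.4 version of the same file (kernel/bpf/devmap.c) follows. The main structural changes relative to v4.17 are that the per-cpu flush bitmap is replaced by per-cpu flush lists of xdp_bulk_queue entries, that frames are batched and transmitted through ndo_xdp_xmit() with XDP_XMIT_FLUSH rather than flushed via ndo_xdp_flush(), and that a second map type, BPF_MAP_TYPE_DEVMAP_HASH, stores entries in a hashmap so that ifindex-style keys do not leave holes in a lookup array.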
v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
  3 */
  4
  5/* Devmap's primary use is as a backend map for the XDP BPF helper call
  6 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
  7 * spent some effort to ensure the datapath with redirect maps does not use
  8 * any locking. This is a quick note on the details.
  9 *
 10 * We have three possible paths to get into the devmap control plane: bpf
 11 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 12 * will invoke an update, delete, or lookup operation. To ensure updates and
 13 * deletes appear atomic from the datapath side xchg() is used to modify the
 14 * netdev_map array. Then because the datapath does a lookup into the netdev_map
 15 * array (read-only) from an RCU critical section we use call_rcu() to wait for
 16 * an rcu grace period before free'ing the old data structures. This ensures the
 17 * datapath always has a valid copy. However, the datapath does a "flush"
 18 * operation that pushes any pending packets in the driver outside the RCU
 19 * critical section. Each bpf_dtab_netdev tracks these pending operations using
 20 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
 21 * this list is empty, indicating outstanding flush operations have completed.
 22 *
 23 * BPF syscalls may race with BPF program calls on any of the update, delete
 24 * or lookup operations. As noted above, the xchg() operation also keeps the
 25 * netdev_map consistent in this case. From the devmap side BPF programs
 26 * calling into these operations are the same as multiple user space threads
 27 * making system calls.
 28 *
 29 * Finally, any of the above may race with a netdev_unregister notifier. The
 30 * unregister notifier must search for net devices in the map structure that
 31 * contain a reference to the net device and remove them. This is a two step
 32 * process (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 33 * check to see if the ifindex is the same as the net_device being removed.
 34 * When removing the dev a cmpxchg() is used to ensure the correct dev is
 35 * removed, in the case of a concurrent update or delete operation it is
 36 * possible that the initially referenced dev is no longer in the map. As the
 37 * notifier hook walks the map we know that new dev references can not be
 38 * added by the user because core infrastructure ensures dev_get_by_index()
 39 * calls will fail at this point.
 40 *
 41 * The devmap_hash type is a map type which interprets keys as ifindexes and
 42 * indexes these using a hashmap. This allows maps that use ifindex as key to be
 43 * densely packed instead of having holes in the lookup array for unused
 44 * ifindexes. The setup and packet enqueue/send code is shared between the two
 45 * types of devmap; only the lookup and insertion is different.
 46 */
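As a hedged BPF-side illustration of the devmap_hash type described above (not part of devmap.c; BTF-style map definitions from libbpf's bpf_helpers.h are assumed, and the names are made up), a program can key the map by ifindex and redirect to whatever device user space stored under that key:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
        __uint(key_size, sizeof(__u32));        /* keyed by ifindex */
        __uint(value_size, sizeof(__u32));
        __uint(max_entries, 64);
} tx_ports SEC(".maps");

SEC("xdp")
int xdp_redirect_by_ifindex(struct xdp_md *ctx)
{
        /* The ingress ifindex is used as the key purely as an example;
         * any u32 the program computes would do.
         */
        return bpf_redirect_map(&tx_ports, ctx->ingress_ifindex, 0);
}

char _license[] SEC("license") = "GPL";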
 47#include <linux/bpf.h>
 48#include <net/xdp.h>
 49#include <linux/filter.h>
 50#include <trace/events/xdp.h>
 51
 52#define DEV_CREATE_FLAG_MASK \
 53	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
 54
 55#define DEV_MAP_BULK_SIZE 16
 56struct bpf_dtab_netdev;
 57
 58struct xdp_bulk_queue {
 59	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
 60	struct list_head flush_node;
 61	struct net_device *dev_rx;
 62	struct bpf_dtab_netdev *obj;
 63	unsigned int count;
 64};
 65
 66struct bpf_dtab_netdev {
 67	struct net_device *dev; /* must be first member, due to tracepoint */
 68	struct hlist_node index_hlist;
 69	struct bpf_dtab *dtab;
 70	struct xdp_bulk_queue __percpu *bulkq;
 71	struct rcu_head rcu;
 72	unsigned int idx; /* keep track of map index for tracepoint */
 73};
 74
 75struct bpf_dtab {
 76	struct bpf_map map;
 77	struct bpf_dtab_netdev **netdev_map;
 78	struct list_head __percpu *flush_list;
 79	struct list_head list;
 80
 81	/* these are only used for DEVMAP_HASH type maps */
 82	struct hlist_head *dev_index_head;
 83	spinlock_t index_lock;
 84	unsigned int items;
 85	u32 n_buckets;
 86};
 87
 88static DEFINE_SPINLOCK(dev_map_lock);
 89static LIST_HEAD(dev_map_list);
 90
 91static struct hlist_head *dev_map_create_hash(unsigned int entries)
 92{
 93	int i;
 94	struct hlist_head *hash;
 95
 96	hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
 97	if (hash != NULL)
 98		for (i = 0; i < entries; i++)
 99			INIT_HLIST_HEAD(&hash[i]);
100
101	return hash;
102}
103
104static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
105{
106	int err, cpu;
107	u64 cost;
108
109	/* check sanity of attributes */
110	if (attr->max_entries == 0 || attr->key_size != 4 ||
111	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
112		return -EINVAL;
113
114	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
115	 * verifier prevents writes from the BPF side
116	 */
117	attr->map_flags |= BPF_F_RDONLY_PROG;
118
119
120	bpf_map_init_from_attr(&dtab->map, attr);
121
122	/* make sure page count doesn't overflow */
123	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
124	cost += sizeof(struct list_head) * num_possible_cpus();
125
126	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
127		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
128
129		if (!dtab->n_buckets) /* Overflow check */
130			return -EINVAL;
131		cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
132	}
133
134	/* if map size is larger than memlock limit, reject it */
135	err = bpf_map_charge_init(&dtab->map.memory, cost);
136	if (err)
137		return -EINVAL;
138
139	dtab->flush_list = alloc_percpu(struct list_head);
140	if (!dtab->flush_list)
141		goto free_charge;
142
143	for_each_possible_cpu(cpu)
144		INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));
145
146	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
147					      sizeof(struct bpf_dtab_netdev *),
148					      dtab->map.numa_node);
149	if (!dtab->netdev_map)
150		goto free_percpu;
151
152	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
153		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
154		if (!dtab->dev_index_head)
155			goto free_map_area;
156
157		spin_lock_init(&dtab->index_lock);
158	}
159
160	return 0;
161
162free_map_area:
163	bpf_map_area_free(dtab->netdev_map);
164free_percpu:
165	free_percpu(dtab->flush_list);
166free_charge:
167	bpf_map_charge_finish(&dtab->map.memory);
168	return -ENOMEM;
169}
170
171static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
172{
173	struct bpf_dtab *dtab;
174	int err;
175
176	if (!capable(CAP_NET_ADMIN))
177		return ERR_PTR(-EPERM);
178
179	dtab = kzalloc(sizeof(*dtab), GFP_USER);
180	if (!dtab)
181		return ERR_PTR(-ENOMEM);
182
183	err = dev_map_init_map(dtab, attr);
184	if (err) {
185		kfree(dtab);
186		return ERR_PTR(err);
187	}
188
189	spin_lock(&dev_map_lock);
190	list_add_tail_rcu(&dtab->list, &dev_map_list);
191	spin_unlock(&dev_map_lock);
192
193	return &dtab->map;
194}
195
196static void dev_map_free(struct bpf_map *map)
197{
198	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
199	int i, cpu;
200
201	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
202	 * so the programs (can be more than one that used this map) were
203	 * disconnected from events. Wait for outstanding critical sections in
204	 * these programs to complete. The rcu critical section only guarantees
205	 * no further reads against netdev_map. It does __not__ ensure pending
206	 * flush operations (if any) are complete.
207	 */
208
209	spin_lock(&dev_map_lock);
210	list_del_rcu(&dtab->list);
211	spin_unlock(&dev_map_lock);
212
213	bpf_clear_redirect_map(map);
214	synchronize_rcu();
215
216	/* Make sure prior __dev_map_entry_free() calls have completed. */
217	rcu_barrier();
218
219	/* To ensure all pending flush operations have completed wait for flush
220	 * list to empty on _all_ cpus.
221	 * Because the above synchronize_rcu() ensures the map is disconnected
222	 * from the program we can assume no new items will be added.
223	 */
224	for_each_online_cpu(cpu) {
225		struct list_head *flush_list = per_cpu_ptr(dtab->flush_list, cpu);
226
227		while (!list_empty(flush_list))
228			cond_resched();
229	}
230
231	for (i = 0; i < dtab->map.max_entries; i++) {
232		struct bpf_dtab_netdev *dev;
233
234		dev = dtab->netdev_map[i];
235		if (!dev)
236			continue;
237
238		free_percpu(dev->bulkq);
239		dev_put(dev->dev);
240		kfree(dev);
241	}
242
243	free_percpu(dtab->flush_list);
244	bpf_map_area_free(dtab->netdev_map);
245	kfree(dtab->dev_index_head);
246	kfree(dtab);
247}
248
249static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
250{
251	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
252	u32 index = key ? *(u32 *)key : U32_MAX;
253	u32 *next = next_key;
254
255	if (index >= dtab->map.max_entries) {
256		*next = 0;
257		return 0;
258	}
259
260	if (index == dtab->map.max_entries - 1)
261		return -ENOENT;
262	*next = index + 1;
263	return 0;
264}
265
266static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
267						    int idx)
268{
269	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
270}
271
272struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
273{
274	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
275	struct hlist_head *head = dev_map_index_hash(dtab, key);
276	struct bpf_dtab_netdev *dev;
277
278	hlist_for_each_entry_rcu(dev, head, index_hlist)
279		if (dev->idx == key)
280			return dev;
281
282	return NULL;
283}
284
285static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
286				    void *next_key)
287{
288	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
289	u32 idx, *next = next_key;
290	struct bpf_dtab_netdev *dev, *next_dev;
291	struct hlist_head *head;
292	int i = 0;
293
294	if (!key)
295		goto find_first;
296
297	idx = *(u32 *)key;
298
299	dev = __dev_map_hash_lookup_elem(map, idx);
300	if (!dev)
301		goto find_first;
302
303	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
304				    struct bpf_dtab_netdev, index_hlist);
305
306	if (next_dev) {
307		*next = next_dev->idx;
308		return 0;
309	}
310
311	i = idx & (dtab->n_buckets - 1);
312	i++;
313
314 find_first:
315	for (; i < dtab->n_buckets; i++) {
316		head = dev_map_index_hash(dtab, i);
317
318		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
319					    struct bpf_dtab_netdev,
320					    index_hlist);
321		if (next_dev) {
322			*next = next_dev->idx;
323			return 0;
324		}
325	}
326
327	return -ENOENT;
328}
329
330static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
331		       bool in_napi_ctx)
332{
333	struct bpf_dtab_netdev *obj = bq->obj;
334	struct net_device *dev = obj->dev;
335	int sent = 0, drops = 0, err = 0;
336	int i;
337
338	if (unlikely(!bq->count))
339		return 0;
340
341	for (i = 0; i < bq->count; i++) {
342		struct xdp_frame *xdpf = bq->q[i];
343
344		prefetch(xdpf);
345	}
346
347	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
348	if (sent < 0) {
349		err = sent;
350		sent = 0;
351		goto error;
352	}
353	drops = bq->count - sent;
354out:
355	bq->count = 0;
356
357	trace_xdp_devmap_xmit(&obj->dtab->map, obj->idx,
358			      sent, drops, bq->dev_rx, dev, err);
359	bq->dev_rx = NULL;
360	__list_del_clearprev(&bq->flush_node);
361	return 0;
362error:
363	/* If ndo_xdp_xmit fails with an errno, no frames have been
 364	 * xmit'ed and it's our responsibility to free them all.
365	 */
366	for (i = 0; i < bq->count; i++) {
367		struct xdp_frame *xdpf = bq->q[i];
368
369		/* RX path under NAPI protection, can return frames faster */
370		if (likely(in_napi_ctx))
371			xdp_return_frame_rx_napi(xdpf);
372		else
373			xdp_return_frame(xdpf);
374		drops++;
375	}
376	goto out;
377}
378
379/* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled
380 * from the driver before returning from its napi->poll() routine. The poll()
381 * routine is called either from busy_poll context or net_rx_action signaled
382 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
383 * net device can be torn down. On devmap tear down we ensure the flush list
384 * is empty before completing to ensure all flush operations have completed.
385 */
386void __dev_map_flush(struct bpf_map *map)
387{
388	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
389	struct list_head *flush_list = this_cpu_ptr(dtab->flush_list);
390	struct xdp_bulk_queue *bq, *tmp;
391
392	rcu_read_lock();
393	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
394		bq_xmit_all(bq, XDP_XMIT_FLUSH, true);
395	rcu_read_unlock();
396}
397
398/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
 399 * update happens in parallel here, a dev_put won't happen until after reading the
400 * ifindex.
401 */
402struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
403{
404	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
405	struct bpf_dtab_netdev *obj;
406
407	if (key >= map->max_entries)
408		return NULL;
409
410	obj = READ_ONCE(dtab->netdev_map[key]);
411	return obj;
412}
413
414/* Runs under RCU-read-side, plus in softirq under NAPI protection.
415 * Thus, safe percpu variable access.
416 */
417static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
418		      struct net_device *dev_rx)
419
420{
421	struct list_head *flush_list = this_cpu_ptr(obj->dtab->flush_list);
422	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
423
424	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
425		bq_xmit_all(bq, 0, true);
426
 427	/* Ingress dev_rx will be the same for all xdp_frames in the
 428	 * bulk_queue, because bq is stored per-CPU and must be flushed
 429	 * from the net_device driver's NAPI func end.
430	 */
431	if (!bq->dev_rx)
432		bq->dev_rx = dev_rx;
433
434	bq->q[bq->count++] = xdpf;
435
436	if (!bq->flush_node.prev)
437		list_add(&bq->flush_node, flush_list);
438
439	return 0;
440}
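The effect of the two functions above is that up to DEV_MAP_BULK_SIZE (16) frames headed for the same device are handed to ndo_xdp_xmit() in a single call, either when the per-cpu queue fills up here in bq_enqueue() or when __dev_map_flush() drains the flush list at the end of the NAPI poll.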
441
442int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
443		    struct net_device *dev_rx)
444{
445	struct net_device *dev = dst->dev;
446	struct xdp_frame *xdpf;
447	int err;
448
449	if (!dev->netdev_ops->ndo_xdp_xmit)
450		return -EOPNOTSUPP;
451
452	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
453	if (unlikely(err))
454		return err;
455
456	xdpf = convert_to_xdp_frame(xdp);
457	if (unlikely(!xdpf))
458		return -EOVERFLOW;
459
460	return bq_enqueue(dst, xdpf, dev_rx);
461}
462
463int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
464			     struct bpf_prog *xdp_prog)
465{
466	int err;
467
468	err = xdp_ok_fwd_dev(dst->dev, skb->len);
469	if (unlikely(err))
470		return err;
471	skb->dev = dst->dev;
472	generic_xdp_tx(skb, xdp_prog);
473
474	return 0;
475}
476
477static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
478{
479	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
480	struct net_device *dev = obj ? obj->dev : NULL;
481
482	return dev ? &dev->ifindex : NULL;
483}
484
485static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
486{
487	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
488								*(u32 *)key);
489	struct net_device *dev = obj ? obj->dev : NULL;
490
491	return dev ? &dev->ifindex : NULL;
492}
493
494static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
495{
496	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
497		struct xdp_bulk_queue *bq;
498		int cpu;
499
500		rcu_read_lock();
501		for_each_online_cpu(cpu) {
502			bq = per_cpu_ptr(dev->bulkq, cpu);
503			bq_xmit_all(bq, XDP_XMIT_FLUSH, false);
504		}
505		rcu_read_unlock();
506	}
507}
508
509static void __dev_map_entry_free(struct rcu_head *rcu)
510{
511	struct bpf_dtab_netdev *dev;
512
513	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
514	dev_map_flush_old(dev);
515	free_percpu(dev->bulkq);
516	dev_put(dev->dev);
517	kfree(dev);
518}
519
520static int dev_map_delete_elem(struct bpf_map *map, void *key)
521{
522	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
523	struct bpf_dtab_netdev *old_dev;
524	int k = *(u32 *)key;
525
526	if (k >= map->max_entries)
527		return -EINVAL;
528
529	/* Use call_rcu() here to ensure any rcu critical sections have
530	 * completed, but this does not guarantee a flush has happened
531	 * yet. Because driver side rcu_read_lock/unlock only protects the
532	 * running XDP program. However, for pending flush operations the
533	 * dev and ctx are stored in another per cpu map. And additionally,
534	 * the driver tear down ensures all soft irqs are complete before
535	 * removing the net device in the case of dev_put equals zero.
536	 */
537	old_dev = xchg(&dtab->netdev_map[k], NULL);
538	if (old_dev)
539		call_rcu(&old_dev->rcu, __dev_map_entry_free);
540	return 0;
541}
542
543static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
544{
545	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
546	struct bpf_dtab_netdev *old_dev;
547	int k = *(u32 *)key;
548	unsigned long flags;
549	int ret = -ENOENT;
550
551	spin_lock_irqsave(&dtab->index_lock, flags);
552
553	old_dev = __dev_map_hash_lookup_elem(map, k);
554	if (old_dev) {
555		dtab->items--;
556		hlist_del_init_rcu(&old_dev->index_hlist);
557		call_rcu(&old_dev->rcu, __dev_map_entry_free);
558		ret = 0;
559	}
560	spin_unlock_irqrestore(&dtab->index_lock, flags);
561
562	return ret;
563}
564
565static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
566						    struct bpf_dtab *dtab,
567						    u32 ifindex,
568						    unsigned int idx)
569{
570	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
571	struct bpf_dtab_netdev *dev;
572	struct xdp_bulk_queue *bq;
573	int cpu;
574
575	dev = kmalloc_node(sizeof(*dev), gfp, dtab->map.numa_node);
576	if (!dev)
577		return ERR_PTR(-ENOMEM);
578
579	dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
580					sizeof(void *), gfp);
581	if (!dev->bulkq) {
582		kfree(dev);
583		return ERR_PTR(-ENOMEM);
584	}
585
586	for_each_possible_cpu(cpu) {
587		bq = per_cpu_ptr(dev->bulkq, cpu);
588		bq->obj = dev;
589	}
590
591	dev->dev = dev_get_by_index(net, ifindex);
592	if (!dev->dev) {
593		free_percpu(dev->bulkq);
594		kfree(dev);
595		return ERR_PTR(-EINVAL);
596	}
597
598	dev->idx = idx;
599	dev->dtab = dtab;
600
601	return dev;
602}
603
604static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
605				 void *key, void *value, u64 map_flags)
606{
607	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
608	struct bpf_dtab_netdev *dev, *old_dev;
609	u32 ifindex = *(u32 *)value;
610	u32 i = *(u32 *)key;
611
612	if (unlikely(map_flags > BPF_EXIST))
613		return -EINVAL;
614	if (unlikely(i >= dtab->map.max_entries))
615		return -E2BIG;
616	if (unlikely(map_flags == BPF_NOEXIST))
617		return -EEXIST;
618
619	if (!ifindex) {
620		dev = NULL;
621	} else {
622		dev = __dev_map_alloc_node(net, dtab, ifindex, i);
623		if (IS_ERR(dev))
624			return PTR_ERR(dev);
625	}
626
627	/* Use call_rcu() here to ensure rcu critical sections have completed
628	 * Remembering the driver side flush operation will happen before the
629	 * net device is removed.
630	 */
631	old_dev = xchg(&dtab->netdev_map[i], dev);
632	if (old_dev)
633		call_rcu(&old_dev->rcu, __dev_map_entry_free);
634
635	return 0;
636}
637
638static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
639			       u64 map_flags)
640{
641	return __dev_map_update_elem(current->nsproxy->net_ns,
642				     map, key, value, map_flags);
643}
644
645static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
646				     void *key, void *value, u64 map_flags)
647{
648	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
649	struct bpf_dtab_netdev *dev, *old_dev;
650	u32 ifindex = *(u32 *)value;
651	u32 idx = *(u32 *)key;
652	unsigned long flags;
653	int err = -EEXIST;
654
655	if (unlikely(map_flags > BPF_EXIST || !ifindex))
656		return -EINVAL;
657
658	spin_lock_irqsave(&dtab->index_lock, flags);
659
660	old_dev = __dev_map_hash_lookup_elem(map, idx);
661	if (old_dev && (map_flags & BPF_NOEXIST))
662		goto out_err;
663
664	dev = __dev_map_alloc_node(net, dtab, ifindex, idx);
665	if (IS_ERR(dev)) {
666		err = PTR_ERR(dev);
667		goto out_err;
668	}
669
670	if (old_dev) {
671		hlist_del_rcu(&old_dev->index_hlist);
672	} else {
673		if (dtab->items >= dtab->map.max_entries) {
674			spin_unlock_irqrestore(&dtab->index_lock, flags);
675			call_rcu(&dev->rcu, __dev_map_entry_free);
676			return -E2BIG;
677		}
678		dtab->items++;
679	}
680
681	hlist_add_head_rcu(&dev->index_hlist,
682			   dev_map_index_hash(dtab, idx));
683	spin_unlock_irqrestore(&dtab->index_lock, flags);
684
685	if (old_dev)
686		call_rcu(&old_dev->rcu, __dev_map_entry_free);
687
688	return 0;
689
690out_err:
691	spin_unlock_irqrestore(&dtab->index_lock, flags);
692	return err;
693}
694
695static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
696				   u64 map_flags)
697{
698	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
699					 map, key, value, map_flags);
700}
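For the hash variant, user space typically uses the ifindex itself as both key and value, e.g. bpf_map_update_elem(fd, &ifindex, &ifindex, 0) with libbpf, so that a BPF-side lookup keyed by ifindex (as sketched after the header comment) matches the entries stored here.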
701
702const struct bpf_map_ops dev_map_ops = {
703	.map_alloc = dev_map_alloc,
704	.map_free = dev_map_free,
705	.map_get_next_key = dev_map_get_next_key,
706	.map_lookup_elem = dev_map_lookup_elem,
707	.map_update_elem = dev_map_update_elem,
708	.map_delete_elem = dev_map_delete_elem,
709	.map_check_btf = map_check_no_btf,
710};
711
712const struct bpf_map_ops dev_map_hash_ops = {
713	.map_alloc = dev_map_alloc,
714	.map_free = dev_map_free,
715	.map_get_next_key = dev_map_hash_get_next_key,
716	.map_lookup_elem = dev_map_hash_lookup_elem,
717	.map_update_elem = dev_map_hash_update_elem,
718	.map_delete_elem = dev_map_hash_delete_elem,
719	.map_check_btf = map_check_no_btf,
720};
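Neither ops table is referenced again in this file; the association with the two map types is made in include/linux/bpf_types.h through the BPF_MAP_TYPE() x-macro, roughly:

BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)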
721
722static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
723				       struct net_device *netdev)
724{
725	unsigned long flags;
726	u32 i;
727
728	spin_lock_irqsave(&dtab->index_lock, flags);
729	for (i = 0; i < dtab->n_buckets; i++) {
730		struct bpf_dtab_netdev *dev;
731		struct hlist_head *head;
732		struct hlist_node *next;
733
734		head = dev_map_index_hash(dtab, i);
735
736		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
737			if (netdev != dev->dev)
738				continue;
739
740			dtab->items--;
741			hlist_del_rcu(&dev->index_hlist);
742			call_rcu(&dev->rcu, __dev_map_entry_free);
743		}
744	}
745	spin_unlock_irqrestore(&dtab->index_lock, flags);
746}
747
748static int dev_map_notification(struct notifier_block *notifier,
749				ulong event, void *ptr)
750{
751	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
752	struct bpf_dtab *dtab;
753	int i;
754
755	switch (event) {
756	case NETDEV_UNREGISTER:
757		/* This rcu_read_lock/unlock pair is needed because
758		 * dev_map_list is an RCU list AND to ensure a delete
759		 * operation does not free a netdev_map entry while we
760		 * are comparing it against the netdev being unregistered.
761		 */
762		rcu_read_lock();
763		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
764			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
765				dev_map_hash_remove_netdev(dtab, netdev);
766				continue;
767			}
768
769			for (i = 0; i < dtab->map.max_entries; i++) {
770				struct bpf_dtab_netdev *dev, *odev;
771
772				dev = READ_ONCE(dtab->netdev_map[i]);
773				if (!dev || netdev != dev->dev)
774					continue;
775				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
776				if (dev == odev)
777					call_rcu(&dev->rcu,
778						 __dev_map_entry_free);
779			}
780		}
781		rcu_read_unlock();
782		break;
783	default:
784		break;
785	}
786	return NOTIFY_OK;
787}
788
789static struct notifier_block dev_map_notifier = {
790	.notifier_call = dev_map_notification,
791};
792
793static int __init dev_map_init(void)
794{
795	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
796	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
797		     offsetof(struct _bpf_dtab_netdev, dev));
798	register_netdevice_notifier(&dev_map_notifier);
799	return 0;
800}
801
802subsys_initcall(dev_map_init);