net/core/page_pool_user.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0
  2
  3#include <linux/mutex.h>
  4#include <linux/netdevice.h>
  5#include <linux/xarray.h>
  6#include <net/busy_poll.h>
  7#include <net/net_debug.h>
  8#include <net/netdev_rx_queue.h>
  9#include <net/page_pool/helpers.h>
 10#include <net/page_pool/types.h>
 11#include <net/sock.h>
 12
 13#include "devmem.h"
 14#include "page_pool_priv.h"
 15#include "netdev-genl-gen.h"
 16
 17static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);
 18/* Protects: page_pools, netdevice->page_pools, pool->p.napi, pool->slow.netdev,
 19 *	pool->user.
 20 * Ordering: inside rtnl_lock
 21 */
 22DEFINE_MUTEX(page_pools_lock);
 23
 24/* Page pools are only reachable from user space (via netlink) if they are
 25 * linked to a netdev at creation time. Following page pool "visibility"
 26 * states are possible:
 27 *  - normal
 28 *    - user.list: linked to real netdev, netdev: real netdev
 29 *  - orphaned - real netdev has disappeared
 30 *    - user.list: linked to lo, netdev: lo
 31 *  - invisible - either (a) created without netdev linking, (b) unlisted due
 32 *      to error, or (c) the entire namespace which owned this pool disappeared
 33 *    - user.list: unhashed, netdev: unknown
 34 */
 35
 36typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
 37			     const struct genl_info *info);
 38
 39static int
 40netdev_nl_page_pool_get_do(struct genl_info *info, u32 id, pp_nl_fill_cb fill)
 41{
 42	struct page_pool *pool;
 43	struct sk_buff *rsp;
 44	int err;
 45
 46	mutex_lock(&page_pools_lock);
 47	pool = xa_load(&page_pools, id);
 48	if (!pool || hlist_unhashed(&pool->user.list) ||
 49	    !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) {
 50		err = -ENOENT;
 51		goto err_unlock;
 52	}
 53
 54	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
 55	if (!rsp) {
 56		err = -ENOMEM;
 57		goto err_unlock;
 58	}
 59
 60	err = fill(rsp, pool, info);
 61	if (err)
 62		goto err_free_msg;
 63
 64	mutex_unlock(&page_pools_lock);
 65
 66	return genlmsg_reply(rsp, info);
 67
 68err_free_msg:
 69	nlmsg_free(rsp);
 70err_unlock:
 71	mutex_unlock(&page_pools_lock);
 72	return err;
 73}
 74
 75struct page_pool_dump_cb {
 76	unsigned long ifindex;
 77	u32 pp_id;
 78};
 79
 80static int
 81netdev_nl_page_pool_get_dump(struct sk_buff *skb, struct netlink_callback *cb,
 82			     pp_nl_fill_cb fill)
 83{
 84	struct page_pool_dump_cb *state = (void *)cb->ctx;
 85	const struct genl_info *info = genl_info_dump(cb);
 86	struct net *net = sock_net(skb->sk);
 87	struct net_device *netdev;
 88	struct page_pool *pool;
 89	int err = 0;
 90
 91	rtnl_lock();
 92	mutex_lock(&page_pools_lock);
 93	for_each_netdev_dump(net, netdev, state->ifindex) {
 94		hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
 95			if (state->pp_id && state->pp_id < pool->user.id)
 96				continue;
 97
 98			state->pp_id = pool->user.id;
 99			err = fill(skb, pool, info);
100			if (err)
101				goto out;
102		}
103
104		state->pp_id = 0;
105	}
106out:
107	mutex_unlock(&page_pools_lock);
108	rtnl_unlock();
109
110	return err;
111}
112
113static int
114page_pool_nl_stats_fill(struct sk_buff *rsp, const struct page_pool *pool,
115			const struct genl_info *info)
116{
117#ifdef CONFIG_PAGE_POOL_STATS
118	struct page_pool_stats stats = {};
119	struct nlattr *nest;
120	void *hdr;
121
122	if (!page_pool_get_stats(pool, &stats))
123		return 0;
124
125	hdr = genlmsg_iput(rsp, info);
126	if (!hdr)
127		return -EMSGSIZE;
128
129	nest = nla_nest_start(rsp, NETDEV_A_PAGE_POOL_STATS_INFO);
130
131	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id) ||
132	    (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
133	     nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
134			 pool->slow.netdev->ifindex)))
135		goto err_cancel_nest;
136
137	nla_nest_end(rsp, nest);
138
139	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST,
140			 stats.alloc_stats.fast) ||
141	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW,
142			 stats.alloc_stats.slow) ||
143	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER,
144			 stats.alloc_stats.slow_high_order) ||
145	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY,
146			 stats.alloc_stats.empty) ||
147	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL,
148			 stats.alloc_stats.refill) ||
149	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
150			 stats.alloc_stats.waive) ||
151	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
152			 stats.recycle_stats.cached) ||
153	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
154			 stats.recycle_stats.cache_full) ||
155	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING,
156			 stats.recycle_stats.ring) ||
157	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL,
158			 stats.recycle_stats.ring_full) ||
159	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT,
160			 stats.recycle_stats.released_refcnt))
161		goto err_cancel_msg;
162
163	genlmsg_end(rsp, hdr);
164
165	return 0;
166err_cancel_nest:
167	nla_nest_cancel(rsp, nest);
168err_cancel_msg:
169	genlmsg_cancel(rsp, hdr);
170	return -EMSGSIZE;
171#else
172	GENL_SET_ERR_MSG(info, "kernel built without CONFIG_PAGE_POOL_STATS");
173	return -EOPNOTSUPP;
174#endif
175}
176
177int netdev_nl_page_pool_stats_get_doit(struct sk_buff *skb,
178				       struct genl_info *info)
179{
180	struct nlattr *tb[ARRAY_SIZE(netdev_page_pool_info_nl_policy)];
181	struct nlattr *nest;
182	int err;
183	u32 id;
184
185	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_STATS_INFO))
186		return -EINVAL;
187
188	nest = info->attrs[NETDEV_A_PAGE_POOL_STATS_INFO];
189	err = nla_parse_nested(tb, ARRAY_SIZE(tb) - 1, nest,
190			       netdev_page_pool_info_nl_policy,
191			       info->extack);
192	if (err)
193		return err;
194
195	if (NL_REQ_ATTR_CHECK(info->extack, nest, tb, NETDEV_A_PAGE_POOL_ID))
196		return -EINVAL;
197	if (tb[NETDEV_A_PAGE_POOL_IFINDEX]) {
198		NL_SET_ERR_MSG_ATTR(info->extack,
199				    tb[NETDEV_A_PAGE_POOL_IFINDEX],
200				    "selecting by ifindex not supported");
201		return -EINVAL;
202	}
203
204	id = nla_get_uint(tb[NETDEV_A_PAGE_POOL_ID]);
205
206	return netdev_nl_page_pool_get_do(info, id, page_pool_nl_stats_fill);
207}
208
209int netdev_nl_page_pool_stats_get_dumpit(struct sk_buff *skb,
210					 struct netlink_callback *cb)
211{
212	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_stats_fill);
213}
214
215static int
216page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
217		  const struct genl_info *info)
218{
219	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
220	size_t inflight, refsz;
221	unsigned int napi_id;
222	void *hdr;
223
224	hdr = genlmsg_iput(rsp, info);
225	if (!hdr)
226		return -EMSGSIZE;
227
228	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id))
229		goto err_cancel;
230
231	if (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
232	    nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
233			pool->slow.netdev->ifindex))
234		goto err_cancel;
235
236	napi_id = pool->p.napi ? READ_ONCE(pool->p.napi->napi_id) : 0;
237	if (napi_id >= MIN_NAPI_ID &&
238	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, napi_id))
239		goto err_cancel;
240
241	inflight = page_pool_inflight(pool, false);
242	refsz =	PAGE_SIZE << pool->p.order;
243	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT, inflight) ||
244	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
245			 inflight * refsz))
246		goto err_cancel;
247	if (pool->user.detach_time &&
248	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_DETACH_TIME,
249			 pool->user.detach_time))
250		goto err_cancel;
251
252	if (binding && nla_put_u32(rsp, NETDEV_A_PAGE_POOL_DMABUF, binding->id))
253		goto err_cancel;
254
255	genlmsg_end(rsp, hdr);
256
257	return 0;
258err_cancel:
259	genlmsg_cancel(rsp, hdr);
260	return -EMSGSIZE;
261}
262
263static void netdev_nl_page_pool_event(const struct page_pool *pool, u32 cmd)
264{
265	struct genl_info info;
266	struct sk_buff *ntf;
267	struct net *net;
268
269	lockdep_assert_held(&page_pools_lock);
270
271	/* 'invisible' page pools don't matter */
272	if (hlist_unhashed(&pool->user.list))
273		return;
274	net = dev_net(pool->slow.netdev);
275
276	if (!genl_has_listeners(&netdev_nl_family, net, NETDEV_NLGRP_PAGE_POOL))
277		return;
278
279	genl_info_init_ntf(&info, &netdev_nl_family, cmd);
280
281	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
282	if (!ntf)
283		return;
284
285	if (page_pool_nl_fill(ntf, pool, &info)) {
286		nlmsg_free(ntf);
287		return;
288	}
289
290	genlmsg_multicast_netns(&netdev_nl_family, net, ntf,
291				0, NETDEV_NLGRP_PAGE_POOL, GFP_KERNEL);
292}
293
294int netdev_nl_page_pool_get_doit(struct sk_buff *skb, struct genl_info *info)
295{
296	u32 id;
297
298	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_ID))
299		return -EINVAL;
300
301	id = nla_get_uint(info->attrs[NETDEV_A_PAGE_POOL_ID]);
302
303	return netdev_nl_page_pool_get_do(info, id, page_pool_nl_fill);
304}
305
306int netdev_nl_page_pool_get_dumpit(struct sk_buff *skb,
307				   struct netlink_callback *cb)
308{
309	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_fill);
310}
311
312int page_pool_list(struct page_pool *pool)
313{
314	static u32 id_alloc_next;
315	int err;
316
317	mutex_lock(&page_pools_lock);
318	err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool, xa_limit_32b,
319			      &id_alloc_next, GFP_KERNEL);
320	if (err < 0)
321		goto err_unlock;
322
323	INIT_HLIST_NODE(&pool->user.list);
324	if (pool->slow.netdev) {
325		hlist_add_head(&pool->user.list,
326			       &pool->slow.netdev->page_pools);
327		netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF);
328	}
329
330	mutex_unlock(&page_pools_lock);
331	return 0;
332
333err_unlock:
334	mutex_unlock(&page_pools_lock);
335	return err;
336}
337
338void page_pool_detached(struct page_pool *pool)
339{
340	mutex_lock(&page_pools_lock);
341	pool->user.detach_time = ktime_get_boottime_seconds();
342	netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
343	mutex_unlock(&page_pools_lock);
344}
345
346void page_pool_unlist(struct page_pool *pool)
347{
348	mutex_lock(&page_pools_lock);
349	netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_DEL_NTF);
350	xa_erase(&page_pools, pool->user.id);
351	if (!hlist_unhashed(&pool->user.list))
352		hlist_del(&pool->user.list);
353	mutex_unlock(&page_pools_lock);
354}
355
356int page_pool_check_memory_provider(struct net_device *dev,
357				    struct netdev_rx_queue *rxq)
358{
359	struct net_devmem_dmabuf_binding *binding = rxq->mp_params.mp_priv;
360	struct page_pool *pool;
361	struct hlist_node *n;
362
363	if (!binding)
364		return 0;
365
366	mutex_lock(&page_pools_lock);
367	hlist_for_each_entry_safe(pool, n, &dev->page_pools, user.list) {
368		if (pool->mp_priv != binding)
369			continue;
370
371		if (pool->slow.queue_idx == get_netdev_rx_queue_index(rxq)) {
372			mutex_unlock(&page_pools_lock);
373			return 0;
374		}
375	}
376	mutex_unlock(&page_pools_lock);
377	return -ENODATA;
378}
379
380static void page_pool_unreg_netdev_wipe(struct net_device *netdev)
381{
382	struct page_pool *pool;
383	struct hlist_node *n;
384
385	mutex_lock(&page_pools_lock);
386	hlist_for_each_entry_safe(pool, n, &netdev->page_pools, user.list) {
387		hlist_del_init(&pool->user.list);
388		pool->slow.netdev = NET_PTR_POISON;
389	}
390	mutex_unlock(&page_pools_lock);
391}
392
393static void page_pool_unreg_netdev(struct net_device *netdev)
394{
395	struct page_pool *pool, *last;
396	struct net_device *lo;
397
398	lo = dev_net(netdev)->loopback_dev;
399
400	mutex_lock(&page_pools_lock);
401	last = NULL;
402	hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
403		pool->slow.netdev = lo;
404		netdev_nl_page_pool_event(pool,
405					  NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
406		last = pool;
407	}
408	if (last)
409		hlist_splice_init(&netdev->page_pools, &last->user.list,
410				  &lo->page_pools);
411	mutex_unlock(&page_pools_lock);
412}
413
414static int
415page_pool_netdevice_event(struct notifier_block *nb,
416			  unsigned long event, void *ptr)
417{
418	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
419
420	if (event != NETDEV_UNREGISTER)
421		return NOTIFY_DONE;
422
423	if (hlist_empty(&netdev->page_pools))
424		return NOTIFY_OK;
425
426	if (netdev->ifindex != LOOPBACK_IFINDEX)
427		page_pool_unreg_netdev(netdev);
428	else
429		page_pool_unreg_netdev_wipe(netdev);
430	return NOTIFY_OK;
431}
432
433static struct notifier_block page_pool_netdevice_nb = {
434	.notifier_call = page_pool_netdevice_event,
435};
436
437static int __init page_pool_user_init(void)
438{
439	return register_netdevice_notifier(&page_pool_netdevice_nb);
440}
441
442subsys_initcall(page_pool_user_init);
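
The comment at the top of the file spells out when a pool is reachable over netlink at all: only if the driver linked it to a netdev at creation time. For context, here is a minimal driver-side sketch of creating such a pool (function and variable names are illustrative, not taken from this file):

	/*
	 * Hypothetical driver helper, for illustration only. Setting
	 * params.netdev is what lets page_pool_list() hash the pool onto
	 * netdev->page_pools; params.napi is what page_pool_nl_fill()
	 * reports as the pool's NAPI ID.
	 */
	#include <linux/dma-direction.h>
	#include <linux/netdevice.h>
	#include <net/page_pool/types.h>

	static struct page_pool *
	example_rxq_create_pool(struct net_device *netdev, struct napi_struct *napi,
				struct device *dma_dev)
	{
		struct page_pool_params pp = {
			.flags		= PP_FLAG_DMA_MAP,	/* pool maps pages for DMA */
			.order		= 0,			/* single pages */
			.pool_size	= 1024,			/* driver specific */
			.nid		= NUMA_NO_NODE,
			.dev		= dma_dev,
			.dma_dir	= DMA_FROM_DEVICE,
			.napi		= napi,			/* exposed as napi-id */
			.netdev		= netdev,		/* makes the pool user-visible */
		};

		return page_pool_create(&pp);	/* ERR_PTR() on failure */
	}

Tearing the pool down with page_pool_destroy() later drives the page_pool_detached() and page_pool_unlist() paths above.
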
net/core/page_pool_user.c (v6.8)
  1// SPDX-License-Identifier: GPL-2.0
  2
  3#include <linux/mutex.h>
  4#include <linux/netdevice.h>
  5#include <linux/xarray.h>
  6#include <net/net_debug.h>
  7#include <net/page_pool/types.h>
  8#include <net/page_pool/helpers.h>
  9#include <net/sock.h>
 10
 11#include "page_pool_priv.h"
 12#include "netdev-genl-gen.h"
 13
 14static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);
 15/* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user.
 16 * Ordering: inside rtnl_lock
 17 */
 18static DEFINE_MUTEX(page_pools_lock);
 19
 20/* Page pools are only reachable from user space (via netlink) if they are
 21 * linked to a netdev at creation time. Following page pool "visibility"
 22 * states are possible:
 23 *  - normal
 24 *    - user.list: linked to real netdev, netdev: real netdev
 25 *  - orphaned - real netdev has disappeared
 26 *    - user.list: linked to lo, netdev: lo
 27 *  - invisible - either (a) created without netdev linking, (b) unlisted due
 28 *      to error, or (c) the entire namespace which owned this pool disappeared
 29 *    - user.list: unhashed, netdev: unknown
 30 */
 31
 32typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
 33			     const struct genl_info *info);
 34
 35static int
 36netdev_nl_page_pool_get_do(struct genl_info *info, u32 id, pp_nl_fill_cb fill)
 37{
 38	struct page_pool *pool;
 39	struct sk_buff *rsp;
 40	int err;
 41
 42	mutex_lock(&page_pools_lock);
 43	pool = xa_load(&page_pools, id);
 44	if (!pool || hlist_unhashed(&pool->user.list) ||
 45	    !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) {
 46		err = -ENOENT;
 47		goto err_unlock;
 48	}
 49
 50	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
 51	if (!rsp) {
 52		err = -ENOMEM;
 53		goto err_unlock;
 54	}
 55
 56	err = fill(rsp, pool, info);
 57	if (err)
 58		goto err_free_msg;
 59
 60	mutex_unlock(&page_pools_lock);
 61
 62	return genlmsg_reply(rsp, info);
 63
 64err_free_msg:
 65	nlmsg_free(rsp);
 66err_unlock:
 67	mutex_unlock(&page_pools_lock);
 68	return err;
 69}
 70
 71struct page_pool_dump_cb {
 72	unsigned long ifindex;
 73	u32 pp_id;
 74};
 75
 76static int
 77netdev_nl_page_pool_get_dump(struct sk_buff *skb, struct netlink_callback *cb,
 78			     pp_nl_fill_cb fill)
 79{
 80	struct page_pool_dump_cb *state = (void *)cb->ctx;
 81	const struct genl_info *info = genl_info_dump(cb);
 82	struct net *net = sock_net(skb->sk);
 83	struct net_device *netdev;
 84	struct page_pool *pool;
 85	int err = 0;
 86
 87	rtnl_lock();
 88	mutex_lock(&page_pools_lock);
 89	for_each_netdev_dump(net, netdev, state->ifindex) {
 90		hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
 91			if (state->pp_id && state->pp_id < pool->user.id)
 92				continue;
 93
 94			state->pp_id = pool->user.id;
 95			err = fill(skb, pool, info);
 96			if (err)
 97				goto out;
 98		}
 99
100		state->pp_id = 0;
101	}
102out:
103	mutex_unlock(&page_pools_lock);
104	rtnl_unlock();
105
106	if (skb->len && err == -EMSGSIZE)
107		return skb->len;
108	return err;
109}
110
111static int
112page_pool_nl_stats_fill(struct sk_buff *rsp, const struct page_pool *pool,
113			const struct genl_info *info)
114{
115#ifdef CONFIG_PAGE_POOL_STATS
116	struct page_pool_stats stats = {};
117	struct nlattr *nest;
118	void *hdr;
119
120	if (!page_pool_get_stats(pool, &stats))
121		return 0;
122
123	hdr = genlmsg_iput(rsp, info);
124	if (!hdr)
125		return -EMSGSIZE;
126
127	nest = nla_nest_start(rsp, NETDEV_A_PAGE_POOL_STATS_INFO);
128
129	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id) ||
130	    (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
131	     nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
132			 pool->slow.netdev->ifindex)))
133		goto err_cancel_nest;
134
135	nla_nest_end(rsp, nest);
136
137	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST,
138			 stats.alloc_stats.fast) ||
139	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW,
140			 stats.alloc_stats.slow) ||
141	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER,
142			 stats.alloc_stats.slow_high_order) ||
143	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY,
144			 stats.alloc_stats.empty) ||
145	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL,
146			 stats.alloc_stats.refill) ||
147	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
148			 stats.alloc_stats.waive) ||
149	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
150			 stats.recycle_stats.cached) ||
151	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
152			 stats.recycle_stats.cache_full) ||
153	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING,
154			 stats.recycle_stats.ring) ||
155	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL,
156			 stats.recycle_stats.ring_full) ||
157	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT,
158			 stats.recycle_stats.released_refcnt))
159		goto err_cancel_msg;
160
161	genlmsg_end(rsp, hdr);
162
163	return 0;
164err_cancel_nest:
165	nla_nest_cancel(rsp, nest);
166err_cancel_msg:
167	genlmsg_cancel(rsp, hdr);
168	return -EMSGSIZE;
169#else
170	GENL_SET_ERR_MSG(info, "kernel built without CONFIG_PAGE_POOL_STATS");
171	return -EOPNOTSUPP;
172#endif
173}
174
175int netdev_nl_page_pool_stats_get_doit(struct sk_buff *skb,
176				       struct genl_info *info)
177{
178	struct nlattr *tb[ARRAY_SIZE(netdev_page_pool_info_nl_policy)];
179	struct nlattr *nest;
180	int err;
181	u32 id;
182
183	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_STATS_INFO))
184		return -EINVAL;
185
186	nest = info->attrs[NETDEV_A_PAGE_POOL_STATS_INFO];
187	err = nla_parse_nested(tb, ARRAY_SIZE(tb) - 1, nest,
188			       netdev_page_pool_info_nl_policy,
189			       info->extack);
190	if (err)
191		return err;
192
193	if (NL_REQ_ATTR_CHECK(info->extack, nest, tb, NETDEV_A_PAGE_POOL_ID))
194		return -EINVAL;
195	if (tb[NETDEV_A_PAGE_POOL_IFINDEX]) {
196		NL_SET_ERR_MSG_ATTR(info->extack,
197				    tb[NETDEV_A_PAGE_POOL_IFINDEX],
198				    "selecting by ifindex not supported");
199		return -EINVAL;
200	}
201
202	id = nla_get_uint(tb[NETDEV_A_PAGE_POOL_ID]);
203
204	return netdev_nl_page_pool_get_do(info, id, page_pool_nl_stats_fill);
205}
206
207int netdev_nl_page_pool_stats_get_dumpit(struct sk_buff *skb,
208					 struct netlink_callback *cb)
209{
210	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_stats_fill);
211}
212
213static int
214page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
215		  const struct genl_info *info)
216{
217	size_t inflight, refsz;
218	void *hdr;
219
220	hdr = genlmsg_iput(rsp, info);
221	if (!hdr)
222		return -EMSGSIZE;
223
224	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id))
225		goto err_cancel;
226
227	if (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
228	    nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
229			pool->slow.netdev->ifindex))
230		goto err_cancel;
231	if (pool->user.napi_id &&
232	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id))
233		goto err_cancel;
234
235	inflight = page_pool_inflight(pool, false);
236	refsz =	PAGE_SIZE << pool->p.order;
237	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT, inflight) ||
238	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
239			 inflight * refsz))
240		goto err_cancel;
241	if (pool->user.detach_time &&
242	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_DETACH_TIME,
243			 pool->user.detach_time))
244		goto err_cancel;
245
246	genlmsg_end(rsp, hdr);
247
248	return 0;
249err_cancel:
250	genlmsg_cancel(rsp, hdr);
251	return -EMSGSIZE;
252}
253
254static void netdev_nl_page_pool_event(const struct page_pool *pool, u32 cmd)
255{
256	struct genl_info info;
257	struct sk_buff *ntf;
258	struct net *net;
259
260	lockdep_assert_held(&page_pools_lock);
261
262	/* 'invisible' page pools don't matter */
263	if (hlist_unhashed(&pool->user.list))
264		return;
265	net = dev_net(pool->slow.netdev);
266
267	if (!genl_has_listeners(&netdev_nl_family, net, NETDEV_NLGRP_PAGE_POOL))
268		return;
269
270	genl_info_init_ntf(&info, &netdev_nl_family, cmd);
271
272	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
273	if (!ntf)
274		return;
275
276	if (page_pool_nl_fill(ntf, pool, &info)) {
277		nlmsg_free(ntf);
278		return;
279	}
280
281	genlmsg_multicast_netns(&netdev_nl_family, net, ntf,
282				0, NETDEV_NLGRP_PAGE_POOL, GFP_KERNEL);
283}
284
285int netdev_nl_page_pool_get_doit(struct sk_buff *skb, struct genl_info *info)
286{
287	u32 id;
288
289	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_ID))
290		return -EINVAL;
291
292	id = nla_get_uint(info->attrs[NETDEV_A_PAGE_POOL_ID]);
293
294	return netdev_nl_page_pool_get_do(info, id, page_pool_nl_fill);
295}
296
297int netdev_nl_page_pool_get_dumpit(struct sk_buff *skb,
298				   struct netlink_callback *cb)
299{
300	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_fill);
301}
302
303int page_pool_list(struct page_pool *pool)
304{
305	static u32 id_alloc_next;
306	int err;
307
308	mutex_lock(&page_pools_lock);
309	err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool, xa_limit_32b,
310			      &id_alloc_next, GFP_KERNEL);
311	if (err < 0)
312		goto err_unlock;
313
314	INIT_HLIST_NODE(&pool->user.list);
315	if (pool->slow.netdev) {
316		hlist_add_head(&pool->user.list,
317			       &pool->slow.netdev->page_pools);
318		pool->user.napi_id = pool->p.napi ? pool->p.napi->napi_id : 0;
319
320		netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF);
321	}
322
323	mutex_unlock(&page_pools_lock);
324	return 0;
325
326err_unlock:
327	mutex_unlock(&page_pools_lock);
328	return err;
329}
330
331void page_pool_detached(struct page_pool *pool)
332{
333	mutex_lock(&page_pools_lock);
334	pool->user.detach_time = ktime_get_boottime_seconds();
335	netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
336	mutex_unlock(&page_pools_lock);
337}
338
339void page_pool_unlist(struct page_pool *pool)
340{
341	mutex_lock(&page_pools_lock);
342	netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_DEL_NTF);
343	xa_erase(&page_pools, pool->user.id);
344	if (!hlist_unhashed(&pool->user.list))
345		hlist_del(&pool->user.list);
346	mutex_unlock(&page_pools_lock);
347}
348
349static void page_pool_unreg_netdev_wipe(struct net_device *netdev)
350{
351	struct page_pool *pool;
352	struct hlist_node *n;
353
354	mutex_lock(&page_pools_lock);
355	hlist_for_each_entry_safe(pool, n, &netdev->page_pools, user.list) {
356		hlist_del_init(&pool->user.list);
357		pool->slow.netdev = NET_PTR_POISON;
358	}
359	mutex_unlock(&page_pools_lock);
360}
361
362static void page_pool_unreg_netdev(struct net_device *netdev)
363{
364	struct page_pool *pool, *last;
365	struct net_device *lo;
366
367	lo = dev_net(netdev)->loopback_dev;
368
369	mutex_lock(&page_pools_lock);
370	last = NULL;
371	hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
372		pool->slow.netdev = lo;
373		netdev_nl_page_pool_event(pool,
374					  NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
375		last = pool;
376	}
377	if (last)
378		hlist_splice_init(&netdev->page_pools, &last->user.list,
379				  &lo->page_pools);
380	mutex_unlock(&page_pools_lock);
381}
382
383static int
384page_pool_netdevice_event(struct notifier_block *nb,
385			  unsigned long event, void *ptr)
386{
387	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
388
389	if (event != NETDEV_UNREGISTER)
390		return NOTIFY_DONE;
391
392	if (hlist_empty(&netdev->page_pools))
393		return NOTIFY_OK;
394
395	if (netdev->ifindex != LOOPBACK_IFINDEX)
396		page_pool_unreg_netdev(netdev);
397	else
398		page_pool_unreg_netdev_wipe(netdev);
399	return NOTIFY_OK;
400}
401
402static struct notifier_block page_pool_netdevice_nb = {
403	.notifier_call = page_pool_netdevice_event,
404};
405
406static int __init page_pool_user_init(void)
407{
408	return register_netdevice_notifier(&page_pool_netdevice_nb);
409}
410
411subsys_initcall(page_pool_user_init);
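
In both versions the data is exported through the "netdev" generic netlink family, so the pools registered by page_pool_list() can be inspected from user space, for example with the in-tree YNL CLI (paths as in the mainline tree; attribute names follow Documentation/netlink/specs/netdev.yaml):

	./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
	                       --dump page-pool-get
	./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
	                       --dump page-pool-stats-get

The stats request is rejected with EOPNOTSUPP by page_pool_nl_stats_fill() unless the kernel was built with CONFIG_PAGE_POOL_STATS.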