// SPDX-License-Identifier: GPL-2.0

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/xarray.h>
#include <net/net_debug.h>
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>
#include <net/sock.h>

#include "page_pool_priv.h"
#include "netdev-genl-gen.h"

static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);
/* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user.
 * Ordering: inside rtnl_lock
 */
static DEFINE_MUTEX(page_pools_lock);

/* Page pools are only reachable from user space (via netlink) if they are
 * linked to a netdev at creation time. The following page pool "visibility"
 * states are possible:
 *  - normal
 *    - user.list: linked to real netdev, netdev: real netdev
 *  - orphaned - real netdev has disappeared
 *    - user.list: linked to lo, netdev: lo
 *  - invisible - either (a) created without netdev linking, (b) unlisted due
 *    to error, or (c) the entire namespace which owned this pool disappeared
 *    - user.list: unhashed, netdev: unknown
 */

typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
			     const struct genl_info *info);

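/* Resolve a single page pool by its user-visible id and send a reply built
 * by the @fill callback. Pools that are unhashed from their netdev list, or
 * that belong to a different netns than the requester, are reported as
 * -ENOENT.
 */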
static int
netdev_nl_page_pool_get_do(struct genl_info *info, u32 id, pp_nl_fill_cb fill)
{
	struct page_pool *pool;
	struct sk_buff *rsp;
	int err;

	mutex_lock(&page_pools_lock);
	pool = xa_load(&page_pools, id);
	if (!pool || hlist_unhashed(&pool->user.list) ||
	    !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) {
		err = -ENOENT;
		goto err_unlock;
	}

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = fill(rsp, pool, info);
	if (err)
		goto err_free_msg;

	mutex_unlock(&page_pools_lock);

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
err_unlock:
	mutex_unlock(&page_pools_lock);
	return err;
}

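/* Dump resume point; lives in netlink_callback::ctx between dump invocations. */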
struct page_pool_dump_cb {
	unsigned long ifindex;
	u32 pp_id;
};

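/* Walk every netdev in the caller's netns and dump its page pools through
 * the @fill callback, resuming from the position recorded in cb->ctx.
 */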
static int
netdev_nl_page_pool_get_dump(struct sk_buff *skb, struct netlink_callback *cb,
			     pp_nl_fill_cb fill)
{
	struct page_pool_dump_cb *state = (void *)cb->ctx;
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct page_pool *pool;
	int err = 0;

	rtnl_lock();
	mutex_lock(&page_pools_lock);
	for_each_netdev_dump(net, netdev, state->ifindex) {
		hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
			if (state->pp_id && state->pp_id < pool->user.id)
				continue;

			state->pp_id = pool->user.id;
			err = fill(skb, pool, info);
			if (err)
				goto out;
		}

		state->pp_id = 0;
	}
out:
	mutex_unlock(&page_pools_lock);
	rtnl_unlock();

	if (skb->len && err == -EMSGSIZE)
		return skb->len;
	return err;
}

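/* Emit one PAGE_POOL_STATS message: a nested PAGE_POOL_STATS_INFO identifying
 * the pool, followed by the allocation and recycling counters. Without
 * CONFIG_PAGE_POOL_STATS the request fails with -EOPNOTSUPP.
 */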
static int
page_pool_nl_stats_fill(struct sk_buff *rsp, const struct page_pool *pool,
			const struct genl_info *info)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	struct nlattr *nest;
	void *hdr;

	if (!page_pool_get_stats(pool, &stats))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	nest = nla_nest_start(rsp, NETDEV_A_PAGE_POOL_STATS_INFO);

	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id) ||
	    (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
	     nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
			 pool->slow.netdev->ifindex)))
		goto err_cancel_nest;

	nla_nest_end(rsp, nest);

	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST,
			 stats.alloc_stats.fast) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW,
			 stats.alloc_stats.slow) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER,
			 stats.alloc_stats.slow_high_order) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY,
			 stats.alloc_stats.empty) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL,
			 stats.alloc_stats.refill) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
			 stats.alloc_stats.waive) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
			 stats.recycle_stats.cached) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
			 stats.recycle_stats.cache_full) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING,
			 stats.recycle_stats.ring) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL,
			 stats.recycle_stats.ring_full) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT,
			 stats.recycle_stats.released_refcnt))
		goto err_cancel_msg;

	genlmsg_end(rsp, hdr);

	return 0;
err_cancel_nest:
	nla_nest_cancel(rsp, nest);
err_cancel_msg:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
#else
	GENL_SET_ERR_MSG(info, "kernel built without CONFIG_PAGE_POOL_STATS");
	return -EOPNOTSUPP;
#endif
}

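/* STATS_GET doit handler: parse the nested PAGE_POOL_STATS_INFO attribute,
 * require a pool id and reject selection by ifindex, then reply with the
 * stats of that single pool.
 */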
int netdev_nl_page_pool_stats_get_doit(struct sk_buff *skb,
				       struct genl_info *info)
{
	struct nlattr *tb[ARRAY_SIZE(netdev_page_pool_info_nl_policy)];
	struct nlattr *nest;
	int err;
	u32 id;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_STATS_INFO))
		return -EINVAL;

	nest = info->attrs[NETDEV_A_PAGE_POOL_STATS_INFO];
	err = nla_parse_nested(tb, ARRAY_SIZE(tb) - 1, nest,
			       netdev_page_pool_info_nl_policy,
			       info->extack);
	if (err)
		return err;

	if (NL_REQ_ATTR_CHECK(info->extack, nest, tb, NETDEV_A_PAGE_POOL_ID))
		return -EINVAL;
	if (tb[NETDEV_A_PAGE_POOL_IFINDEX]) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NETDEV_A_PAGE_POOL_IFINDEX],
				    "selecting by ifindex not supported");
		return -EINVAL;
	}

	id = nla_get_uint(tb[NETDEV_A_PAGE_POOL_ID]);

	return netdev_nl_page_pool_get_do(info, id, page_pool_nl_stats_fill);
}

int netdev_nl_page_pool_stats_get_dumpit(struct sk_buff *skb,
					 struct netlink_callback *cb)
{
	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_stats_fill);
}

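/* Fill one PAGE_POOL message: id, owning ifindex (skipped when the pool now
 * points at loopback), NAPI id if set, pages and bytes still in flight, and
 * the detach time once the pool has started shutting down.
 */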
static int
page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
		  const struct genl_info *info)
{
	size_t inflight, refsz;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id))
		goto err_cancel;

	if (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
	    nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
			pool->slow.netdev->ifindex))
		goto err_cancel;
	if (pool->user.napi_id &&
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id))
		goto err_cancel;

	inflight = page_pool_inflight(pool, false);
	refsz = PAGE_SIZE << pool->p.order;
	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT, inflight) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
			 inflight * refsz))
		goto err_cancel;
	if (pool->user.detach_time &&
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_DETACH_TIME,
			 pool->user.detach_time))
		goto err_cancel;

	genlmsg_end(rsp, hdr);

	return 0;
err_cancel:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

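/* Send an ADD/DEL/CHANGE notification for @pool to the PAGE_POOL multicast
 * group of its netns. Invisible (unhashed) pools, and netns without
 * listeners, are skipped. Caller must hold page_pools_lock.
 */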
static void netdev_nl_page_pool_event(const struct page_pool *pool, u32 cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;
	struct net *net;

	lockdep_assert_held(&page_pools_lock);

	/* 'invisible' page pools don't matter */
	if (hlist_unhashed(&pool->user.list))
		return;
	net = dev_net(pool->slow.netdev);

	if (!genl_has_listeners(&netdev_nl_family, net, NETDEV_NLGRP_PAGE_POOL))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (page_pool_nl_fill(ntf, pool, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, net, ntf,
				0, NETDEV_NLGRP_PAGE_POOL, GFP_KERNEL);
}

int netdev_nl_page_pool_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 id;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_ID))
		return -EINVAL;

	id = nla_get_uint(info->attrs[NETDEV_A_PAGE_POOL_ID]);

	return netdev_nl_page_pool_get_do(info, id, page_pool_nl_fill);
}

int netdev_nl_page_pool_get_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_fill);
}

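/* Called on pool creation: allocate a user-visible id from the xarray and,
 * if the pool was created with a netdev, hash it onto that device's
 * page_pools list and announce it with an ADD notification.
 */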
int page_pool_list(struct page_pool *pool)
{
	static u32 id_alloc_next;
	int err;

	mutex_lock(&page_pools_lock);
	err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool, xa_limit_32b,
			      &id_alloc_next, GFP_KERNEL);
	if (err < 0)
		goto err_unlock;

	INIT_HLIST_NODE(&pool->user.list);
	if (pool->slow.netdev) {
		hlist_add_head(&pool->user.list,
			       &pool->slow.netdev->page_pools);
		pool->user.napi_id = pool->p.napi ? pool->p.napi->napi_id : 0;

		netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF);
	}

	mutex_unlock(&page_pools_lock);
	return 0;

err_unlock:
	mutex_unlock(&page_pools_lock);
	return err;
}

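/* Record the time the pool was detached from its driver and let listeners
 * know via a CHANGE notification.
 */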
void page_pool_detached(struct page_pool *pool)
{
	mutex_lock(&page_pools_lock);
	pool->user.detach_time = ktime_get_boottime_seconds();
	netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
	mutex_unlock(&page_pools_lock);
}

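/* Final teardown of the user-facing state: send a DEL notification, drop the
 * pool's id from the xarray and unhash it from its netdev list.
 */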
void page_pool_unlist(struct page_pool *pool)
{
	mutex_lock(&page_pools_lock);
	netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_DEL_NTF);
	xa_erase(&page_pools, pool->user.id);
	if (!hlist_unhashed(&pool->user.list))
		hlist_del(&pool->user.list);
	mutex_unlock(&page_pools_lock);
}

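/* Loopback itself is unregistering, so there is nowhere left to re-home the
 * pools: unhash them and poison their netdev pointer.
 */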
static void page_pool_unreg_netdev_wipe(struct net_device *netdev)
{
	struct page_pool *pool;
	struct hlist_node *n;

	mutex_lock(&page_pools_lock);
	hlist_for_each_entry_safe(pool, n, &netdev->page_pools, user.list) {
		hlist_del_init(&pool->user.list);
		pool->slow.netdev = NET_PTR_POISON;
	}
	mutex_unlock(&page_pools_lock);
}

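/* A real netdev is unregistering while its pools still exist: re-home the
 * pools to the netns loopback device and notify user space of the change,
 * matching the "orphaned" state described at the top of this file.
 */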
static void page_pool_unreg_netdev(struct net_device *netdev)
{
	struct page_pool *pool, *last;
	struct net_device *lo;

	lo = dev_net(netdev)->loopback_dev;

	mutex_lock(&page_pools_lock);
	last = NULL;
	hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
		pool->slow.netdev = lo;
		netdev_nl_page_pool_event(pool,
					  NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
		last = pool;
	}
	if (last)
		hlist_splice_init(&netdev->page_pools, &last->user.list,
				  &lo->page_pools);
	mutex_unlock(&page_pools_lock);
}

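/* Netdevice notifier: on NETDEV_UNREGISTER either orphan the device's pools
 * onto loopback or, if loopback itself is unregistering, wipe them entirely.
 */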
static int
page_pool_netdevice_event(struct notifier_block *nb,
			  unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	if (hlist_empty(&netdev->page_pools))
		return NOTIFY_OK;

	if (netdev->ifindex != LOOPBACK_IFINDEX)
		page_pool_unreg_netdev(netdev);
	else
		page_pool_unreg_netdev_wipe(netdev);
	return NOTIFY_OK;
}

static struct notifier_block page_pool_netdevice_nb = {
	.notifier_call = page_pool_netdevice_event,
};

static int __init page_pool_user_init(void)
{
	return register_netdevice_notifier(&page_pool_netdevice_nb);
}

subsys_initcall(page_pool_user_init);