v6.13.7 (net/core/netdev-genl.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2
  3#include <linux/netdevice.h>
  4#include <linux/notifier.h>
  5#include <linux/rtnetlink.h>
  6#include <net/busy_poll.h>
  7#include <net/net_namespace.h>
  8#include <net/netdev_queues.h>
  9#include <net/netdev_rx_queue.h>
 10#include <net/sock.h>
 11#include <net/xdp.h>
 12#include <net/xdp_sock.h>
 13
 14#include "dev.h"
 15#include "devmem.h"
 16#include "netdev-genl-gen.h"
 17
 18struct netdev_nl_dump_ctx {
 19	unsigned long	ifindex;
 20	unsigned int	rxq_idx;
 21	unsigned int	txq_idx;
 22	unsigned int	napi_id;
 23};
 24
 25static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
 26{
 27	NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx);
 28
 29	return (struct netdev_nl_dump_ctx *)cb->ctx;
 30}
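/*
 * Note: cb->ctx is the small scratch area embedded in struct
 * netlink_callback; NL_ASSERT_CTX_FITS() is a build-time check that
 * netdev_nl_dump_ctx still fits in it.  The cursors above (ifindex,
 * rxq_idx, txq_idx, napi_id) are what lets a dump that fills its skb
 * resume from the same position on the next dump pass.
 */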
 31
 32static int
 33netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
 34		   const struct genl_info *info)
 35{
 36	u64 xsk_features = 0;
 37	u64 xdp_rx_meta = 0;
 38	void *hdr;
 39
 40	hdr = genlmsg_iput(rsp, info);
 41	if (!hdr)
 42		return -EMSGSIZE;
 43
 44#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
 45	if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
 46		xdp_rx_meta |= flag;
 47XDP_METADATA_KFUNC_xxx
 48#undef XDP_METADATA_KFUNC
 49
 50	if (netdev->xsk_tx_metadata_ops) {
 51		if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
 52			xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
 53		if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
 54			xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
 55	}
 56
 57	if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
 58	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
 59			      netdev->xdp_features, NETDEV_A_DEV_PAD) ||
 60	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
 61			      xdp_rx_meta, NETDEV_A_DEV_PAD) ||
 62	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
 63			      xsk_features, NETDEV_A_DEV_PAD))
 64		goto err_cancel_msg;
 65
 66	if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
 67		if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
 68				netdev->xdp_zc_max_segs))
 69			goto err_cancel_msg;
 70	}
 71
 72	genlmsg_end(rsp, hdr);
 73
 74	return 0;
 75
 76err_cancel_msg:
 77	genlmsg_cancel(rsp, hdr);
 78	return -EMSGSIZE;
 79}
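/*
 * The XDP_METADATA_KFUNC()/XDP_METADATA_KFUNC_xxx pair above is an
 * X-macro list: XDP_METADATA_KFUNC_xxx expands the locally defined
 * XDP_METADATA_KFUNC() once per RX metadata kfunc, OR-ing the feature
 * flag in whenever the driver implements the corresponding xmo_* hook.
 * Below is a stand-alone user-space sketch of the same pattern; the
 * FEATURE_LIST entries, struct meta_ops and flag values are invented
 * for illustration and are not part of this file.
 */
#include <stdint.h>
#include <stdio.h>

#define FEATURE_LIST(X)				\
	X(TIMESTAMP, 1ULL << 0, rx_timestamp)	\
	X(HASH,      1ULL << 1, rx_hash)

struct meta_ops {
	int (*rx_timestamp)(void);
	int (*rx_hash)(void);
};

static uint64_t supported_features(const struct meta_ops *ops)
{
	uint64_t mask = 0;

	/* OR in the flag for every hook that is implemented, exactly as
	 * netdev_nl_dev_fill() does for netdev->xdp_metadata_ops.
	 */
#define X(name, flag, hook)			\
	if (ops && ops->hook)			\
		mask |= (flag);
	FEATURE_LIST(X)
#undef X

	return mask;
}

static int dummy_hash(void) { return 0; }

int main(void)
{
	struct meta_ops ops = { .rx_hash = dummy_hash };

	printf("features: 0x%llx\n",
	       (unsigned long long)supported_features(&ops));
	return 0;
}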
 80
 81static void
 82netdev_genl_dev_notify(struct net_device *netdev, int cmd)
 83{
 84	struct genl_info info;
 85	struct sk_buff *ntf;
 86
 87	if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
 88				NETDEV_NLGRP_MGMT))
 89		return;
 90
 91	genl_info_init_ntf(&info, &netdev_nl_family, cmd);
 92
 93	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
 94	if (!ntf)
 95		return;
 96
 97	if (netdev_nl_dev_fill(netdev, ntf, &info)) {
 98		nlmsg_free(ntf);
 99		return;
100	}
101
102	genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
103				0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
104}
105
106int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
107{
108	struct net_device *netdev;
109	struct sk_buff *rsp;
110	u32 ifindex;
111	int err;
112
113	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
114		return -EINVAL;
115
116	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
117
118	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
119	if (!rsp)
120		return -ENOMEM;
121
122	rtnl_lock();
123
124	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
125	if (netdev)
126		err = netdev_nl_dev_fill(netdev, rsp, info);
127	else
128		err = -ENODEV;
129
130	rtnl_unlock();
131
132	if (err)
133		goto err_free_msg;
134
135	return genlmsg_reply(rsp, info);
136
137err_free_msg:
138	nlmsg_free(rsp);
139	return err;
140}
141
142int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
143{
144	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
145	struct net *net = sock_net(skb->sk);
146	struct net_device *netdev;
147	int err = 0;
148
149	rtnl_lock();
150	for_each_netdev_dump(net, netdev, ctx->ifindex) {
151		err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
152		if (err < 0)
153			break;
154	}
155	rtnl_unlock();
156
157	return err;
158}
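/*
 * The two handlers above show the do/dump split used for every object
 * in this family: the *_doit handler answers a single request (allocate
 * rsp, fill it under rtnl_lock, genlmsg_reply()), while the *_dumpit
 * handler streams one message per object into the skb netlink provides
 * and relies on the cursors in netdev_nl_dump_ctx to resume when the
 * skb fills up and the dump is called again.
 */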
159
160static int
161netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
162			const struct genl_info *info)
163{
164	unsigned long irq_suspend_timeout;
165	unsigned long gro_flush_timeout;
166	u32 napi_defer_hard_irqs;
167	void *hdr;
168	pid_t pid;
169
170	if (!(napi->dev->flags & IFF_UP))
171		return 0;
172
173	hdr = genlmsg_iput(rsp, info);
174	if (!hdr)
175		return -EMSGSIZE;
176
177	if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
178		goto nla_put_failure;
179
180	if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
181		goto nla_put_failure;
182
183	if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
184		goto nla_put_failure;
185
186	if (napi->thread) {
187		pid = task_pid_nr(napi->thread);
188		if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
189			goto nla_put_failure;
190	}
191
192	napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi);
193	if (nla_put_s32(rsp, NETDEV_A_NAPI_DEFER_HARD_IRQS,
194			napi_defer_hard_irqs))
195		goto nla_put_failure;
196
197	irq_suspend_timeout = napi_get_irq_suspend_timeout(napi);
198	if (nla_put_uint(rsp, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT,
199			 irq_suspend_timeout))
200		goto nla_put_failure;
201
202	gro_flush_timeout = napi_get_gro_flush_timeout(napi);
203	if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT,
204			 gro_flush_timeout))
205		goto nla_put_failure;
206
207	genlmsg_end(rsp, hdr);
208
209	return 0;
210
211nla_put_failure:
212	genlmsg_cancel(rsp, hdr);
213	return -EMSGSIZE;
214}
215
216int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
217{
218	struct napi_struct *napi;
219	struct sk_buff *rsp;
220	u32 napi_id;
221	int err;
222
223	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
224		return -EINVAL;
225
226	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);
227
228	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
229	if (!rsp)
230		return -ENOMEM;
231
232	rtnl_lock();
233	rcu_read_lock();
234
235	napi = netdev_napi_by_id(genl_info_net(info), napi_id);
236	if (napi) {
237		err = netdev_nl_napi_fill_one(rsp, napi, info);
238	} else {
239		NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
240		err = -ENOENT;
241	}
242
243	rcu_read_unlock();
244	rtnl_unlock();
245
246	if (err) {
247		goto err_free_msg;
248	} else if (!rsp->len) {
249		err = -ENOENT;
250		goto err_free_msg;
251	}
252
253	return genlmsg_reply(rsp, info);
254
255err_free_msg:
256	nlmsg_free(rsp);
257	return err;
258}
259
260static int
261netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
262			const struct genl_info *info,
263			struct netdev_nl_dump_ctx *ctx)
264{
265	struct napi_struct *napi;
266	int err = 0;
267
268	if (!(netdev->flags & IFF_UP))
269		return err;
270
271	list_for_each_entry(napi, &netdev->napi_list, dev_list) {
272		if (napi->napi_id < MIN_NAPI_ID)
273			continue;
274		if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
275			continue;
276
277		err = netdev_nl_napi_fill_one(rsp, napi, info);
278		if (err)
279			return err;
280		ctx->napi_id = napi->napi_id;
281	}
282	return err;
283}
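/*
 * Resume handling: ctx->napi_id records the last NAPI instance already
 * written out, so a later dump pass skips entries with
 * napi_id >= ctx->napi_id instead of emitting them twice.  NAPIs whose
 * napi_id is still below MIN_NAPI_ID have not been assigned a valid
 * (busy-poll capable) ID yet and are never reported.
 */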
284
285int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
286{
287	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
288	const struct genl_info *info = genl_info_dump(cb);
289	struct net *net = sock_net(skb->sk);
290	struct net_device *netdev;
291	u32 ifindex = 0;
292	int err = 0;
293
294	if (info->attrs[NETDEV_A_NAPI_IFINDEX])
295		ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);
296
297	rtnl_lock();
298	if (ifindex) {
299		netdev = __dev_get_by_index(net, ifindex);
300		if (netdev)
301			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
302		else
303			err = -ENODEV;
304	} else {
305		for_each_netdev_dump(net, netdev, ctx->ifindex) {
306			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
307			if (err < 0)
308				break;
309			ctx->napi_id = 0;
310		}
311	}
312	rtnl_unlock();
313
314	return err;
315}
316
317static int
318netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info)
319{
320	u64 irq_suspend_timeout = 0;
321	u64 gro_flush_timeout = 0;
322	u32 defer = 0;
323
324	if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) {
325		defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]);
326		napi_set_defer_hard_irqs(napi, defer);
327	}
328
329	if (info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]) {
330		irq_suspend_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]);
331		napi_set_irq_suspend_timeout(napi, irq_suspend_timeout);
332	}
333
334	if (info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]) {
335		gro_flush_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]);
336		napi_set_gro_flush_timeout(napi, gro_flush_timeout);
337	}
338
339	return 0;
340}
341
342int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info)
343{
344	struct napi_struct *napi;
345	unsigned int napi_id;
346	int err;
347
348	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
349		return -EINVAL;
350
351	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);
352
353	rtnl_lock();
354	rcu_read_lock();
355
356	napi = netdev_napi_by_id(genl_info_net(info), napi_id);
357	if (napi) {
358		err = netdev_nl_napi_set_config(napi, info);
359	} else {
360		NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
361		err = -ENOENT;
362	}
363
364	rcu_read_unlock();
365	rtnl_unlock();
366
367	return err;
368}
369
370static int
371netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
372			 u32 q_idx, u32 q_type, const struct genl_info *info)
373{
374	struct net_devmem_dmabuf_binding *binding;
375	struct netdev_rx_queue *rxq;
376	struct netdev_queue *txq;
377	void *hdr;
378
379	hdr = genlmsg_iput(rsp, info);
380	if (!hdr)
381		return -EMSGSIZE;
382
383	if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
384	    nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
385	    nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
386		goto nla_put_failure;
387
388	switch (q_type) {
389	case NETDEV_QUEUE_TYPE_RX:
390		rxq = __netif_get_rx_queue(netdev, q_idx);
391		if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
392					     rxq->napi->napi_id))
393			goto nla_put_failure;
394
395		binding = rxq->mp_params.mp_priv;
396		if (binding &&
397		    nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id))
398			goto nla_put_failure;
399
400		break;
401	case NETDEV_QUEUE_TYPE_TX:
402		txq = netdev_get_tx_queue(netdev, q_idx);
403		if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
404					     txq->napi->napi_id))
405			goto nla_put_failure;
406	}
407
408	genlmsg_end(rsp, hdr);
409
410	return 0;
411
412nla_put_failure:
413	genlmsg_cancel(rsp, hdr);
414	return -EMSGSIZE;
415}
416
417static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
418				    u32 q_type)
419{
420	switch (q_type) {
421	case NETDEV_QUEUE_TYPE_RX:
422		if (q_id >= netdev->real_num_rx_queues)
423			return -EINVAL;
424		return 0;
425	case NETDEV_QUEUE_TYPE_TX:
426		if (q_id >= netdev->real_num_tx_queues)
427			return -EINVAL;
428	}
429	return 0;
430}
431
432static int
433netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
434		     u32 q_type, const struct genl_info *info)
435{
436	int err;
437
438	if (!(netdev->flags & IFF_UP))
439		return -ENOENT;
440
441	err = netdev_nl_queue_validate(netdev, q_idx, q_type);
442	if (err)
443		return err;
444
445	return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
446}
447
448int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
449{
450	u32 q_id, q_type, ifindex;
451	struct net_device *netdev;
452	struct sk_buff *rsp;
453	int err;
454
455	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
456	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
457	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
458		return -EINVAL;
459
460	q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
461	q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
462	ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);
463
464	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
465	if (!rsp)
466		return -ENOMEM;
467
468	rtnl_lock();
469
470	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
471	if (netdev)
472		err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
473	else
474		err = -ENODEV;
475
476	rtnl_unlock();
477
478	if (err)
479		goto err_free_msg;
480
481	return genlmsg_reply(rsp, info);
482
483err_free_msg:
484	nlmsg_free(rsp);
485	return err;
486}
487
488static int
489netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
490			 const struct genl_info *info,
491			 struct netdev_nl_dump_ctx *ctx)
492{
493	int err = 0;
494
495	if (!(netdev->flags & IFF_UP))
496		return err;
497
498	for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) {
499		err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx,
500					       NETDEV_QUEUE_TYPE_RX, info);
501		if (err)
502			return err;
503	}
504	for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) {
505		err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx,
506					       NETDEV_QUEUE_TYPE_TX, info);
507		if (err)
508			return err;
509	}
510
511	return err;
512}
513
514int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
515{
516	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
517	const struct genl_info *info = genl_info_dump(cb);
518	struct net *net = sock_net(skb->sk);
519	struct net_device *netdev;
520	u32 ifindex = 0;
521	int err = 0;
522
523	if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
524		ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);
525
526	rtnl_lock();
527	if (ifindex) {
528		netdev = __dev_get_by_index(net, ifindex);
529		if (netdev)
530			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
531		else
532			err = -ENODEV;
533	} else {
534		for_each_netdev_dump(net, netdev, ctx->ifindex) {
535			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
536			if (err < 0)
537				break;
538			ctx->rxq_idx = 0;
539			ctx->txq_idx = 0;
540		}
541	}
542	rtnl_unlock();
543
544	return err;
545}
546
547#define NETDEV_STAT_NOT_SET		(~0ULL)
548
549static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
550{
551	const u64 *add = _add;
552	u64 *sum = _sum;
553
554	while (size) {
555		if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
556			*sum += *add;
557		sum++;
558		add++;
559		size -= 8;
560	}
561}
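/*
 * Convention used by the qstats code in this file: a stats structure is
 * first memset() to 0xff, the driver overwrites only the counters it
 * knows, and any u64 still equal to NETDEV_STAT_NOT_SET (~0ULL) is
 * treated as "not reported".  netdev_nl_stats_add() above therefore
 * walks the structure as an array of u64s (size -= 8) and only
 * accumulates fields both sides reported, netdev_stat_put() skips unset
 * fields, and netdev_nl_stats_queue() uses memchr_inv() to drop a
 * message entirely when the driver touched nothing.  A stand-alone
 * sketch of the same idea (struct q_stats and the values are invented
 * for illustration):
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define STAT_NOT_SET (~0ULL)

struct q_stats {		/* size must be a multiple of 8 bytes */
	uint64_t packets;
	uint64_t bytes;
};

static void stats_add(void *_sum, const void *_add, size_t size)
{
	const uint64_t *add = _add;
	uint64_t *sum = _sum;

	while (size) {
		/* accumulate only fields both sides actually reported */
		if (*add != STAT_NOT_SET && *sum != STAT_NOT_SET)
			*sum += *add;
		sum++;
		add++;
		size -= 8;
	}
}

int main(void)
{
	struct q_stats total, q0, q1;

	memset(&total, 0xff, sizeof(total));
	total.packets = 0;			/* base: packets tracked */
	total.bytes = 0;			/* base: bytes tracked   */

	memset(&q0, 0xff, sizeof(q0));
	q0.packets = 10;			/* reports packets only  */

	memset(&q1, 0xff, sizeof(q1));
	q1.packets = 5;
	q1.bytes = 700;

	stats_add(&total, &q0, sizeof(q0));
	stats_add(&total, &q1, sizeof(q1));

	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)total.packets,
	       (unsigned long long)total.bytes);
	return 0;
}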
562
563static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
564{
565	if (value == NETDEV_STAT_NOT_SET)
566		return 0;
567	return nla_put_uint(rsp, attr_id, value);
568}
569
570static int
571netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
572{
573	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
574	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
575	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
576	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
577	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
578	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
579	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
580	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
581	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
582	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
583	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
584	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
585	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
586		return -EMSGSIZE;
587	return 0;
588}
589
590static int
591netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
592{
593	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
594	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
595	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
596	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
597	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
598	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
599	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
600	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
601	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
602	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
603	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
604	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
605	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
606		return -EMSGSIZE;
607	return 0;
608}
609
610static int
611netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
612		      u32 q_type, int i, const struct genl_info *info)
613{
614	const struct netdev_stat_ops *ops = netdev->stat_ops;
615	struct netdev_queue_stats_rx rx;
616	struct netdev_queue_stats_tx tx;
617	void *hdr;
618
619	hdr = genlmsg_iput(rsp, info);
620	if (!hdr)
621		return -EMSGSIZE;
622	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
623	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
624	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
625		goto nla_put_failure;
626
627	switch (q_type) {
628	case NETDEV_QUEUE_TYPE_RX:
629		memset(&rx, 0xff, sizeof(rx));
630		ops->get_queue_stats_rx(netdev, i, &rx);
631		if (!memchr_inv(&rx, 0xff, sizeof(rx)))
632			goto nla_cancel;
633		if (netdev_nl_stats_write_rx(rsp, &rx))
634			goto nla_put_failure;
635		break;
636	case NETDEV_QUEUE_TYPE_TX:
637		memset(&tx, 0xff, sizeof(tx));
638		ops->get_queue_stats_tx(netdev, i, &tx);
639		if (!memchr_inv(&tx, 0xff, sizeof(tx)))
640			goto nla_cancel;
641		if (netdev_nl_stats_write_tx(rsp, &tx))
642			goto nla_put_failure;
643		break;
644	}
645
646	genlmsg_end(rsp, hdr);
647	return 0;
648
649nla_cancel:
650	genlmsg_cancel(rsp, hdr);
651	return 0;
652nla_put_failure:
653	genlmsg_cancel(rsp, hdr);
654	return -EMSGSIZE;
655}
656
657static int
658netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
659			 const struct genl_info *info,
660			 struct netdev_nl_dump_ctx *ctx)
661{
662	const struct netdev_stat_ops *ops = netdev->stat_ops;
663	int i, err;
664
665	if (!(netdev->flags & IFF_UP))
666		return 0;
667
668	i = ctx->rxq_idx;
669	while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
670		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
671					    i, info);
672		if (err)
673			return err;
674		ctx->rxq_idx = ++i;
675	}
676	i = ctx->txq_idx;
677	while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
678		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
679					    i, info);
680		if (err)
681			return err;
682		ctx->txq_idx = ++i;
683	}
684
685	ctx->rxq_idx = 0;
686	ctx->txq_idx = 0;
687	return 0;
688}
689
690static int
691netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
692			  const struct genl_info *info)
693{
694	struct netdev_queue_stats_rx rx_sum, rx;
695	struct netdev_queue_stats_tx tx_sum, tx;
696	const struct netdev_stat_ops *ops;
697	void *hdr;
698	int i;
699
700	ops = netdev->stat_ops;
701	/* Netdev can't guarantee any complete counters */
702	if (!ops->get_base_stats)
703		return 0;
704
705	memset(&rx_sum, 0xff, sizeof(rx_sum));
706	memset(&tx_sum, 0xff, sizeof(tx_sum));
707
708	ops->get_base_stats(netdev, &rx_sum, &tx_sum);
709
710	/* The op was there, but nothing reported, don't bother */
711	if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
712	    !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
713		return 0;
714
715	hdr = genlmsg_iput(rsp, info);
716	if (!hdr)
717		return -EMSGSIZE;
718	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
719		goto nla_put_failure;
720
721	for (i = 0; i < netdev->real_num_rx_queues; i++) {
722		memset(&rx, 0xff, sizeof(rx));
723		if (ops->get_queue_stats_rx)
724			ops->get_queue_stats_rx(netdev, i, &rx);
725		netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
726	}
727	for (i = 0; i < netdev->real_num_tx_queues; i++) {
728		memset(&tx, 0xff, sizeof(tx));
729		if (ops->get_queue_stats_tx)
730			ops->get_queue_stats_tx(netdev, i, &tx);
731		netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
732	}
733
734	if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
735	    netdev_nl_stats_write_tx(rsp, &tx_sum))
736		goto nla_put_failure;
737
738	genlmsg_end(rsp, hdr);
739	return 0;
740
741nla_put_failure:
742	genlmsg_cancel(rsp, hdr);
743	return -EMSGSIZE;
744}
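/*
 * Device-scope totals are built from two sources: ->get_base_stats()
 * provides whatever the driver accumulates outside the currently
 * active queues (e.g. counters left behind by queues that were
 * reconfigured away), and the per-queue callbacks are then summed on
 * top for every live queue.  If the driver does not implement
 * ->get_base_stats() the device cannot report complete totals, so
 * nothing is emitted at all.
 */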
745
746static int
747netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
748			      struct sk_buff *skb, const struct genl_info *info,
749			      struct netdev_nl_dump_ctx *ctx)
750{
751	if (!netdev->stat_ops)
752		return 0;
753
754	switch (scope) {
755	case 0:
756		return netdev_nl_stats_by_netdev(netdev, skb, info);
757	case NETDEV_QSTATS_SCOPE_QUEUE:
758		return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
759	}
760
761	return -EINVAL;	/* Should not happen, per netlink policy */
762}
763
764int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
765				struct netlink_callback *cb)
766{
767	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
768	const struct genl_info *info = genl_info_dump(cb);
769	struct net *net = sock_net(skb->sk);
770	struct net_device *netdev;
771	unsigned int ifindex;
772	unsigned int scope;
773	int err = 0;
774
775	scope = 0;
776	if (info->attrs[NETDEV_A_QSTATS_SCOPE])
777		scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);
778
779	ifindex = 0;
780	if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
781		ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);
782
783	rtnl_lock();
784	if (ifindex) {
785		netdev = __dev_get_by_index(net, ifindex);
786		if (netdev && netdev->stat_ops) {
787			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
788							    info, ctx);
789		} else {
790			NL_SET_BAD_ATTR(info->extack,
791					info->attrs[NETDEV_A_QSTATS_IFINDEX]);
792			err = netdev ? -EOPNOTSUPP : -ENODEV;
793		}
794	} else {
795		for_each_netdev_dump(net, netdev, ctx->ifindex) {
796			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
797							    info, ctx);
798			if (err < 0)
799				break;
800		}
801	}
802	rtnl_unlock();
803
804	return err;
805}
806
807int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
808{
809	struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
810	struct net_devmem_dmabuf_binding *binding;
811	struct list_head *sock_binding_list;
812	u32 ifindex, dmabuf_fd, rxq_idx;
813	struct net_device *netdev;
814	struct sk_buff *rsp;
815	struct nlattr *attr;
816	int rem, err = 0;
817	void *hdr;
818
819	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
820	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) ||
821	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES))
822		return -EINVAL;
823
824	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
825	dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);
826
827	sock_binding_list = genl_sk_priv_get(&netdev_nl_family,
828					     NETLINK_CB(skb).sk);
829	if (IS_ERR(sock_binding_list))
830		return PTR_ERR(sock_binding_list);
831
832	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
833	if (!rsp)
834		return -ENOMEM;
835
836	hdr = genlmsg_iput(rsp, info);
837	if (!hdr) {
838		err = -EMSGSIZE;
839		goto err_genlmsg_free;
840	}
841
842	rtnl_lock();
843
844	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
845	if (!netdev || !netif_device_present(netdev)) {
846		err = -ENODEV;
847		goto err_unlock;
848	}
849
850	if (dev_xdp_prog_count(netdev)) {
851		NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
852		err = -EEXIST;
853		goto err_unlock;
854	}
855
856	binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
857	if (IS_ERR(binding)) {
858		err = PTR_ERR(binding);
859		goto err_unlock;
860	}
861
862	nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
863			       genlmsg_data(info->genlhdr),
864			       genlmsg_len(info->genlhdr), rem) {
865		err = nla_parse_nested(
866			tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
867			netdev_queue_id_nl_policy, info->extack);
868		if (err < 0)
869			goto err_unbind;
870
871		if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
872		    NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
873			err = -EINVAL;
874			goto err_unbind;
875		}
876
877		if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
878			NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
879			err = -EINVAL;
880			goto err_unbind;
881		}
882
883		rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);
884
885		err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
886						      info->extack);
887		if (err)
888			goto err_unbind;
889	}
890
891	list_add(&binding->list, sock_binding_list);
892
893	nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
894	genlmsg_end(rsp, hdr);
895
896	err = genlmsg_reply(rsp, info);
897	if (err)
898		goto err_unbind;
899
900	rtnl_unlock();
901
902	return 0;
903
904err_unbind:
905	net_devmem_unbind_dmabuf(binding);
906err_unlock:
907	rtnl_unlock();
908err_genlmsg_free:
909	nlmsg_free(rsp);
910	return err;
911}
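/*
 * Binding lifetime: a successful bind adds the dmabuf binding to the
 * requesting socket's private list (sock_binding_list), so
 * netdev_nl_sock_priv_destroy() below can unbind anything still
 * attached when that netlink socket is closed.
 */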
912
913void netdev_nl_sock_priv_init(struct list_head *priv)
914{
915	INIT_LIST_HEAD(priv);
916}
917
918void netdev_nl_sock_priv_destroy(struct list_head *priv)
919{
920	struct net_devmem_dmabuf_binding *binding;
921	struct net_devmem_dmabuf_binding *temp;
922
923	list_for_each_entry_safe(binding, temp, priv, list) {
924		rtnl_lock();
925		net_devmem_unbind_dmabuf(binding);
926		rtnl_unlock();
927	}
928}
929
930static int netdev_genl_netdevice_event(struct notifier_block *nb,
931				       unsigned long event, void *ptr)
932{
933	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
934
935	switch (event) {
936	case NETDEV_REGISTER:
937		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
938		break;
939	case NETDEV_UNREGISTER:
940		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
941		break;
942	case NETDEV_XDP_FEAT_CHANGE:
943		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
944		break;
945	}
946
947	return NOTIFY_OK;
948}
949
950static struct notifier_block netdev_genl_nb = {
951	.notifier_call	= netdev_genl_netdevice_event,
952};
953
954static int __init netdev_genl_init(void)
955{
956	int err;
957
958	err = register_netdevice_notifier(&netdev_genl_nb);
959	if (err)
960		return err;
961
962	err = genl_register_family(&netdev_nl_family);
963	if (err)
964		goto err_unreg_ntf;
965
966	return 0;
967
968err_unreg_ntf:
969	unregister_netdevice_notifier(&netdev_genl_nb);
970	return err;
971}
972
973subsys_initcall(netdev_genl_init);
v6.9.4 (net/core/netdev-genl.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2
  3#include <linux/netdevice.h>
  4#include <linux/notifier.h>
  5#include <linux/rtnetlink.h>
  6#include <net/net_namespace.h>
  7#include <net/sock.h>
  8#include <net/xdp.h>
  9#include <net/xdp_sock.h>
 10#include <net/netdev_rx_queue.h>
 11#include <net/netdev_queues.h>
 12#include <net/busy_poll.h>
 13
 14#include "netdev-genl-gen.h"
 15#include "dev.h"
 16
 17struct netdev_nl_dump_ctx {
 18	unsigned long	ifindex;
 19	unsigned int	rxq_idx;
 20	unsigned int	txq_idx;
 21	unsigned int	napi_id;
 22};
 23
 24static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
 25{
 26	NL_ASSERT_DUMP_CTX_FITS(struct netdev_nl_dump_ctx);
 27
 28	return (struct netdev_nl_dump_ctx *)cb->ctx;
 29}
 30
 31static int
 32netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
 33		   const struct genl_info *info)
 34{
 35	u64 xsk_features = 0;
 36	u64 xdp_rx_meta = 0;
 37	void *hdr;
 38
 39	hdr = genlmsg_iput(rsp, info);
 40	if (!hdr)
 41		return -EMSGSIZE;
 42
 43#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
 44	if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
 45		xdp_rx_meta |= flag;
 46XDP_METADATA_KFUNC_xxx
 47#undef XDP_METADATA_KFUNC
 48
 49	if (netdev->xsk_tx_metadata_ops) {
 50		if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
 51			xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
 52		if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
 53			xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
 54	}
 55
 56	if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
 57	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
 58			      netdev->xdp_features, NETDEV_A_DEV_PAD) ||
 59	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
 60			      xdp_rx_meta, NETDEV_A_DEV_PAD) ||
 61	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
 62			      xsk_features, NETDEV_A_DEV_PAD)) {
 63		genlmsg_cancel(rsp, hdr);
 64		return -EINVAL;
 65	}
 66
 67	if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
 68		if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
 69				netdev->xdp_zc_max_segs)) {
 70			genlmsg_cancel(rsp, hdr);
 71			return -EINVAL;
 72		}
 73	}
 74
 75	genlmsg_end(rsp, hdr);
 76
 77	return 0;
 78}
 79
 80static void
 81netdev_genl_dev_notify(struct net_device *netdev, int cmd)
 82{
 83	struct genl_info info;
 84	struct sk_buff *ntf;
 85
 86	if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
 87				NETDEV_NLGRP_MGMT))
 88		return;
 89
 90	genl_info_init_ntf(&info, &netdev_nl_family, cmd);
 91
 92	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
 93	if (!ntf)
 94		return;
 95
 96	if (netdev_nl_dev_fill(netdev, ntf, &info)) {
 97		nlmsg_free(ntf);
 98		return;
 99	}
100
101	genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
102				0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
103}
104
105int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
106{
107	struct net_device *netdev;
108	struct sk_buff *rsp;
109	u32 ifindex;
110	int err;
111
112	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
113		return -EINVAL;
114
115	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
116
117	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
118	if (!rsp)
119		return -ENOMEM;
120
121	rtnl_lock();
122
123	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
124	if (netdev)
125		err = netdev_nl_dev_fill(netdev, rsp, info);
126	else
127		err = -ENODEV;
128
129	rtnl_unlock();
130
131	if (err)
132		goto err_free_msg;
133
134	return genlmsg_reply(rsp, info);
135
136err_free_msg:
137	nlmsg_free(rsp);
138	return err;
139}
140
141int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
142{
143	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
144	struct net *net = sock_net(skb->sk);
145	struct net_device *netdev;
146	int err = 0;
147
148	rtnl_lock();
149	for_each_netdev_dump(net, netdev, ctx->ifindex) {
150		err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
151		if (err < 0)
152			break;
153	}
154	rtnl_unlock();
155
156	return err;
157}
158
159static int
160netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
161			const struct genl_info *info)
162{
163	void *hdr;
164	pid_t pid;
165
166	if (WARN_ON_ONCE(!napi->dev))
167		return -EINVAL;
168	if (!(napi->dev->flags & IFF_UP))
169		return 0;
170
171	hdr = genlmsg_iput(rsp, info);
172	if (!hdr)
173		return -EMSGSIZE;
174
175	if (napi->napi_id >= MIN_NAPI_ID &&
176	    nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
177		goto nla_put_failure;
178
179	if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
180		goto nla_put_failure;
181
182	if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
183		goto nla_put_failure;
184
185	if (napi->thread) {
186		pid = task_pid_nr(napi->thread);
187		if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
188			goto nla_put_failure;
189	}
190
191	genlmsg_end(rsp, hdr);
192
193	return 0;
194
195nla_put_failure:
196	genlmsg_cancel(rsp, hdr);
197	return -EMSGSIZE;
198}
199
200int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
201{
202	struct napi_struct *napi;
203	struct sk_buff *rsp;
204	u32 napi_id;
205	int err;
206
207	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
208		return -EINVAL;
209
210	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);
211
212	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
213	if (!rsp)
214		return -ENOMEM;
215
216	rtnl_lock();
217
218	napi = napi_by_id(napi_id);
219	if (napi)
220		err = netdev_nl_napi_fill_one(rsp, napi, info);
221	else
222		err = -EINVAL;
223
224	rtnl_unlock();
225
226	if (err)
227		goto err_free_msg;
228
229	return genlmsg_reply(rsp, info);
230
231err_free_msg:
232	nlmsg_free(rsp);
233	return err;
234}
235
236static int
237netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
238			const struct genl_info *info,
239			struct netdev_nl_dump_ctx *ctx)
240{
241	struct napi_struct *napi;
242	int err = 0;
243
244	if (!(netdev->flags & IFF_UP))
245		return err;
246
247	list_for_each_entry(napi, &netdev->napi_list, dev_list) {
248		if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
249			continue;
250
251		err = netdev_nl_napi_fill_one(rsp, napi, info);
252		if (err)
253			return err;
254		ctx->napi_id = napi->napi_id;
255	}
256	return err;
257}
258
259int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
260{
261	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
262	const struct genl_info *info = genl_info_dump(cb);
263	struct net *net = sock_net(skb->sk);
264	struct net_device *netdev;
265	u32 ifindex = 0;
266	int err = 0;
267
268	if (info->attrs[NETDEV_A_NAPI_IFINDEX])
269		ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);
270
271	rtnl_lock();
272	if (ifindex) {
273		netdev = __dev_get_by_index(net, ifindex);
274		if (netdev)
275			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
276		else
277			err = -ENODEV;
278	} else {
279		for_each_netdev_dump(net, netdev, ctx->ifindex) {
280			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
281			if (err < 0)
282				break;
283			ctx->napi_id = 0;
284		}
285	}
286	rtnl_unlock();
287
288	return err;
289}
290
291static int
292netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
293			 u32 q_idx, u32 q_type, const struct genl_info *info)
294{
295	struct netdev_rx_queue *rxq;
296	struct netdev_queue *txq;
297	void *hdr;
298
299	hdr = genlmsg_iput(rsp, info);
300	if (!hdr)
301		return -EMSGSIZE;
302
303	if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
304	    nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
305	    nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
306		goto nla_put_failure;
307
308	switch (q_type) {
309	case NETDEV_QUEUE_TYPE_RX:
310		rxq = __netif_get_rx_queue(netdev, q_idx);
311		if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
312					     rxq->napi->napi_id))
313			goto nla_put_failure;
314		break;
315	case NETDEV_QUEUE_TYPE_TX:
316		txq = netdev_get_tx_queue(netdev, q_idx);
317		if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
318					     txq->napi->napi_id))
319			goto nla_put_failure;
320	}
321
322	genlmsg_end(rsp, hdr);
323
324	return 0;
325
326nla_put_failure:
327	genlmsg_cancel(rsp, hdr);
328	return -EMSGSIZE;
329}
330
331static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
332				    u32 q_type)
333{
334	switch (q_type) {
335	case NETDEV_QUEUE_TYPE_RX:
336		if (q_id >= netdev->real_num_rx_queues)
337			return -EINVAL;
338		return 0;
339	case NETDEV_QUEUE_TYPE_TX:
340		if (q_id >= netdev->real_num_tx_queues)
341			return -EINVAL;
342	}
343	return 0;
344}
345
346static int
347netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
348		     u32 q_type, const struct genl_info *info)
349{
350	int err = 0;
351
352	if (!(netdev->flags & IFF_UP))
353		return err;
354
355	err = netdev_nl_queue_validate(netdev, q_idx, q_type);
356	if (err)
357		return err;
358
359	return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
360}
361
362int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
363{
364	u32 q_id, q_type, ifindex;
365	struct net_device *netdev;
366	struct sk_buff *rsp;
367	int err;
368
369	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
370	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
371	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
372		return -EINVAL;
373
374	q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
375	q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
376	ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);
377
378	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
379	if (!rsp)
380		return -ENOMEM;
381
382	rtnl_lock();
383
384	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
385	if (netdev)
386		err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
387	else
388		err = -ENODEV;
389
390	rtnl_unlock();
391
392	if (err)
393		goto err_free_msg;
394
395	return genlmsg_reply(rsp, info);
396
397err_free_msg:
398	nlmsg_free(rsp);
399	return err;
400}
401
402static int
403netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
404			 const struct genl_info *info,
405			 struct netdev_nl_dump_ctx *ctx)
406{
407	int err = 0;
408	int i;
409
410	if (!(netdev->flags & IFF_UP))
411		return err;
412
413	for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
414		err = netdev_nl_queue_fill_one(rsp, netdev, i,
415					       NETDEV_QUEUE_TYPE_RX, info);
416		if (err)
417			return err;
418		ctx->rxq_idx = i++;
419	}
420	for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
421		err = netdev_nl_queue_fill_one(rsp, netdev, i,
422					       NETDEV_QUEUE_TYPE_TX, info);
423		if (err)
424			return err;
425		ctx->txq_idx = i++;
426	}
427
428	return err;
429}
430
431int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
432{
433	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
434	const struct genl_info *info = genl_info_dump(cb);
435	struct net *net = sock_net(skb->sk);
436	struct net_device *netdev;
437	u32 ifindex = 0;
438	int err = 0;
439
440	if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
441		ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);
442
443	rtnl_lock();
444	if (ifindex) {
445		netdev = __dev_get_by_index(net, ifindex);
446		if (netdev)
447			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
448		else
449			err = -ENODEV;
450	} else {
451		for_each_netdev_dump(net, netdev, ctx->ifindex) {
452			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
453			if (err < 0)
454				break;
455			ctx->rxq_idx = 0;
456			ctx->txq_idx = 0;
457		}
458	}
459	rtnl_unlock();
460
461	return err;
462}
463
464#define NETDEV_STAT_NOT_SET		(~0ULL)
465
466static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
467{
468	const u64 *add = _add;
469	u64 *sum = _sum;
470
471	while (size) {
472		if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
473			*sum += *add;
474		sum++;
475		add++;
476		size -= 8;
477	}
478}
479
480static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
481{
482	if (value == NETDEV_STAT_NOT_SET)
483		return 0;
484	return nla_put_uint(rsp, attr_id, value);
485}
486
487static int
488netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
489{
490	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
491	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
492	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail))
493		return -EMSGSIZE;
494	return 0;
495}
496
497static int
498netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
499{
500	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
501	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes))
502		return -EMSGSIZE;
503	return 0;
504}
505
506static int
507netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
508		      u32 q_type, int i, const struct genl_info *info)
509{
510	const struct netdev_stat_ops *ops = netdev->stat_ops;
511	struct netdev_queue_stats_rx rx;
512	struct netdev_queue_stats_tx tx;
513	void *hdr;
514
515	hdr = genlmsg_iput(rsp, info);
516	if (!hdr)
517		return -EMSGSIZE;
518	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
519	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
520	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
521		goto nla_put_failure;
522
523	switch (q_type) {
524	case NETDEV_QUEUE_TYPE_RX:
525		memset(&rx, 0xff, sizeof(rx));
526		ops->get_queue_stats_rx(netdev, i, &rx);
527		if (!memchr_inv(&rx, 0xff, sizeof(rx)))
528			goto nla_cancel;
529		if (netdev_nl_stats_write_rx(rsp, &rx))
530			goto nla_put_failure;
531		break;
532	case NETDEV_QUEUE_TYPE_TX:
533		memset(&tx, 0xff, sizeof(tx));
534		ops->get_queue_stats_tx(netdev, i, &tx);
535		if (!memchr_inv(&tx, 0xff, sizeof(tx)))
536			goto nla_cancel;
537		if (netdev_nl_stats_write_tx(rsp, &tx))
538			goto nla_put_failure;
539		break;
540	}
541
542	genlmsg_end(rsp, hdr);
543	return 0;
544
545nla_cancel:
546	genlmsg_cancel(rsp, hdr);
547	return 0;
548nla_put_failure:
549	genlmsg_cancel(rsp, hdr);
550	return -EMSGSIZE;
551}
552
553static int
554netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
555			 const struct genl_info *info,
556			 struct netdev_nl_dump_ctx *ctx)
557{
558	const struct netdev_stat_ops *ops = netdev->stat_ops;
559	int i, err;
560
561	if (!(netdev->flags & IFF_UP))
562		return 0;
563
564	i = ctx->rxq_idx;
565	while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
566		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
567					    i, info);
568		if (err)
569			return err;
570		ctx->rxq_idx = i++;
571	}
572	i = ctx->txq_idx;
573	while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
574		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
575					    i, info);
576		if (err)
577			return err;
578		ctx->txq_idx = i++;
579	}
580
581	ctx->rxq_idx = 0;
582	ctx->txq_idx = 0;
583	return 0;
584}
585
586static int
587netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
588			  const struct genl_info *info)
589{
590	struct netdev_queue_stats_rx rx_sum, rx;
591	struct netdev_queue_stats_tx tx_sum, tx;
592	const struct netdev_stat_ops *ops;
593	void *hdr;
594	int i;
595
596	ops = netdev->stat_ops;
597	/* Netdev can't guarantee any complete counters */
598	if (!ops->get_base_stats)
599		return 0;
600
601	memset(&rx_sum, 0xff, sizeof(rx_sum));
602	memset(&tx_sum, 0xff, sizeof(tx_sum));
603
604	ops->get_base_stats(netdev, &rx_sum, &tx_sum);
605
606	/* The op was there, but nothing reported, don't bother */
607	if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
608	    !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
609		return 0;
610
611	hdr = genlmsg_iput(rsp, info);
612	if (!hdr)
613		return -EMSGSIZE;
614	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
615		goto nla_put_failure;
616
617	for (i = 0; i < netdev->real_num_rx_queues; i++) {
618		memset(&rx, 0xff, sizeof(rx));
619		if (ops->get_queue_stats_rx)
620			ops->get_queue_stats_rx(netdev, i, &rx);
621		netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
622	}
623	for (i = 0; i < netdev->real_num_tx_queues; i++) {
624		memset(&tx, 0xff, sizeof(tx));
625		if (ops->get_queue_stats_tx)
626			ops->get_queue_stats_tx(netdev, i, &tx);
627		netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
628	}
629
630	if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
631	    netdev_nl_stats_write_tx(rsp, &tx_sum))
632		goto nla_put_failure;
633
634	genlmsg_end(rsp, hdr);
635	return 0;
636
637nla_put_failure:
638	genlmsg_cancel(rsp, hdr);
639	return -EMSGSIZE;
640}
641
642int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
643				struct netlink_callback *cb)
644{
645	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
646	const struct genl_info *info = genl_info_dump(cb);
647	struct net *net = sock_net(skb->sk);
648	struct net_device *netdev;
649	unsigned int scope;
650	int err = 0;
651
652	scope = 0;
653	if (info->attrs[NETDEV_A_QSTATS_SCOPE])
654		scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);
655
656	rtnl_lock();
657	for_each_netdev_dump(net, netdev, ctx->ifindex) {
658		if (!netdev->stat_ops)
659			continue;
660
661		switch (scope) {
662		case 0:
663			err = netdev_nl_stats_by_netdev(netdev, skb, info);
664			break;
665		case NETDEV_QSTATS_SCOPE_QUEUE:
666			err = netdev_nl_stats_by_queue(netdev, skb, info, ctx);
667			break;
668		}
669		if (err < 0)
670			break;
671	}
672	rtnl_unlock();
673
674	return err;
675}
676
677static int netdev_genl_netdevice_event(struct notifier_block *nb,
678				       unsigned long event, void *ptr)
679{
680	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
681
682	switch (event) {
683	case NETDEV_REGISTER:
684		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
685		break;
686	case NETDEV_UNREGISTER:
687		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
688		break;
689	case NETDEV_XDP_FEAT_CHANGE:
690		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
691		break;
692	}
693
694	return NOTIFY_OK;
695}
696
697static struct notifier_block netdev_genl_nb = {
698	.notifier_call	= netdev_genl_netdevice_event,
699};
700
701static int __init netdev_genl_init(void)
702{
703	int err;
704
705	err = register_netdevice_notifier(&netdev_genl_nb);
706	if (err)
707		return err;
708
709	err = genl_register_family(&netdev_nl_family);
710	if (err)
711		goto err_unreg_ntf;
712
713	return 0;
714
715err_unreg_ntf:
716	unregister_netdevice_notifier(&netdev_genl_nb);
717	return err;
718}
719
720subsys_initcall(netdev_genl_init);