Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Routing netlink socket interface: protocol independent part.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Fixes:
12 * Vitaly E. Lavrov RTA_OK arithmetic was wrong.
13 */
14
15#include <linux/bitops.h>
16#include <linux/errno.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/socket.h>
20#include <linux/kernel.h>
21#include <linux/timer.h>
22#include <linux/string.h>
23#include <linux/sockios.h>
24#include <linux/net.h>
25#include <linux/fcntl.h>
26#include <linux/mm.h>
27#include <linux/slab.h>
28#include <linux/interrupt.h>
29#include <linux/capability.h>
30#include <linux/skbuff.h>
31#include <linux/init.h>
32#include <linux/security.h>
33#include <linux/mutex.h>
34#include <linux/if_addr.h>
35#include <linux/if_bridge.h>
36#include <linux/if_vlan.h>
37#include <linux/pci.h>
38#include <linux/etherdevice.h>
39#include <linux/bpf.h>
40
41#include <linux/uaccess.h>
42
43#include <linux/inet.h>
44#include <linux/netdevice.h>
45#include <net/ip.h>
46#include <net/protocol.h>
47#include <net/arp.h>
48#include <net/route.h>
49#include <net/udp.h>
50#include <net/tcp.h>
51#include <net/sock.h>
52#include <net/pkt_sched.h>
53#include <net/fib_rules.h>
54#include <net/rtnetlink.h>
55#include <net/net_namespace.h>
56#include <net/devlink.h>
57#if IS_ENABLED(CONFIG_IPV6)
58#include <net/addrconf.h>
59#endif
60#include <linux/dpll.h>
61
62#include "dev.h"
63
64#define RTNL_MAX_TYPE 50
65#define RTNL_SLAVE_MAX_TYPE 44
66
67struct rtnl_link {
68 rtnl_doit_func doit;
69 rtnl_dumpit_func dumpit;
70 struct module *owner;
71 unsigned int flags;
72 struct rcu_head rcu;
73};
74
75static DEFINE_MUTEX(rtnl_mutex);
76
77void rtnl_lock(void)
78{
79 mutex_lock(&rtnl_mutex);
80}
81EXPORT_SYMBOL(rtnl_lock);
82
83int rtnl_lock_killable(void)
84{
85 return mutex_lock_killable(&rtnl_mutex);
86}
87EXPORT_SYMBOL(rtnl_lock_killable);
88
89static struct sk_buff *defer_kfree_skb_list;
90void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
91{
92 if (head && tail) {
93 tail->next = defer_kfree_skb_list;
94 defer_kfree_skb_list = head;
95 }
96}
97EXPORT_SYMBOL(rtnl_kfree_skbs);
98
99void __rtnl_unlock(void)
100{
101 struct sk_buff *head = defer_kfree_skb_list;
102
103 defer_kfree_skb_list = NULL;
104
105 /* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
106 * is used. In some places, e.g. in cfg80211, we have code that will do
107 * something like
108 * rtnl_lock()
109 * wiphy_lock()
110 * ...
111 * rtnl_unlock()
112 *
113 * and because netdev_run_todo() acquires the RTNL for items on the list
114 * we could cause a situation such as this:
115 * Thread 1 Thread 2
116 * rtnl_lock()
117 * unregister_netdevice()
118 * __rtnl_unlock()
119 * rtnl_lock()
120 * wiphy_lock()
121 * rtnl_unlock()
122 * netdev_run_todo()
123 * __rtnl_unlock()
124 *
125 * // list not empty now
126 * // because of thread 2
127 * rtnl_lock()
128 * while (!list_empty(...))
129 * rtnl_lock()
130 * wiphy_lock()
131 * **** DEADLOCK ****
132 *
133 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
134 * it's not used in cases where something is added to do the list.
135 */
136 WARN_ON(!list_empty(&net_todo_list));
137
138 mutex_unlock(&rtnl_mutex);
139
140 while (head) {
141 struct sk_buff *next = head->next;
142
143 kfree_skb(head);
144 cond_resched();
145 head = next;
146 }
147}
148
149void rtnl_unlock(void)
150{
151 /* This fellow will unlock it for us. */
152 netdev_run_todo();
153}
154EXPORT_SYMBOL(rtnl_unlock);
155
156int rtnl_trylock(void)
157{
158 return mutex_trylock(&rtnl_mutex);
159}
160EXPORT_SYMBOL(rtnl_trylock);
161
162int rtnl_is_locked(void)
163{
164 return mutex_is_locked(&rtnl_mutex);
165}
166EXPORT_SYMBOL(rtnl_is_locked);
167
168bool refcount_dec_and_rtnl_lock(refcount_t *r)
169{
170 return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
171}
172EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
173
174#ifdef CONFIG_PROVE_LOCKING
175bool lockdep_rtnl_is_held(void)
176{
177 return lockdep_is_held(&rtnl_mutex);
178}
179EXPORT_SYMBOL(lockdep_rtnl_is_held);
180#endif /* #ifdef CONFIG_PROVE_LOCKING */
181
182#ifdef CONFIG_DEBUG_NET_SMALL_RTNL
183void __rtnl_net_lock(struct net *net)
184{
185 ASSERT_RTNL();
186
187 mutex_lock(&net->rtnl_mutex);
188}
189EXPORT_SYMBOL(__rtnl_net_lock);
190
191void __rtnl_net_unlock(struct net *net)
192{
193 ASSERT_RTNL();
194
195 mutex_unlock(&net->rtnl_mutex);
196}
197EXPORT_SYMBOL(__rtnl_net_unlock);
198
199void rtnl_net_lock(struct net *net)
200{
201 rtnl_lock();
202 __rtnl_net_lock(net);
203}
204EXPORT_SYMBOL(rtnl_net_lock);
205
206void rtnl_net_unlock(struct net *net)
207{
208 __rtnl_net_unlock(net);
209 rtnl_unlock();
210}
211EXPORT_SYMBOL(rtnl_net_unlock);
212
213int rtnl_net_trylock(struct net *net)
214{
215 int ret = rtnl_trylock();
216
217 if (ret)
218 __rtnl_net_lock(net);
219
220 return ret;
221}
222EXPORT_SYMBOL(rtnl_net_trylock);
223
224static int rtnl_net_cmp_locks(const struct net *net_a, const struct net *net_b)
225{
226 if (net_eq(net_a, net_b))
227 return 0;
228
229 /* always init_net first */
230 if (net_eq(net_a, &init_net))
231 return -1;
232
233 if (net_eq(net_b, &init_net))
234 return 1;
235
236 /* otherwise lock in ascending order */
237 return net_a < net_b ? -1 : 1;
238}
239
240int rtnl_net_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b)
241{
242 const struct net *net_a, *net_b;
243
244 net_a = container_of(a, struct net, rtnl_mutex.dep_map);
245 net_b = container_of(b, struct net, rtnl_mutex.dep_map);
246
247 return rtnl_net_cmp_locks(net_a, net_b);
248}
249
250bool rtnl_net_is_locked(struct net *net)
251{
252 return rtnl_is_locked() && mutex_is_locked(&net->rtnl_mutex);
253}
254EXPORT_SYMBOL(rtnl_net_is_locked);
255
256bool lockdep_rtnl_net_is_held(struct net *net)
257{
258 return lockdep_rtnl_is_held() && lockdep_is_held(&net->rtnl_mutex);
259}
260EXPORT_SYMBOL(lockdep_rtnl_net_is_held);
261#else
262static int rtnl_net_cmp_locks(const struct net *net_a, const struct net *net_b)
263{
264 /* No need to swap */
265 return -1;
266}
267#endif
268
269struct rtnl_nets {
270 /* ->newlink() needs to freeze 3 netns at most;
271 * 2 for the new device, 1 for its peer.
272 */
273 struct net *net[3];
274 unsigned char len;
275};
276
277static void rtnl_nets_init(struct rtnl_nets *rtnl_nets)
278{
279 memset(rtnl_nets, 0, sizeof(*rtnl_nets));
280}
281
282static void rtnl_nets_destroy(struct rtnl_nets *rtnl_nets)
283{
284 int i;
285
286 for (i = 0; i < rtnl_nets->len; i++) {
287 put_net(rtnl_nets->net[i]);
288 rtnl_nets->net[i] = NULL;
289 }
290
291 rtnl_nets->len = 0;
292}
293
294/**
295 * rtnl_nets_add - Add netns to be locked before ->newlink().
296 *
297 * @rtnl_nets: rtnl_nets pointer passed to ->get_peer_net().
298 * @net: netns pointer with an extra refcnt held.
299 *
300 * The extra refcnt is released in rtnl_nets_destroy().
301 */
302static void rtnl_nets_add(struct rtnl_nets *rtnl_nets, struct net *net)
303{
304 int i;
305
306 DEBUG_NET_WARN_ON_ONCE(rtnl_nets->len == ARRAY_SIZE(rtnl_nets->net));
307
308 for (i = 0; i < rtnl_nets->len; i++) {
309 switch (rtnl_net_cmp_locks(rtnl_nets->net[i], net)) {
310 case 0:
311 put_net(net);
312 return;
313 case 1:
314 swap(rtnl_nets->net[i], net);
315 }
316 }
317
318 rtnl_nets->net[i] = net;
319 rtnl_nets->len++;
320}
321
322static void rtnl_nets_lock(struct rtnl_nets *rtnl_nets)
323{
324 int i;
325
326 rtnl_lock();
327
328 for (i = 0; i < rtnl_nets->len; i++)
329 __rtnl_net_lock(rtnl_nets->net[i]);
330}
331
332static void rtnl_nets_unlock(struct rtnl_nets *rtnl_nets)
333{
334 int i;
335
336 for (i = 0; i < rtnl_nets->len; i++)
337 __rtnl_net_unlock(rtnl_nets->net[i]);
338
339 rtnl_unlock();
340}
341
342static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
343
344static inline int rtm_msgindex(int msgtype)
345{
346 int msgindex = msgtype - RTM_BASE;
347
348 /*
349 * msgindex < 0 implies someone tried to register a netlink
350 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
351 * the message type has not been added to linux/rtnetlink.h
352 */
353 BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
354
355 return msgindex;
356}
357
358static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
359{
360 struct rtnl_link __rcu **tab;
361
362 if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
363 protocol = PF_UNSPEC;
364
365 tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
366 if (!tab)
367 tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);
368
369 return rcu_dereference_rtnl(tab[msgtype]);
370}
371
372static int rtnl_register_internal(struct module *owner,
373 int protocol, int msgtype,
374 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
375 unsigned int flags)
376{
377 struct rtnl_link *link, *old;
378 struct rtnl_link __rcu **tab;
379 int msgindex;
380 int ret = -ENOBUFS;
381
382 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
383 msgindex = rtm_msgindex(msgtype);
384
385 rtnl_lock();
386 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
387 if (tab == NULL) {
388 tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
389 if (!tab)
390 goto unlock;
391
392 /* ensures we see the 0 stores */
393 rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
394 }
395
396 old = rtnl_dereference(tab[msgindex]);
397 if (old) {
398 link = kmemdup(old, sizeof(*old), GFP_KERNEL);
399 if (!link)
400 goto unlock;
401 } else {
402 link = kzalloc(sizeof(*link), GFP_KERNEL);
403 if (!link)
404 goto unlock;
405 }
406
407 WARN_ON(link->owner && link->owner != owner);
408 link->owner = owner;
409
410 WARN_ON(doit && link->doit && link->doit != doit);
411 if (doit)
412 link->doit = doit;
413 WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
414 if (dumpit)
415 link->dumpit = dumpit;
416
417 WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
418 (flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
419 link->flags |= flags;
420
421 /* publish protocol:msgtype */
422 rcu_assign_pointer(tab[msgindex], link);
423 ret = 0;
424 if (old)
425 kfree_rcu(old, rcu);
426unlock:
427 rtnl_unlock();
428 return ret;
429}
430
431/**
432 * rtnl_unregister - Unregister a rtnetlink message type
433 * @protocol: Protocol family or PF_UNSPEC
434 * @msgtype: rtnetlink message type
435 *
436 * Returns 0 on success or a negative error code.
437 */
438static int rtnl_unregister(int protocol, int msgtype)
439{
440 struct rtnl_link __rcu **tab;
441 struct rtnl_link *link;
442 int msgindex;
443
444 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
445 msgindex = rtm_msgindex(msgtype);
446
447 rtnl_lock();
448 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
449 if (!tab) {
450 rtnl_unlock();
451 return -ENOENT;
452 }
453
454 link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
455 rtnl_unlock();
456
457 kfree_rcu(link, rcu);
458
459 return 0;
460}
461
462/**
463 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
464 * @protocol : Protocol family or PF_UNSPEC
465 *
466 * Identical to calling rtnl_unregster() for all registered message types
467 * of a certain protocol family.
468 */
469void rtnl_unregister_all(int protocol)
470{
471 struct rtnl_link __rcu **tab;
472 struct rtnl_link *link;
473 int msgindex;
474
475 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
476
477 rtnl_lock();
478 tab = rcu_replace_pointer_rtnl(rtnl_msg_handlers[protocol], NULL);
479 if (!tab) {
480 rtnl_unlock();
481 return;
482 }
483 for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
484 link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
485 kfree_rcu(link, rcu);
486 }
487 rtnl_unlock();
488
489 synchronize_net();
490
491 kfree(tab);
492}
493EXPORT_SYMBOL_GPL(rtnl_unregister_all);
494
495/**
496 * __rtnl_register_many - Register rtnetlink message types
497 * @handlers: Array of struct rtnl_msg_handlers
498 * @n: The length of @handlers
499 *
500 * Registers the specified function pointers (at least one of them has
501 * to be non-NULL) to be called whenever a request message for the
502 * specified protocol family and message type is received.
503 *
504 * The special protocol family PF_UNSPEC may be used to define fallback
505 * function pointers for the case when no entry for the specific protocol
506 * family exists.
507 *
508 * When one element of @handlers fails to register,
509 * 1) built-in: panics.
510 * 2) modules : the previous successful registrations are unwinded
511 * and an error is returned.
512 *
513 * Use rtnl_register_many().
514 */
515int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n)
516{
517 const struct rtnl_msg_handler *handler;
518 int i, err;
519
520 for (i = 0, handler = handlers; i < n; i++, handler++) {
521 err = rtnl_register_internal(handler->owner, handler->protocol,
522 handler->msgtype, handler->doit,
523 handler->dumpit, handler->flags);
524 if (err) {
525 if (!handler->owner)
526 panic("Unable to register rtnetlink message "
527 "handlers, %pS\n", handlers);
528
529 __rtnl_unregister_many(handlers, i);
530 break;
531 }
532 }
533
534 return err;
535}
536EXPORT_SYMBOL_GPL(__rtnl_register_many);
537
538void __rtnl_unregister_many(const struct rtnl_msg_handler *handlers, int n)
539{
540 const struct rtnl_msg_handler *handler;
541 int i;
542
543 for (i = n - 1, handler = handlers + n - 1; i >= 0; i--, handler--)
544 rtnl_unregister(handler->protocol, handler->msgtype);
545}
546EXPORT_SYMBOL_GPL(__rtnl_unregister_many);
547
548static DEFINE_MUTEX(link_ops_mutex);
549static LIST_HEAD(link_ops);
550
551static struct rtnl_link_ops *rtnl_link_ops_get(const char *kind, int *srcu_index)
552{
553 struct rtnl_link_ops *ops;
554
555 rcu_read_lock();
556
557 list_for_each_entry_rcu(ops, &link_ops, list) {
558 if (!strcmp(ops->kind, kind)) {
559 *srcu_index = srcu_read_lock(&ops->srcu);
560 goto unlock;
561 }
562 }
563
564 ops = NULL;
565unlock:
566 rcu_read_unlock();
567
568 return ops;
569}
570
571static void rtnl_link_ops_put(struct rtnl_link_ops *ops, int srcu_index)
572{
573 srcu_read_unlock(&ops->srcu, srcu_index);
574}
575
576/**
577 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
578 * @ops: struct rtnl_link_ops * to register
579 *
580 * Returns 0 on success or a negative error code.
581 */
582int rtnl_link_register(struct rtnl_link_ops *ops)
583{
584 struct rtnl_link_ops *tmp;
585 int err;
586
587 /* Sanity-check max sizes to avoid stack buffer overflow. */
588 if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
589 ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
590 return -EINVAL;
591
592 /* The check for alloc/setup is here because if ops
593 * does not have that filled up, it is not possible
594 * to use the ops for creating device. So do not
595 * fill up dellink as well. That disables rtnl_dellink.
596 */
597 if ((ops->alloc || ops->setup) && !ops->dellink)
598 ops->dellink = unregister_netdevice_queue;
599
600 err = init_srcu_struct(&ops->srcu);
601 if (err)
602 return err;
603
604 mutex_lock(&link_ops_mutex);
605
606 list_for_each_entry(tmp, &link_ops, list) {
607 if (!strcmp(ops->kind, tmp->kind)) {
608 err = -EEXIST;
609 goto unlock;
610 }
611 }
612
613 list_add_tail_rcu(&ops->list, &link_ops);
614unlock:
615 mutex_unlock(&link_ops_mutex);
616
617 return err;
618}
619EXPORT_SYMBOL_GPL(rtnl_link_register);
620
621static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
622{
623 struct net_device *dev;
624 LIST_HEAD(list_kill);
625
626 for_each_netdev(net, dev) {
627 if (dev->rtnl_link_ops == ops)
628 ops->dellink(dev, &list_kill);
629 }
630 unregister_netdevice_many(&list_kill);
631}
632
633/* Return with the rtnl_lock held when there are no network
634 * devices unregistering in any network namespace.
635 */
636static void rtnl_lock_unregistering_all(void)
637{
638 DEFINE_WAIT_FUNC(wait, woken_wake_function);
639
640 add_wait_queue(&netdev_unregistering_wq, &wait);
641 for (;;) {
642 rtnl_lock();
643 /* We held write locked pernet_ops_rwsem, and parallel
644 * setup_net() and cleanup_net() are not possible.
645 */
646 if (!atomic_read(&dev_unreg_count))
647 break;
648 __rtnl_unlock();
649
650 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
651 }
652 remove_wait_queue(&netdev_unregistering_wq, &wait);
653}
654
655/**
656 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
657 * @ops: struct rtnl_link_ops * to unregister
658 */
659void rtnl_link_unregister(struct rtnl_link_ops *ops)
660{
661 struct net *net;
662
663 mutex_lock(&link_ops_mutex);
664 list_del_rcu(&ops->list);
665 mutex_unlock(&link_ops_mutex);
666
667 synchronize_srcu(&ops->srcu);
668 cleanup_srcu_struct(&ops->srcu);
669
670 /* Close the race with setup_net() and cleanup_net() */
671 down_write(&pernet_ops_rwsem);
672 rtnl_lock_unregistering_all();
673
674 for_each_net(net)
675 __rtnl_kill_links(net, ops);
676
677 rtnl_unlock();
678 up_write(&pernet_ops_rwsem);
679}
680EXPORT_SYMBOL_GPL(rtnl_link_unregister);
681
682static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
683{
684 struct net_device *master_dev;
685 const struct rtnl_link_ops *ops;
686 size_t size = 0;
687
688 rcu_read_lock();
689
690 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
691 if (!master_dev)
692 goto out;
693
694 ops = master_dev->rtnl_link_ops;
695 if (!ops || !ops->get_slave_size)
696 goto out;
697 /* IFLA_INFO_SLAVE_DATA + nested data */
698 size = nla_total_size(sizeof(struct nlattr)) +
699 ops->get_slave_size(master_dev, dev);
700
701out:
702 rcu_read_unlock();
703 return size;
704}
705
706static size_t rtnl_link_get_size(const struct net_device *dev)
707{
708 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
709 size_t size;
710
711 if (!ops)
712 return 0;
713
714 size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
715 nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
716
717 if (ops->get_size)
718 /* IFLA_INFO_DATA + nested data */
719 size += nla_total_size(sizeof(struct nlattr)) +
720 ops->get_size(dev);
721
722 if (ops->get_xstats_size)
723 /* IFLA_INFO_XSTATS */
724 size += nla_total_size(ops->get_xstats_size(dev));
725
726 size += rtnl_link_get_slave_info_data_size(dev);
727
728 return size;
729}
730
731static LIST_HEAD(rtnl_af_ops);
732
733static struct rtnl_af_ops *rtnl_af_lookup(const int family, int *srcu_index)
734{
735 struct rtnl_af_ops *ops;
736
737 ASSERT_RTNL();
738
739 rcu_read_lock();
740
741 list_for_each_entry_rcu(ops, &rtnl_af_ops, list) {
742 if (ops->family == family) {
743 *srcu_index = srcu_read_lock(&ops->srcu);
744 goto unlock;
745 }
746 }
747
748 ops = NULL;
749unlock:
750 rcu_read_unlock();
751
752 return ops;
753}
754
755static void rtnl_af_put(struct rtnl_af_ops *ops, int srcu_index)
756{
757 srcu_read_unlock(&ops->srcu, srcu_index);
758}
759
760/**
761 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
762 * @ops: struct rtnl_af_ops * to register
763 *
764 * Return: 0 on success or a negative error code.
765 */
766int rtnl_af_register(struct rtnl_af_ops *ops)
767{
768 int err = init_srcu_struct(&ops->srcu);
769
770 if (err)
771 return err;
772
773 rtnl_lock();
774 list_add_tail_rcu(&ops->list, &rtnl_af_ops);
775 rtnl_unlock();
776
777 return 0;
778}
779EXPORT_SYMBOL_GPL(rtnl_af_register);
780
781/**
782 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
783 * @ops: struct rtnl_af_ops * to unregister
784 */
785void rtnl_af_unregister(struct rtnl_af_ops *ops)
786{
787 rtnl_lock();
788 list_del_rcu(&ops->list);
789 rtnl_unlock();
790
791 synchronize_rcu();
792 synchronize_srcu(&ops->srcu);
793 cleanup_srcu_struct(&ops->srcu);
794}
795EXPORT_SYMBOL_GPL(rtnl_af_unregister);
796
797static size_t rtnl_link_get_af_size(const struct net_device *dev,
798 u32 ext_filter_mask)
799{
800 struct rtnl_af_ops *af_ops;
801 size_t size;
802
803 /* IFLA_AF_SPEC */
804 size = nla_total_size(sizeof(struct nlattr));
805
806 rcu_read_lock();
807 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
808 if (af_ops->get_link_af_size) {
809 /* AF_* + nested data */
810 size += nla_total_size(sizeof(struct nlattr)) +
811 af_ops->get_link_af_size(dev, ext_filter_mask);
812 }
813 }
814 rcu_read_unlock();
815
816 return size;
817}
818
819static bool rtnl_have_link_slave_info(const struct net_device *dev)
820{
821 struct net_device *master_dev;
822 bool ret = false;
823
824 rcu_read_lock();
825
826 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
827 if (master_dev && master_dev->rtnl_link_ops)
828 ret = true;
829 rcu_read_unlock();
830 return ret;
831}
832
833static int rtnl_link_slave_info_fill(struct sk_buff *skb,
834 const struct net_device *dev)
835{
836 struct net_device *master_dev;
837 const struct rtnl_link_ops *ops;
838 struct nlattr *slave_data;
839 int err;
840
841 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
842 if (!master_dev)
843 return 0;
844 ops = master_dev->rtnl_link_ops;
845 if (!ops)
846 return 0;
847 if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
848 return -EMSGSIZE;
849 if (ops->fill_slave_info) {
850 slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
851 if (!slave_data)
852 return -EMSGSIZE;
853 err = ops->fill_slave_info(skb, master_dev, dev);
854 if (err < 0)
855 goto err_cancel_slave_data;
856 nla_nest_end(skb, slave_data);
857 }
858 return 0;
859
860err_cancel_slave_data:
861 nla_nest_cancel(skb, slave_data);
862 return err;
863}
864
865static int rtnl_link_info_fill(struct sk_buff *skb,
866 const struct net_device *dev)
867{
868 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
869 struct nlattr *data;
870 int err;
871
872 if (!ops)
873 return 0;
874 if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
875 return -EMSGSIZE;
876 if (ops->fill_xstats) {
877 err = ops->fill_xstats(skb, dev);
878 if (err < 0)
879 return err;
880 }
881 if (ops->fill_info) {
882 data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
883 if (data == NULL)
884 return -EMSGSIZE;
885 err = ops->fill_info(skb, dev);
886 if (err < 0)
887 goto err_cancel_data;
888 nla_nest_end(skb, data);
889 }
890 return 0;
891
892err_cancel_data:
893 nla_nest_cancel(skb, data);
894 return err;
895}
896
897static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
898{
899 struct nlattr *linkinfo;
900 int err = -EMSGSIZE;
901
902 linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
903 if (linkinfo == NULL)
904 goto out;
905
906 err = rtnl_link_info_fill(skb, dev);
907 if (err < 0)
908 goto err_cancel_link;
909
910 err = rtnl_link_slave_info_fill(skb, dev);
911 if (err < 0)
912 goto err_cancel_link;
913
914 nla_nest_end(skb, linkinfo);
915 return 0;
916
917err_cancel_link:
918 nla_nest_cancel(skb, linkinfo);
919out:
920 return err;
921}
922
923int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
924{
925 struct sock *rtnl = net->rtnl;
926
927 return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
928}
929
930int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
931{
932 struct sock *rtnl = net->rtnl;
933
934 return nlmsg_unicast(rtnl, skb, pid);
935}
936EXPORT_SYMBOL(rtnl_unicast);
937
938void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
939 const struct nlmsghdr *nlh, gfp_t flags)
940{
941 struct sock *rtnl = net->rtnl;
942
943 nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
944}
945EXPORT_SYMBOL(rtnl_notify);
946
947void rtnl_set_sk_err(struct net *net, u32 group, int error)
948{
949 struct sock *rtnl = net->rtnl;
950
951 netlink_set_err(rtnl, 0, group, error);
952}
953EXPORT_SYMBOL(rtnl_set_sk_err);
954
955int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
956{
957 struct nlattr *mx;
958 int i, valid = 0;
959
960 /* nothing is dumped for dst_default_metrics, so just skip the loop */
961 if (metrics == dst_default_metrics.metrics)
962 return 0;
963
964 mx = nla_nest_start_noflag(skb, RTA_METRICS);
965 if (mx == NULL)
966 return -ENOBUFS;
967
968 for (i = 0; i < RTAX_MAX; i++) {
969 if (metrics[i]) {
970 if (i == RTAX_CC_ALGO - 1) {
971 char tmp[TCP_CA_NAME_MAX], *name;
972
973 name = tcp_ca_get_name_by_key(metrics[i], tmp);
974 if (!name)
975 continue;
976 if (nla_put_string(skb, i + 1, name))
977 goto nla_put_failure;
978 } else if (i == RTAX_FEATURES - 1) {
979 u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
980
981 if (!user_features)
982 continue;
983 BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
984 if (nla_put_u32(skb, i + 1, user_features))
985 goto nla_put_failure;
986 } else {
987 if (nla_put_u32(skb, i + 1, metrics[i]))
988 goto nla_put_failure;
989 }
990 valid++;
991 }
992 }
993
994 if (!valid) {
995 nla_nest_cancel(skb, mx);
996 return 0;
997 }
998
999 return nla_nest_end(skb, mx);
1000
1001nla_put_failure:
1002 nla_nest_cancel(skb, mx);
1003 return -EMSGSIZE;
1004}
1005EXPORT_SYMBOL(rtnetlink_put_metrics);
1006
1007int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
1008 long expires, u32 error)
1009{
1010 struct rta_cacheinfo ci = {
1011 .rta_error = error,
1012 .rta_id = id,
1013 };
1014
1015 if (dst) {
1016 ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
1017 ci.rta_used = dst->__use;
1018 ci.rta_clntref = rcuref_read(&dst->__rcuref);
1019 }
1020 if (expires) {
1021 unsigned long clock;
1022
1023 clock = jiffies_to_clock_t(abs(expires));
1024 clock = min_t(unsigned long, clock, INT_MAX);
1025 ci.rta_expires = (expires > 0) ? clock : -clock;
1026 }
1027 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
1028}
1029EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
1030
1031void netdev_set_operstate(struct net_device *dev, int newstate)
1032{
1033 unsigned int old = READ_ONCE(dev->operstate);
1034
1035 do {
1036 if (old == newstate)
1037 return;
1038 } while (!try_cmpxchg(&dev->operstate, &old, newstate));
1039
1040 netdev_state_change(dev);
1041}
1042EXPORT_SYMBOL(netdev_set_operstate);
1043
1044static void set_operstate(struct net_device *dev, unsigned char transition)
1045{
1046 unsigned char operstate = READ_ONCE(dev->operstate);
1047
1048 switch (transition) {
1049 case IF_OPER_UP:
1050 if ((operstate == IF_OPER_DORMANT ||
1051 operstate == IF_OPER_TESTING ||
1052 operstate == IF_OPER_UNKNOWN) &&
1053 !netif_dormant(dev) && !netif_testing(dev))
1054 operstate = IF_OPER_UP;
1055 break;
1056
1057 case IF_OPER_TESTING:
1058 if (netif_oper_up(dev))
1059 operstate = IF_OPER_TESTING;
1060 break;
1061
1062 case IF_OPER_DORMANT:
1063 if (netif_oper_up(dev))
1064 operstate = IF_OPER_DORMANT;
1065 break;
1066 }
1067
1068 netdev_set_operstate(dev, operstate);
1069}
1070
1071static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
1072{
1073 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
1074 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
1075}
1076
1077static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
1078 const struct ifinfomsg *ifm)
1079{
1080 unsigned int flags = ifm->ifi_flags;
1081
1082 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
1083 if (ifm->ifi_change)
1084 flags = (flags & ifm->ifi_change) |
1085 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
1086
1087 return flags;
1088}
1089
1090static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
1091 const struct rtnl_link_stats64 *b)
1092{
1093 a->rx_packets = b->rx_packets;
1094 a->tx_packets = b->tx_packets;
1095 a->rx_bytes = b->rx_bytes;
1096 a->tx_bytes = b->tx_bytes;
1097 a->rx_errors = b->rx_errors;
1098 a->tx_errors = b->tx_errors;
1099 a->rx_dropped = b->rx_dropped;
1100 a->tx_dropped = b->tx_dropped;
1101
1102 a->multicast = b->multicast;
1103 a->collisions = b->collisions;
1104
1105 a->rx_length_errors = b->rx_length_errors;
1106 a->rx_over_errors = b->rx_over_errors;
1107 a->rx_crc_errors = b->rx_crc_errors;
1108 a->rx_frame_errors = b->rx_frame_errors;
1109 a->rx_fifo_errors = b->rx_fifo_errors;
1110 a->rx_missed_errors = b->rx_missed_errors;
1111
1112 a->tx_aborted_errors = b->tx_aborted_errors;
1113 a->tx_carrier_errors = b->tx_carrier_errors;
1114 a->tx_fifo_errors = b->tx_fifo_errors;
1115 a->tx_heartbeat_errors = b->tx_heartbeat_errors;
1116 a->tx_window_errors = b->tx_window_errors;
1117
1118 a->rx_compressed = b->rx_compressed;
1119 a->tx_compressed = b->tx_compressed;
1120
1121 a->rx_nohandler = b->rx_nohandler;
1122}
1123
1124/* All VF info */
1125static inline int rtnl_vfinfo_size(const struct net_device *dev,
1126 u32 ext_filter_mask)
1127{
1128 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
1129 int num_vfs = dev_num_vf(dev->dev.parent);
1130 size_t size = nla_total_size(0);
1131 size += num_vfs *
1132 (nla_total_size(0) +
1133 nla_total_size(sizeof(struct ifla_vf_mac)) +
1134 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
1135 nla_total_size(sizeof(struct ifla_vf_vlan)) +
1136 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
1137 nla_total_size(MAX_VLAN_LIST_LEN *
1138 sizeof(struct ifla_vf_vlan_info)) +
1139 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
1140 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
1141 nla_total_size(sizeof(struct ifla_vf_rate)) +
1142 nla_total_size(sizeof(struct ifla_vf_link_state)) +
1143 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
1144 nla_total_size(sizeof(struct ifla_vf_trust)));
1145 if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
1146 size += num_vfs *
1147 (nla_total_size(0) + /* nest IFLA_VF_STATS */
1148 /* IFLA_VF_STATS_RX_PACKETS */
1149 nla_total_size_64bit(sizeof(__u64)) +
1150 /* IFLA_VF_STATS_TX_PACKETS */
1151 nla_total_size_64bit(sizeof(__u64)) +
1152 /* IFLA_VF_STATS_RX_BYTES */
1153 nla_total_size_64bit(sizeof(__u64)) +
1154 /* IFLA_VF_STATS_TX_BYTES */
1155 nla_total_size_64bit(sizeof(__u64)) +
1156 /* IFLA_VF_STATS_BROADCAST */
1157 nla_total_size_64bit(sizeof(__u64)) +
1158 /* IFLA_VF_STATS_MULTICAST */
1159 nla_total_size_64bit(sizeof(__u64)) +
1160 /* IFLA_VF_STATS_RX_DROPPED */
1161 nla_total_size_64bit(sizeof(__u64)) +
1162 /* IFLA_VF_STATS_TX_DROPPED */
1163 nla_total_size_64bit(sizeof(__u64)));
1164 }
1165 return size;
1166 } else
1167 return 0;
1168}
1169
1170static size_t rtnl_port_size(const struct net_device *dev,
1171 u32 ext_filter_mask)
1172{
1173 size_t port_size = nla_total_size(4) /* PORT_VF */
1174 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
1175 + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
1176 + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
1177 + nla_total_size(1) /* PROT_VDP_REQUEST */
1178 + nla_total_size(2); /* PORT_VDP_RESPONSE */
1179 size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
1180 size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
1181 + port_size;
1182 size_t port_self_size = nla_total_size(sizeof(struct nlattr))
1183 + port_size;
1184
1185 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1186 !(ext_filter_mask & RTEXT_FILTER_VF))
1187 return 0;
1188 if (dev_num_vf(dev->dev.parent))
1189 return port_self_size + vf_ports_size +
1190 vf_port_size * dev_num_vf(dev->dev.parent);
1191 else
1192 return port_self_size;
1193}
1194
1195static size_t rtnl_xdp_size(void)
1196{
1197 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
1198 nla_total_size(1) + /* XDP_ATTACHED */
1199 nla_total_size(4) + /* XDP_PROG_ID (or 1st mode) */
1200 nla_total_size(4); /* XDP_<mode>_PROG_ID */
1201
1202 return xdp_size;
1203}
1204
1205static size_t rtnl_prop_list_size(const struct net_device *dev)
1206{
1207 struct netdev_name_node *name_node;
1208 unsigned int cnt = 0;
1209
1210 rcu_read_lock();
1211 list_for_each_entry_rcu(name_node, &dev->name_node->list, list)
1212 cnt++;
1213 rcu_read_unlock();
1214
1215 if (!cnt)
1216 return 0;
1217
1218 return nla_total_size(0) + cnt * nla_total_size(ALTIFNAMSIZ);
1219}
1220
1221static size_t rtnl_proto_down_size(const struct net_device *dev)
1222{
1223 size_t size = nla_total_size(1);
1224
1225 /* Assume dev->proto_down_reason is not zero. */
1226 size += nla_total_size(0) + nla_total_size(4);
1227
1228 return size;
1229}
1230
1231static size_t rtnl_devlink_port_size(const struct net_device *dev)
1232{
1233 size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */
1234
1235 if (dev->devlink_port)
1236 size += devlink_nl_port_handle_size(dev->devlink_port);
1237
1238 return size;
1239}
1240
1241static size_t rtnl_dpll_pin_size(const struct net_device *dev)
1242{
1243 size_t size = nla_total_size(0); /* nest IFLA_DPLL_PIN */
1244
1245 size += dpll_netdev_pin_handle_size(dev);
1246
1247 return size;
1248}
1249
1250static noinline size_t if_nlmsg_size(const struct net_device *dev,
1251 u32 ext_filter_mask)
1252{
1253 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
1254 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
1255 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
1256 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
1257 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
1258 + nla_total_size(sizeof(struct rtnl_link_stats))
1259 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
1260 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
1261 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
1262 + nla_total_size(4) /* IFLA_TXQLEN */
1263 + nla_total_size(4) /* IFLA_WEIGHT */
1264 + nla_total_size(4) /* IFLA_MTU */
1265 + nla_total_size(4) /* IFLA_LINK */
1266 + nla_total_size(4) /* IFLA_MASTER */
1267 + nla_total_size(1) /* IFLA_CARRIER */
1268 + nla_total_size(4) /* IFLA_PROMISCUITY */
1269 + nla_total_size(4) /* IFLA_ALLMULTI */
1270 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
1271 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
1272 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
1273 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
1274 + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
1275 + nla_total_size(4) /* IFLA_GSO_IPV4_MAX_SIZE */
1276 + nla_total_size(4) /* IFLA_GRO_IPV4_MAX_SIZE */
1277 + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
1278 + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
1279 + nla_total_size(1) /* IFLA_OPERSTATE */
1280 + nla_total_size(1) /* IFLA_LINKMODE */
1281 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
1282 + nla_total_size(4) /* IFLA_LINK_NETNSID */
1283 + nla_total_size(4) /* IFLA_GROUP */
1284 + nla_total_size(ext_filter_mask
1285 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
1286 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
1287 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
1288 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
1289 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
1290 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
1291 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
1292 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
1293 + rtnl_xdp_size() /* IFLA_XDP */
1294 + nla_total_size(4) /* IFLA_EVENT */
1295 + nla_total_size(4) /* IFLA_NEW_NETNSID */
1296 + nla_total_size(4) /* IFLA_NEW_IFINDEX */
1297 + rtnl_proto_down_size(dev) /* proto down */
1298 + nla_total_size(4) /* IFLA_TARGET_NETNSID */
1299 + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
1300 + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
1301 + nla_total_size(4) /* IFLA_MIN_MTU */
1302 + nla_total_size(4) /* IFLA_MAX_MTU */
1303 + rtnl_prop_list_size(dev)
1304 + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
1305 + rtnl_devlink_port_size(dev)
1306 + rtnl_dpll_pin_size(dev)
1307 + nla_total_size(8) /* IFLA_MAX_PACING_OFFLOAD_HORIZON */
1308 + 0;
1309}
1310
1311static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
1312{
1313 struct nlattr *vf_ports;
1314 struct nlattr *vf_port;
1315 int vf;
1316 int err;
1317
1318 vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
1319 if (!vf_ports)
1320 return -EMSGSIZE;
1321
1322 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
1323 vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
1324 if (!vf_port)
1325 goto nla_put_failure;
1326 if (nla_put_u32(skb, IFLA_PORT_VF, vf))
1327 goto nla_put_failure;
1328 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
1329 if (err == -EMSGSIZE)
1330 goto nla_put_failure;
1331 if (err) {
1332 nla_nest_cancel(skb, vf_port);
1333 continue;
1334 }
1335 nla_nest_end(skb, vf_port);
1336 }
1337
1338 nla_nest_end(skb, vf_ports);
1339
1340 return 0;
1341
1342nla_put_failure:
1343 nla_nest_cancel(skb, vf_ports);
1344 return -EMSGSIZE;
1345}
1346
1347static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
1348{
1349 struct nlattr *port_self;
1350 int err;
1351
1352 port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
1353 if (!port_self)
1354 return -EMSGSIZE;
1355
1356 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
1357 if (err) {
1358 nla_nest_cancel(skb, port_self);
1359 return (err == -EMSGSIZE) ? err : 0;
1360 }
1361
1362 nla_nest_end(skb, port_self);
1363
1364 return 0;
1365}
1366
1367static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
1368 u32 ext_filter_mask)
1369{
1370 int err;
1371
1372 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1373 !(ext_filter_mask & RTEXT_FILTER_VF))
1374 return 0;
1375
1376 err = rtnl_port_self_fill(skb, dev);
1377 if (err)
1378 return err;
1379
1380 if (dev_num_vf(dev->dev.parent)) {
1381 err = rtnl_vf_ports_fill(skb, dev);
1382 if (err)
1383 return err;
1384 }
1385
1386 return 0;
1387}
1388
1389static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1390{
1391 int err;
1392 struct netdev_phys_item_id ppid;
1393
1394 err = dev_get_phys_port_id(dev, &ppid);
1395 if (err) {
1396 if (err == -EOPNOTSUPP)
1397 return 0;
1398 return err;
1399 }
1400
1401 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1402 return -EMSGSIZE;
1403
1404 return 0;
1405}
1406
1407static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1408{
1409 char name[IFNAMSIZ];
1410 int err;
1411
1412 err = dev_get_phys_port_name(dev, name, sizeof(name));
1413 if (err) {
1414 if (err == -EOPNOTSUPP)
1415 return 0;
1416 return err;
1417 }
1418
1419 if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
1420 return -EMSGSIZE;
1421
1422 return 0;
1423}
1424
1425static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1426{
1427 struct netdev_phys_item_id ppid = { };
1428 int err;
1429
1430 err = dev_get_port_parent_id(dev, &ppid, false);
1431 if (err) {
1432 if (err == -EOPNOTSUPP)
1433 return 0;
1434 return err;
1435 }
1436
1437 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
1438 return -EMSGSIZE;
1439
1440 return 0;
1441}
1442
1443static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1444 struct net_device *dev)
1445{
1446 struct rtnl_link_stats64 *sp;
1447 struct nlattr *attr;
1448
1449 attr = nla_reserve_64bit(skb, IFLA_STATS64,
1450 sizeof(struct rtnl_link_stats64), IFLA_PAD);
1451 if (!attr)
1452 return -EMSGSIZE;
1453
1454 sp = nla_data(attr);
1455 dev_get_stats(dev, sp);
1456
1457 attr = nla_reserve(skb, IFLA_STATS,
1458 sizeof(struct rtnl_link_stats));
1459 if (!attr)
1460 return -EMSGSIZE;
1461
1462 copy_rtnl_link_stats(nla_data(attr), sp);
1463
1464 return 0;
1465}
1466
1467static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1468 struct net_device *dev,
1469 int vfs_num,
1470 u32 ext_filter_mask)
1471{
1472 struct ifla_vf_rss_query_en vf_rss_query_en;
1473 struct nlattr *vf, *vfstats, *vfvlanlist;
1474 struct ifla_vf_link_state vf_linkstate;
1475 struct ifla_vf_vlan_info vf_vlan_info;
1476 struct ifla_vf_spoofchk vf_spoofchk;
1477 struct ifla_vf_tx_rate vf_tx_rate;
1478 struct ifla_vf_stats vf_stats;
1479 struct ifla_vf_trust vf_trust;
1480 struct ifla_vf_vlan vf_vlan;
1481 struct ifla_vf_rate vf_rate;
1482 struct ifla_vf_mac vf_mac;
1483 struct ifla_vf_broadcast vf_broadcast;
1484 struct ifla_vf_info ivi;
1485 struct ifla_vf_guid node_guid;
1486 struct ifla_vf_guid port_guid;
1487
1488 memset(&ivi, 0, sizeof(ivi));
1489
1490 /* Not all SR-IOV capable drivers support the
1491 * spoofcheck and "RSS query enable" query. Preset to
1492 * -1 so the user space tool can detect that the driver
1493 * didn't report anything.
1494 */
1495 ivi.spoofchk = -1;
1496 ivi.rss_query_en = -1;
1497 ivi.trusted = -1;
1498 /* The default value for VF link state is "auto"
1499 * IFLA_VF_LINK_STATE_AUTO which equals zero
1500 */
1501 ivi.linkstate = 0;
1502 /* VLAN Protocol by default is 802.1Q */
1503 ivi.vlan_proto = htons(ETH_P_8021Q);
1504 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1505 return 0;
1506
1507 memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
1508 memset(&node_guid, 0, sizeof(node_guid));
1509 memset(&port_guid, 0, sizeof(port_guid));
1510
1511 vf_mac.vf =
1512 vf_vlan.vf =
1513 vf_vlan_info.vf =
1514 vf_rate.vf =
1515 vf_tx_rate.vf =
1516 vf_spoofchk.vf =
1517 vf_linkstate.vf =
1518 vf_rss_query_en.vf =
1519 vf_trust.vf =
1520 node_guid.vf =
1521 port_guid.vf = ivi.vf;
1522
1523 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1524 memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
1525 vf_vlan.vlan = ivi.vlan;
1526 vf_vlan.qos = ivi.qos;
1527 vf_vlan_info.vlan = ivi.vlan;
1528 vf_vlan_info.qos = ivi.qos;
1529 vf_vlan_info.vlan_proto = ivi.vlan_proto;
1530 vf_tx_rate.rate = ivi.max_tx_rate;
1531 vf_rate.min_tx_rate = ivi.min_tx_rate;
1532 vf_rate.max_tx_rate = ivi.max_tx_rate;
1533 vf_spoofchk.setting = ivi.spoofchk;
1534 vf_linkstate.link_state = ivi.linkstate;
1535 vf_rss_query_en.setting = ivi.rss_query_en;
1536 vf_trust.setting = ivi.trusted;
1537 vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
1538 if (!vf)
1539 return -EMSGSIZE;
1540 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1541 nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
1542 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1543 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1544 &vf_rate) ||
1545 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1546 &vf_tx_rate) ||
1547 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1548 &vf_spoofchk) ||
1549 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1550 &vf_linkstate) ||
1551 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1552 sizeof(vf_rss_query_en),
1553 &vf_rss_query_en) ||
1554 nla_put(skb, IFLA_VF_TRUST,
1555 sizeof(vf_trust), &vf_trust))
1556 goto nla_put_vf_failure;
1557
1558 if (dev->netdev_ops->ndo_get_vf_guid &&
1559 !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
1560 &port_guid)) {
1561 if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
1562 &node_guid) ||
1563 nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
1564 &port_guid))
1565 goto nla_put_vf_failure;
1566 }
1567 vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
1568 if (!vfvlanlist)
1569 goto nla_put_vf_failure;
1570 if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
1571 &vf_vlan_info)) {
1572 nla_nest_cancel(skb, vfvlanlist);
1573 goto nla_put_vf_failure;
1574 }
1575 nla_nest_end(skb, vfvlanlist);
1576 if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
1577 memset(&vf_stats, 0, sizeof(vf_stats));
1578 if (dev->netdev_ops->ndo_get_vf_stats)
1579 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1580 &vf_stats);
1581 vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
1582 if (!vfstats)
1583 goto nla_put_vf_failure;
1584 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1585 vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1586 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1587 vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1588 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1589 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1590 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1591 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1592 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1593 vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1594 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1595 vf_stats.multicast, IFLA_VF_STATS_PAD) ||
1596 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
1597 vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
1598 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
1599 vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
1600 nla_nest_cancel(skb, vfstats);
1601 goto nla_put_vf_failure;
1602 }
1603 nla_nest_end(skb, vfstats);
1604 }
1605 nla_nest_end(skb, vf);
1606 return 0;
1607
1608nla_put_vf_failure:
1609 nla_nest_cancel(skb, vf);
1610 return -EMSGSIZE;
1611}
1612
1613static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
1614 struct net_device *dev,
1615 u32 ext_filter_mask)
1616{
1617 struct nlattr *vfinfo;
1618 int i, num_vfs;
1619
1620 if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
1621 return 0;
1622
1623 num_vfs = dev_num_vf(dev->dev.parent);
1624 if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
1625 return -EMSGSIZE;
1626
1627 if (!dev->netdev_ops->ndo_get_vf_config)
1628 return 0;
1629
1630 vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
1631 if (!vfinfo)
1632 return -EMSGSIZE;
1633
1634 for (i = 0; i < num_vfs; i++) {
1635 if (rtnl_fill_vfinfo(skb, dev, i, ext_filter_mask)) {
1636 nla_nest_cancel(skb, vfinfo);
1637 return -EMSGSIZE;
1638 }
1639 }
1640
1641 nla_nest_end(skb, vfinfo);
1642 return 0;
1643}
1644
1645static int rtnl_fill_link_ifmap(struct sk_buff *skb,
1646 const struct net_device *dev)
1647{
1648 struct rtnl_link_ifmap map;
1649
1650 memset(&map, 0, sizeof(map));
1651 map.mem_start = READ_ONCE(dev->mem_start);
1652 map.mem_end = READ_ONCE(dev->mem_end);
1653 map.base_addr = READ_ONCE(dev->base_addr);
1654 map.irq = READ_ONCE(dev->irq);
1655 map.dma = READ_ONCE(dev->dma);
1656 map.port = READ_ONCE(dev->if_port);
1657
1658 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
1659 return -EMSGSIZE;
1660
1661 return 0;
1662}
1663
1664static u32 rtnl_xdp_prog_skb(struct net_device *dev)
1665{
1666 const struct bpf_prog *generic_xdp_prog;
1667 u32 res = 0;
1668
1669 rcu_read_lock();
1670 generic_xdp_prog = rcu_dereference(dev->xdp_prog);
1671 if (generic_xdp_prog)
1672 res = generic_xdp_prog->aux->id;
1673 rcu_read_unlock();
1674
1675 return res;
1676}
1677
1678static u32 rtnl_xdp_prog_drv(struct net_device *dev)
1679{
1680 return dev_xdp_prog_id(dev, XDP_MODE_DRV);
1681}
1682
1683static u32 rtnl_xdp_prog_hw(struct net_device *dev)
1684{
1685 return dev_xdp_prog_id(dev, XDP_MODE_HW);
1686}
1687
1688static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
1689 u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
1690 u32 (*get_prog_id)(struct net_device *dev))
1691{
1692 u32 curr_id;
1693 int err;
1694
1695 curr_id = get_prog_id(dev);
1696 if (!curr_id)
1697 return 0;
1698
1699 *prog_id = curr_id;
1700 err = nla_put_u32(skb, attr, curr_id);
1701 if (err)
1702 return err;
1703
1704 if (*mode != XDP_ATTACHED_NONE)
1705 *mode = XDP_ATTACHED_MULTI;
1706 else
1707 *mode = tgt_mode;
1708
1709 return 0;
1710}
1711
1712static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1713{
1714 struct nlattr *xdp;
1715 u32 prog_id;
1716 int err;
1717 u8 mode;
1718
1719 xdp = nla_nest_start_noflag(skb, IFLA_XDP);
1720 if (!xdp)
1721 return -EMSGSIZE;
1722
1723 prog_id = 0;
1724 mode = XDP_ATTACHED_NONE;
1725 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
1726 IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
1727 if (err)
1728 goto err_cancel;
1729 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
1730 IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
1731 if (err)
1732 goto err_cancel;
1733 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
1734 IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
1735 if (err)
1736 goto err_cancel;
1737
1738 err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
1739 if (err)
1740 goto err_cancel;
1741
1742 if (prog_id && mode != XDP_ATTACHED_MULTI) {
1743 err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
1744 if (err)
1745 goto err_cancel;
1746 }
1747
1748 nla_nest_end(skb, xdp);
1749 return 0;
1750
1751err_cancel:
1752 nla_nest_cancel(skb, xdp);
1753 return err;
1754}
1755
1756static u32 rtnl_get_event(unsigned long event)
1757{
1758 u32 rtnl_event_type = IFLA_EVENT_NONE;
1759
1760 switch (event) {
1761 case NETDEV_REBOOT:
1762 rtnl_event_type = IFLA_EVENT_REBOOT;
1763 break;
1764 case NETDEV_FEAT_CHANGE:
1765 rtnl_event_type = IFLA_EVENT_FEATURES;
1766 break;
1767 case NETDEV_BONDING_FAILOVER:
1768 rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
1769 break;
1770 case NETDEV_NOTIFY_PEERS:
1771 rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
1772 break;
1773 case NETDEV_RESEND_IGMP:
1774 rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
1775 break;
1776 case NETDEV_CHANGEINFODATA:
1777 rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
1778 break;
1779 default:
1780 break;
1781 }
1782
1783 return rtnl_event_type;
1784}
1785
1786static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
1787{
1788 const struct net_device *upper_dev;
1789 int ret = 0;
1790
1791 rcu_read_lock();
1792
1793 upper_dev = netdev_master_upper_dev_get_rcu(dev);
1794 if (upper_dev)
1795 ret = nla_put_u32(skb, IFLA_MASTER,
1796 READ_ONCE(upper_dev->ifindex));
1797
1798 rcu_read_unlock();
1799 return ret;
1800}
1801
1802static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
1803 bool force)
1804{
1805 int iflink = dev_get_iflink(dev);
1806
1807 if (force || READ_ONCE(dev->ifindex) != iflink)
1808 return nla_put_u32(skb, IFLA_LINK, iflink);
1809
1810 return 0;
1811}
1812
1813static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
1814 struct net_device *dev)
1815{
1816 char buf[IFALIASZ];
1817 int ret;
1818
1819 ret = dev_get_alias(dev, buf, sizeof(buf));
1820 return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
1821}
1822
1823static int rtnl_fill_link_netnsid(struct sk_buff *skb,
1824 const struct net_device *dev,
1825 struct net *src_net, gfp_t gfp)
1826{
1827 bool put_iflink = false;
1828
1829 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
1830 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1831
1832 if (!net_eq(dev_net(dev), link_net)) {
1833 int id = peernet2id_alloc(src_net, link_net, gfp);
1834
1835 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1836 return -EMSGSIZE;
1837
1838 put_iflink = true;
1839 }
1840 }
1841
1842 return nla_put_iflink(skb, dev, put_iflink);
1843}
1844
1845static int rtnl_fill_link_af(struct sk_buff *skb,
1846 const struct net_device *dev,
1847 u32 ext_filter_mask)
1848{
1849 const struct rtnl_af_ops *af_ops;
1850 struct nlattr *af_spec;
1851
1852 af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
1853 if (!af_spec)
1854 return -EMSGSIZE;
1855
1856 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
1857 struct nlattr *af;
1858 int err;
1859
1860 if (!af_ops->fill_link_af)
1861 continue;
1862
1863 af = nla_nest_start_noflag(skb, af_ops->family);
1864 if (!af)
1865 return -EMSGSIZE;
1866
1867 err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1868 /*
1869 * Caller may return ENODATA to indicate that there
1870 * was no data to be dumped. This is not an error, it
1871 * means we should trim the attribute header and
1872 * continue.
1873 */
1874 if (err == -ENODATA)
1875 nla_nest_cancel(skb, af);
1876 else if (err < 0)
1877 return -EMSGSIZE;
1878
1879 nla_nest_end(skb, af);
1880 }
1881
1882 nla_nest_end(skb, af_spec);
1883 return 0;
1884}
1885
1886static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
1887 const struct net_device *dev)
1888{
1889 struct netdev_name_node *name_node;
1890 int count = 0;
1891
1892 list_for_each_entry_rcu(name_node, &dev->name_node->list, list) {
1893 if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
1894 return -EMSGSIZE;
1895 count++;
1896 }
1897 return count;
1898}
1899
1900/* RCU protected. */
1901static int rtnl_fill_prop_list(struct sk_buff *skb,
1902 const struct net_device *dev)
1903{
1904 struct nlattr *prop_list;
1905 int ret;
1906
1907 prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
1908 if (!prop_list)
1909 return -EMSGSIZE;
1910
1911 ret = rtnl_fill_alt_ifnames(skb, dev);
1912 if (ret <= 0)
1913 goto nest_cancel;
1914
1915 nla_nest_end(skb, prop_list);
1916 return 0;
1917
1918nest_cancel:
1919 nla_nest_cancel(skb, prop_list);
1920 return ret;
1921}
1922
1923static int rtnl_fill_proto_down(struct sk_buff *skb,
1924 const struct net_device *dev)
1925{
1926 struct nlattr *pr;
1927 u32 preason;
1928
1929 if (nla_put_u8(skb, IFLA_PROTO_DOWN, READ_ONCE(dev->proto_down)))
1930 goto nla_put_failure;
1931
1932 preason = READ_ONCE(dev->proto_down_reason);
1933 if (!preason)
1934 return 0;
1935
1936 pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
1937 if (!pr)
1938 return -EMSGSIZE;
1939
1940 if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
1941 nla_nest_cancel(skb, pr);
1942 goto nla_put_failure;
1943 }
1944
1945 nla_nest_end(skb, pr);
1946 return 0;
1947
1948nla_put_failure:
1949 return -EMSGSIZE;
1950}
1951
1952static int rtnl_fill_devlink_port(struct sk_buff *skb,
1953 const struct net_device *dev)
1954{
1955 struct nlattr *devlink_port_nest;
1956 int ret;
1957
1958 devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
1959 if (!devlink_port_nest)
1960 return -EMSGSIZE;
1961
1962 if (dev->devlink_port) {
1963 ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
1964 if (ret < 0)
1965 goto nest_cancel;
1966 }
1967
1968 nla_nest_end(skb, devlink_port_nest);
1969 return 0;
1970
1971nest_cancel:
1972 nla_nest_cancel(skb, devlink_port_nest);
1973 return ret;
1974}
1975
1976static int rtnl_fill_dpll_pin(struct sk_buff *skb,
1977 const struct net_device *dev)
1978{
1979 struct nlattr *dpll_pin_nest;
1980 int ret;
1981
1982 dpll_pin_nest = nla_nest_start(skb, IFLA_DPLL_PIN);
1983 if (!dpll_pin_nest)
1984 return -EMSGSIZE;
1985
1986 ret = dpll_netdev_add_pin_handle(skb, dev);
1987 if (ret < 0)
1988 goto nest_cancel;
1989
1990 nla_nest_end(skb, dpll_pin_nest);
1991 return 0;
1992
1993nest_cancel:
1994 nla_nest_cancel(skb, dpll_pin_nest);
1995 return ret;
1996}
1997
1998static int rtnl_fill_ifinfo(struct sk_buff *skb,
1999 struct net_device *dev, struct net *src_net,
2000 int type, u32 pid, u32 seq, u32 change,
2001 unsigned int flags, u32 ext_filter_mask,
2002 u32 event, int *new_nsid, int new_ifindex,
2003 int tgt_netnsid, gfp_t gfp)
2004{
2005 char devname[IFNAMSIZ];
2006 struct ifinfomsg *ifm;
2007 struct nlmsghdr *nlh;
2008 struct Qdisc *qdisc;
2009
2010 ASSERT_RTNL();
2011 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
2012 if (nlh == NULL)
2013 return -EMSGSIZE;
2014
2015 ifm = nlmsg_data(nlh);
2016 ifm->ifi_family = AF_UNSPEC;
2017 ifm->__ifi_pad = 0;
2018 ifm->ifi_type = READ_ONCE(dev->type);
2019 ifm->ifi_index = READ_ONCE(dev->ifindex);
2020 ifm->ifi_flags = dev_get_flags(dev);
2021 ifm->ifi_change = change;
2022
2023 if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
2024 goto nla_put_failure;
2025
2026 netdev_copy_name(dev, devname);
2027 if (nla_put_string(skb, IFLA_IFNAME, devname))
2028 goto nla_put_failure;
2029
2030 if (nla_put_u32(skb, IFLA_TXQLEN, READ_ONCE(dev->tx_queue_len)) ||
2031 nla_put_u8(skb, IFLA_OPERSTATE,
2032 netif_running(dev) ? READ_ONCE(dev->operstate) :
2033 IF_OPER_DOWN) ||
2034 nla_put_u8(skb, IFLA_LINKMODE, READ_ONCE(dev->link_mode)) ||
2035 nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) ||
2036 nla_put_u32(skb, IFLA_MIN_MTU, READ_ONCE(dev->min_mtu)) ||
2037 nla_put_u32(skb, IFLA_MAX_MTU, READ_ONCE(dev->max_mtu)) ||
2038 nla_put_u32(skb, IFLA_GROUP, READ_ONCE(dev->group)) ||
2039 nla_put_u32(skb, IFLA_PROMISCUITY, READ_ONCE(dev->promiscuity)) ||
2040 nla_put_u32(skb, IFLA_ALLMULTI, READ_ONCE(dev->allmulti)) ||
2041 nla_put_u32(skb, IFLA_NUM_TX_QUEUES,
2042 READ_ONCE(dev->num_tx_queues)) ||
2043 nla_put_u32(skb, IFLA_GSO_MAX_SEGS,
2044 READ_ONCE(dev->gso_max_segs)) ||
2045 nla_put_u32(skb, IFLA_GSO_MAX_SIZE,
2046 READ_ONCE(dev->gso_max_size)) ||
2047 nla_put_u32(skb, IFLA_GRO_MAX_SIZE,
2048 READ_ONCE(dev->gro_max_size)) ||
2049 nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE,
2050 READ_ONCE(dev->gso_ipv4_max_size)) ||
2051 nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE,
2052 READ_ONCE(dev->gro_ipv4_max_size)) ||
2053 nla_put_u32(skb, IFLA_TSO_MAX_SIZE,
2054 READ_ONCE(dev->tso_max_size)) ||
2055 nla_put_u32(skb, IFLA_TSO_MAX_SEGS,
2056 READ_ONCE(dev->tso_max_segs)) ||
2057 nla_put_uint(skb, IFLA_MAX_PACING_OFFLOAD_HORIZON,
2058 READ_ONCE(dev->max_pacing_offload_horizon)) ||
2059#ifdef CONFIG_RPS
2060 nla_put_u32(skb, IFLA_NUM_RX_QUEUES,
2061 READ_ONCE(dev->num_rx_queues)) ||
2062#endif
2063 put_master_ifindex(skb, dev) ||
2064 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
2065 nla_put_ifalias(skb, dev) ||
2066 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
2067 atomic_read(&dev->carrier_up_count) +
2068 atomic_read(&dev->carrier_down_count)) ||
2069 nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
2070 atomic_read(&dev->carrier_up_count)) ||
2071 nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
2072 atomic_read(&dev->carrier_down_count)))
2073 goto nla_put_failure;
2074
2075 if (rtnl_fill_proto_down(skb, dev))
2076 goto nla_put_failure;
2077
2078 if (event != IFLA_EVENT_NONE) {
2079 if (nla_put_u32(skb, IFLA_EVENT, event))
2080 goto nla_put_failure;
2081 }
2082
2083 if (dev->addr_len) {
2084 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
2085 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
2086 goto nla_put_failure;
2087 }
2088
2089 if (rtnl_phys_port_id_fill(skb, dev))
2090 goto nla_put_failure;
2091
2092 if (rtnl_phys_port_name_fill(skb, dev))
2093 goto nla_put_failure;
2094
2095 if (rtnl_phys_switch_id_fill(skb, dev))
2096 goto nla_put_failure;
2097
2098 if (rtnl_fill_stats(skb, dev))
2099 goto nla_put_failure;
2100
2101 if (rtnl_fill_vf(skb, dev, ext_filter_mask))
2102 goto nla_put_failure;
2103
2104 if (rtnl_port_fill(skb, dev, ext_filter_mask))
2105 goto nla_put_failure;
2106
2107 if (rtnl_xdp_fill(skb, dev))
2108 goto nla_put_failure;
2109
2110 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
2111 if (rtnl_link_fill(skb, dev) < 0)
2112 goto nla_put_failure;
2113 }
2114
2115 if (new_nsid &&
2116 nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
2117 goto nla_put_failure;
2118 if (new_ifindex &&
2119 nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
2120 goto nla_put_failure;
2121
2122 if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
2123 nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
2124 goto nla_put_failure;
2125
2126 rcu_read_lock();
2127 if (rtnl_fill_link_netnsid(skb, dev, src_net, GFP_ATOMIC))
2128 goto nla_put_failure_rcu;
2129 qdisc = rcu_dereference(dev->qdisc);
2130 if (qdisc && nla_put_string(skb, IFLA_QDISC, qdisc->ops->id))
2131 goto nla_put_failure_rcu;
2132 if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
2133 goto nla_put_failure_rcu;
2134 if (rtnl_fill_link_ifmap(skb, dev))
2135 goto nla_put_failure_rcu;
2136 if (rtnl_fill_prop_list(skb, dev))
2137 goto nla_put_failure_rcu;
2138 rcu_read_unlock();
2139
2140 if (dev->dev.parent &&
2141 nla_put_string(skb, IFLA_PARENT_DEV_NAME,
2142 dev_name(dev->dev.parent)))
2143 goto nla_put_failure;
2144
2145 if (dev->dev.parent && dev->dev.parent->bus &&
2146 nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
2147 dev->dev.parent->bus->name))
2148 goto nla_put_failure;
2149
2150 if (rtnl_fill_devlink_port(skb, dev))
2151 goto nla_put_failure;
2152
2153 if (rtnl_fill_dpll_pin(skb, dev))
2154 goto nla_put_failure;
2155
2156 nlmsg_end(skb, nlh);
2157 return 0;
2158
2159nla_put_failure_rcu:
2160 rcu_read_unlock();
2161nla_put_failure:
2162 nlmsg_cancel(skb, nlh);
2163 return -EMSGSIZE;
2164}
2165
2166static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
2167 [IFLA_UNSPEC] = { .strict_start_type = IFLA_DPLL_PIN },
2168 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
2169 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
2170 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
2171 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
2172 [IFLA_MTU] = { .type = NLA_U32 },
2173 [IFLA_LINK] = { .type = NLA_U32 },
2174 [IFLA_MASTER] = { .type = NLA_U32 },
2175 [IFLA_CARRIER] = { .type = NLA_U8 },
2176 [IFLA_TXQLEN] = { .type = NLA_U32 },
2177 [IFLA_WEIGHT] = { .type = NLA_U32 },
2178 [IFLA_OPERSTATE] = { .type = NLA_U8 },
2179 [IFLA_LINKMODE] = { .type = NLA_U8 },
2180 [IFLA_LINKINFO] = { .type = NLA_NESTED },
2181 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
2182 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
2183 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
2184 * allow 0-length string (needed to remove an alias).
2185 */
2186 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
2187 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
2188 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
2189 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
2190 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
2191 [IFLA_EXT_MASK] = { .type = NLA_U32 },
2192 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
2193 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
2194 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
2195 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 },
2196 [IFLA_GSO_MAX_SIZE] = NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1),
2197 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
2198 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
2199 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
2200 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
2201 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
2202 [IFLA_XDP] = { .type = NLA_NESTED },
2203 [IFLA_EVENT] = { .type = NLA_U32 },
2204 [IFLA_GROUP] = { .type = NLA_U32 },
2205 [IFLA_TARGET_NETNSID] = { .type = NLA_S32 },
2206 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 },
2207 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
2208 [IFLA_MIN_MTU] = { .type = NLA_U32 },
2209 [IFLA_MAX_MTU] = { .type = NLA_U32 },
2210 [IFLA_PROP_LIST] = { .type = NLA_NESTED },
2211 [IFLA_ALT_IFNAME] = { .type = NLA_STRING,
2212 .len = ALTIFNAMSIZ - 1 },
2213 [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT },
2214 [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
2215 [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
2216 [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING },
2217 [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 },
2218 [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT },
2219 [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT },
2220 [IFLA_ALLMULTI] = { .type = NLA_REJECT },
2221 [IFLA_GSO_IPV4_MAX_SIZE] = NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1),
2222 [IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
2223};
2224
2225static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
2226 [IFLA_INFO_KIND] = { .type = NLA_STRING },
2227 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
2228 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
2229 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
2230};
2231
2232static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
2233 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
2234 [IFLA_VF_BROADCAST] = { .type = NLA_REJECT },
2235 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
2236 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
2237 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
2238 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
2239 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
2240 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
2241 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
2242 [IFLA_VF_STATS] = { .type = NLA_NESTED },
2243 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
2244 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) },
2245 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) },
2246};
2247
2248static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
2249 [IFLA_PORT_VF] = { .type = NLA_U32 },
2250 [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
2251 .len = PORT_PROFILE_MAX },
2252 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
2253 .len = PORT_UUID_MAX },
2254 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
2255 .len = PORT_UUID_MAX },
2256 [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
2257 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
2258
2259 /* Unused, but we need to keep it here since user space could
2260 * fill it. It's also broken with regard to NLA_BINARY use in
2261 * combination with structs.
2262 */
2263 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
2264 .len = sizeof(struct ifla_port_vsi) },
2265};
2266
2267static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
2268 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD },
2269 [IFLA_XDP_FD] = { .type = NLA_S32 },
2270 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 },
2271 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
2272 [IFLA_XDP_FLAGS] = { .type = NLA_U32 },
2273 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
2274};
2275
2276static struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla,
2277 int *ops_srcu_index)
2278{
2279 struct nlattr *linfo[IFLA_INFO_MAX + 1];
2280 struct rtnl_link_ops *ops = NULL;
2281
2282 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
2283 return NULL;
2284
2285 if (linfo[IFLA_INFO_KIND]) {
2286 char kind[MODULE_NAME_LEN];
2287
2288 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
2289 ops = rtnl_link_ops_get(kind, ops_srcu_index);
2290 }
2291
2292 return ops;
2293}
2294
2295static bool link_master_filtered(struct net_device *dev, int master_idx)
2296{
2297 struct net_device *master;
2298
2299 if (!master_idx)
2300 return false;
2301
2302 master = netdev_master_upper_dev_get(dev);
2303
2304 /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need
2305 * another invalid value for ifindex to denote "no master".
2306 */
2307 if (master_idx == -1)
2308 return !!master;
2309
2310 if (!master || master->ifindex != master_idx)
2311 return true;
2312
2313 return false;
2314}
2315
2316static bool link_kind_filtered(const struct net_device *dev,
2317 const struct rtnl_link_ops *kind_ops)
2318{
2319 if (kind_ops && dev->rtnl_link_ops != kind_ops)
2320 return true;
2321
2322 return false;
2323}
2324
2325static bool link_dump_filtered(struct net_device *dev,
2326 int master_idx,
2327 const struct rtnl_link_ops *kind_ops)
2328{
2329 if (link_master_filtered(dev, master_idx) ||
2330 link_kind_filtered(dev, kind_ops))
2331 return true;
2332
2333 return false;
2334}
2335
2336/**
2337 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
2338 * @sk: netlink socket
2339 * @netnsid: network namespace identifier
2340 *
2341 * Returns the network namespace identified by netnsid on success or an error
2342 * pointer on failure.
2343 */
2344struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
2345{
2346 struct net *net;
2347
2348 net = get_net_ns_by_id(sock_net(sk), netnsid);
2349 if (!net)
2350 return ERR_PTR(-EINVAL);
2351
2352 /* For now, the caller is required to have CAP_NET_ADMIN in
2353 * the user namespace owning the target net ns.
2354 */
2355 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
2356 put_net(net);
2357 return ERR_PTR(-EACCES);
2358 }
2359 return net;
2360}
2361EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
2362
2363static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
2364 bool strict_check, struct nlattr **tb,
2365 struct netlink_ext_ack *extack)
2366{
2367 int hdrlen;
2368
2369 if (strict_check) {
2370 struct ifinfomsg *ifm;
2371
2372 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
2373 NL_SET_ERR_MSG(extack, "Invalid header for link dump");
2374 return -EINVAL;
2375 }
2376
2377 ifm = nlmsg_data(nlh);
2378 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
2379 ifm->ifi_change) {
2380 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
2381 return -EINVAL;
2382 }
2383 if (ifm->ifi_index) {
2384 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
2385 return -EINVAL;
2386 }
2387
2388 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
2389 IFLA_MAX, ifla_policy,
2390 extack);
2391 }
2392
2393 /* A hack to preserve kernel<->userspace interface.
2394 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
2395 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
2396 * what iproute2 < v3.9.0 used.
2397 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
2398 * attribute, its netlink message is shorter than struct ifinfomsg.
2399 */
2400 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2401 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2402
2403 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
2404 extack);
2405}
2406
2407static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
2408{
2409 struct netlink_ext_ack *extack = cb->extack;
2410 struct rtnl_link_ops *kind_ops = NULL;
2411 const struct nlmsghdr *nlh = cb->nlh;
2412 struct net *net = sock_net(skb->sk);
2413 unsigned int flags = NLM_F_MULTI;
2414 struct nlattr *tb[IFLA_MAX+1];
2415 struct {
2416 unsigned long ifindex;
2417 } *ctx = (void *)cb->ctx;
2418 struct net *tgt_net = net;
2419 u32 ext_filter_mask = 0;
2420 struct net_device *dev;
2421 int ops_srcu_index;
2422 int master_idx = 0;
2423 int netnsid = -1;
2424 int err, i;
2425
2426 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
2427 if (err < 0) {
2428 if (cb->strict_check)
2429 return err;
2430
2431 goto walk_entries;
2432 }
2433
2434 for (i = 0; i <= IFLA_MAX; ++i) {
2435 if (!tb[i])
2436 continue;
2437
2438 /* new attributes should only be added with strict checking */
2439 switch (i) {
2440 case IFLA_TARGET_NETNSID:
2441 netnsid = nla_get_s32(tb[i]);
2442 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
2443 if (IS_ERR(tgt_net)) {
2444 NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
2445 err = PTR_ERR(tgt_net);
2446 netnsid = -1;
2447 goto out;
2448 }
2449 break;
2450 case IFLA_EXT_MASK:
2451 ext_filter_mask = nla_get_u32(tb[i]);
2452 break;
2453 case IFLA_MASTER:
2454 master_idx = nla_get_u32(tb[i]);
2455 break;
2456 case IFLA_LINKINFO:
2457 kind_ops = linkinfo_to_kind_ops(tb[i], &ops_srcu_index);
2458 break;
2459 default:
2460 if (cb->strict_check) {
2461 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
2462 err = -EINVAL;
2463 goto out;
2464 }
2465 }
2466 }
2467
2468 if (master_idx || kind_ops)
2469 flags |= NLM_F_DUMP_FILTERED;
2470
2471walk_entries:
2472 err = 0;
2473 for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
2474 if (link_dump_filtered(dev, master_idx, kind_ops))
2475 continue;
2476 err = rtnl_fill_ifinfo(skb, dev, net, RTM_NEWLINK,
2477 NETLINK_CB(cb->skb).portid,
2478 nlh->nlmsg_seq, 0, flags,
2479 ext_filter_mask, 0, NULL, 0,
2480 netnsid, GFP_KERNEL);
2481 if (err < 0)
2482 break;
2483 }
2484
2485
2486 cb->seq = tgt_net->dev_base_seq;
2487 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2488
2489out:
2490
2491 if (kind_ops)
2492 rtnl_link_ops_put(kind_ops, ops_srcu_index);
2493 if (netnsid >= 0)
2494 put_net(tgt_net);
2495
2496 return err;
2497}
2498
2499int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
2500 struct netlink_ext_ack *exterr)
2501{
2502 const struct ifinfomsg *ifmp;
2503 const struct nlattr *attrs;
2504 size_t len;
2505
2506 ifmp = nla_data(nla_peer);
2507 attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
2508 len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
2509
2510 if (ifmp->ifi_index < 0) {
2511 NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
2512 "ifindex can't be negative");
2513 return -EINVAL;
2514 }
2515
2516 return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
2517 exterr);
2518}
2519EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
2520
2521static struct net *rtnl_link_get_net_ifla(struct nlattr *tb[])
2522{
2523 struct net *net = NULL;
2524
2525 /* Examine the link attributes and figure out which
2526 * network namespace we are talking about.
2527 */
2528 if (tb[IFLA_NET_NS_PID])
2529 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
2530 else if (tb[IFLA_NET_NS_FD])
2531 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
2532
2533 return net;
2534}
2535
2536struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
2537{
2538 struct net *net = rtnl_link_get_net_ifla(tb);
2539
2540 if (!net)
2541 net = get_net(src_net);
2542
2543 return net;
2544}
2545EXPORT_SYMBOL(rtnl_link_get_net);
2546
2547/* Figure out which network namespace we are talking about by
2548 * examining the link attributes in the following order:
2549 *
2550 * 1. IFLA_NET_NS_PID
2551 * 2. IFLA_NET_NS_FD
2552 * 3. IFLA_TARGET_NETNSID
2553 */
2554static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
2555 struct nlattr *tb[])
2556{
2557 struct net *net;
2558
2559 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
2560 return rtnl_link_get_net(src_net, tb);
2561
2562 if (!tb[IFLA_TARGET_NETNSID])
2563 return get_net(src_net);
2564
2565 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
2566 if (!net)
2567 return ERR_PTR(-EINVAL);
2568
2569 return net;
2570}
2571
2572static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
2573 struct net *src_net,
2574 struct nlattr *tb[], int cap)
2575{
2576 struct net *net;
2577
2578 net = rtnl_link_get_net_by_nlattr(src_net, tb);
2579 if (IS_ERR(net))
2580 return net;
2581
2582 if (!netlink_ns_capable(skb, net->user_ns, cap)) {
2583 put_net(net);
2584 return ERR_PTR(-EPERM);
2585 }
2586
2587 return net;
2588}
2589
2590/* Verify that rtnetlink requests do not pass additional properties
2591 * potentially referring to different network namespaces.
2592 */
2593static int rtnl_ensure_unique_netns(struct nlattr *tb[],
2594 struct netlink_ext_ack *extack,
2595 bool netns_id_only)
2596{
2597
2598 if (netns_id_only) {
2599 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
2600 return 0;
2601
2602 NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
2603 return -EOPNOTSUPP;
2604 }
2605
2606 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
2607 goto invalid_attr;
2608
2609 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
2610 goto invalid_attr;
2611
2612 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
2613 goto invalid_attr;
2614
2615 return 0;
2616
2617invalid_attr:
2618 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
2619 return -EINVAL;
2620}
2621
2622static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2623 int max_tx_rate)
2624{
2625 const struct net_device_ops *ops = dev->netdev_ops;
2626
2627 if (!ops->ndo_set_vf_rate)
2628 return -EOPNOTSUPP;
2629 if (max_tx_rate && max_tx_rate < min_tx_rate)
2630 return -EINVAL;
2631
2632 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
2633}
2634
2635static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
2636 struct netlink_ext_ack *extack)
2637{
2638 if (tb[IFLA_ADDRESS] &&
2639 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2640 return -EINVAL;
2641
2642 if (tb[IFLA_BROADCAST] &&
2643 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2644 return -EINVAL;
2645
2646 if (tb[IFLA_GSO_MAX_SIZE] &&
2647 nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) {
2648 NL_SET_ERR_MSG(extack, "too big gso_max_size");
2649 return -EINVAL;
2650 }
2651
2652 if (tb[IFLA_GSO_MAX_SEGS] &&
2653 (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS ||
2654 nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) {
2655 NL_SET_ERR_MSG(extack, "too big gso_max_segs");
2656 return -EINVAL;
2657 }
2658
2659 if (tb[IFLA_GRO_MAX_SIZE] &&
2660 nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) {
2661 NL_SET_ERR_MSG(extack, "too big gro_max_size");
2662 return -EINVAL;
2663 }
2664
2665 if (tb[IFLA_GSO_IPV4_MAX_SIZE] &&
2666 nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) {
2667 NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size");
2668 return -EINVAL;
2669 }
2670
2671 if (tb[IFLA_GRO_IPV4_MAX_SIZE] &&
2672 nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) {
2673 NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size");
2674 return -EINVAL;
2675 }
2676
2677 if (tb[IFLA_AF_SPEC]) {
2678 struct nlattr *af;
2679 int rem, err;
2680
2681 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2682 struct rtnl_af_ops *af_ops;
2683 int af_ops_srcu_index;
2684
2685 af_ops = rtnl_af_lookup(nla_type(af), &af_ops_srcu_index);
2686 if (!af_ops)
2687 return -EAFNOSUPPORT;
2688
2689 if (!af_ops->set_link_af)
2690 err = -EOPNOTSUPP;
2691 else if (af_ops->validate_link_af)
2692 err = af_ops->validate_link_af(dev, af, extack);
2693 else
2694 err = 0;
2695
2696 rtnl_af_put(af_ops, af_ops_srcu_index);
2697
2698 if (err < 0)
2699 return err;
2700 }
2701 }
2702
2703 return 0;
2704}
2705
2706static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2707 int guid_type)
2708{
2709 const struct net_device_ops *ops = dev->netdev_ops;
2710
2711 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2712}
2713
2714static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2715{
2716 if (dev->type != ARPHRD_INFINIBAND)
2717 return -EOPNOTSUPP;
2718
2719 return handle_infiniband_guid(dev, ivt, guid_type);
2720}
2721
2722static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
2723{
2724 const struct net_device_ops *ops = dev->netdev_ops;
2725 int err = -EINVAL;
2726
2727 if (tb[IFLA_VF_MAC]) {
2728 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
2729
2730 if (ivm->vf >= INT_MAX)
2731 return -EINVAL;
2732 err = -EOPNOTSUPP;
2733 if (ops->ndo_set_vf_mac)
2734 err = ops->ndo_set_vf_mac(dev, ivm->vf,
2735 ivm->mac);
2736 if (err < 0)
2737 return err;
2738 }
2739
2740 if (tb[IFLA_VF_VLAN]) {
2741 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
2742
2743 if (ivv->vf >= INT_MAX)
2744 return -EINVAL;
2745 err = -EOPNOTSUPP;
2746 if (ops->ndo_set_vf_vlan)
2747 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
2748 ivv->qos,
2749 htons(ETH_P_8021Q));
2750 if (err < 0)
2751 return err;
2752 }
2753
2754 if (tb[IFLA_VF_VLAN_LIST]) {
2755 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
2756 struct nlattr *attr;
2757 int rem, len = 0;
2758
2759 err = -EOPNOTSUPP;
2760 if (!ops->ndo_set_vf_vlan)
2761 return err;
2762
2763 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
2764 if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
2765 nla_len(attr) < sizeof(struct ifla_vf_vlan_info)) {
2766 return -EINVAL;
2767 }
2768 if (len >= MAX_VLAN_LIST_LEN)
2769 return -EOPNOTSUPP;
2770 ivvl[len] = nla_data(attr);
2771
2772 len++;
2773 }
2774 if (len == 0)
2775 return -EINVAL;
2776
2777 if (ivvl[0]->vf >= INT_MAX)
2778 return -EINVAL;
2779 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2780 ivvl[0]->qos, ivvl[0]->vlan_proto);
2781 if (err < 0)
2782 return err;
2783 }
2784
2785 if (tb[IFLA_VF_TX_RATE]) {
2786 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
2787 struct ifla_vf_info ivf;
2788
2789 if (ivt->vf >= INT_MAX)
2790 return -EINVAL;
2791 err = -EOPNOTSUPP;
2792 if (ops->ndo_get_vf_config)
2793 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2794 if (err < 0)
2795 return err;
2796
2797 err = rtnl_set_vf_rate(dev, ivt->vf,
2798 ivf.min_tx_rate, ivt->rate);
2799 if (err < 0)
2800 return err;
2801 }
2802
2803 if (tb[IFLA_VF_RATE]) {
2804 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
2805
2806 if (ivt->vf >= INT_MAX)
2807 return -EINVAL;
2808
2809 err = rtnl_set_vf_rate(dev, ivt->vf,
2810 ivt->min_tx_rate, ivt->max_tx_rate);
2811 if (err < 0)
2812 return err;
2813 }
2814
2815 if (tb[IFLA_VF_SPOOFCHK]) {
2816 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
2817
2818 if (ivs->vf >= INT_MAX)
2819 return -EINVAL;
2820 err = -EOPNOTSUPP;
2821 if (ops->ndo_set_vf_spoofchk)
2822 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2823 ivs->setting);
2824 if (err < 0)
2825 return err;
2826 }
2827
2828 if (tb[IFLA_VF_LINK_STATE]) {
2829 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
2830
2831 if (ivl->vf >= INT_MAX)
2832 return -EINVAL;
2833 err = -EOPNOTSUPP;
2834 if (ops->ndo_set_vf_link_state)
2835 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2836 ivl->link_state);
2837 if (err < 0)
2838 return err;
2839 }
2840
2841 if (tb[IFLA_VF_RSS_QUERY_EN]) {
2842 struct ifla_vf_rss_query_en *ivrssq_en;
2843
2844 err = -EOPNOTSUPP;
2845 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
2846 if (ivrssq_en->vf >= INT_MAX)
2847 return -EINVAL;
2848 if (ops->ndo_set_vf_rss_query_en)
2849 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2850 ivrssq_en->setting);
2851 if (err < 0)
2852 return err;
2853 }
2854
2855 if (tb[IFLA_VF_TRUST]) {
2856 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
2857
2858 if (ivt->vf >= INT_MAX)
2859 return -EINVAL;
2860 err = -EOPNOTSUPP;
2861 if (ops->ndo_set_vf_trust)
2862 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2863 if (err < 0)
2864 return err;
2865 }
2866
2867 if (tb[IFLA_VF_IB_NODE_GUID]) {
2868 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
2869
2870 if (ivt->vf >= INT_MAX)
2871 return -EINVAL;
2872 if (!ops->ndo_set_vf_guid)
2873 return -EOPNOTSUPP;
2874 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2875 }
2876
2877 if (tb[IFLA_VF_IB_PORT_GUID]) {
2878 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
2879
2880 if (ivt->vf >= INT_MAX)
2881 return -EINVAL;
2882 if (!ops->ndo_set_vf_guid)
2883 return -EOPNOTSUPP;
2884
2885 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
2886 }
2887
2888 return err;
2889}
2890
2891static int do_set_master(struct net_device *dev, int ifindex,
2892 struct netlink_ext_ack *extack)
2893{
2894 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2895 const struct net_device_ops *ops;
2896 int err;
2897
2898 if (upper_dev) {
2899 if (upper_dev->ifindex == ifindex)
2900 return 0;
2901 ops = upper_dev->netdev_ops;
2902 if (ops->ndo_del_slave) {
2903 err = ops->ndo_del_slave(upper_dev, dev);
2904 if (err)
2905 return err;
2906 } else {
2907 return -EOPNOTSUPP;
2908 }
2909 }
2910
2911 if (ifindex) {
2912 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2913 if (!upper_dev)
2914 return -EINVAL;
2915 ops = upper_dev->netdev_ops;
2916 if (ops->ndo_add_slave) {
2917 err = ops->ndo_add_slave(upper_dev, dev, extack);
2918 if (err)
2919 return err;
2920 } else {
2921 return -EOPNOTSUPP;
2922 }
2923 }
2924 return 0;
2925}
2926
2927static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
2928 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 },
2929 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 },
2930};
2931
2932static int do_set_proto_down(struct net_device *dev,
2933 struct nlattr *nl_proto_down,
2934 struct nlattr *nl_proto_down_reason,
2935 struct netlink_ext_ack *extack)
2936{
2937 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
2938 unsigned long mask = 0;
2939 u32 value;
2940 bool proto_down;
2941 int err;
2942
2943 if (!dev->change_proto_down) {
2944 NL_SET_ERR_MSG(extack, "Protodown not supported by device");
2945 return -EOPNOTSUPP;
2946 }
2947
2948 if (nl_proto_down_reason) {
2949 err = nla_parse_nested_deprecated(pdreason,
2950 IFLA_PROTO_DOWN_REASON_MAX,
2951 nl_proto_down_reason,
2952 ifla_proto_down_reason_policy,
2953 NULL);
2954 if (err < 0)
2955 return err;
2956
2957 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
2958 NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
2959 return -EINVAL;
2960 }
2961
2962 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);
2963
2964 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
2965 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);
2966
2967 dev_change_proto_down_reason(dev, mask, value);
2968 }
2969
2970 if (nl_proto_down) {
2971 proto_down = nla_get_u8(nl_proto_down);
2972
2973 /* Don't turn off protodown if there are active reasons */
2974 if (!proto_down && dev->proto_down_reason) {
2975 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
2976 return -EBUSY;
2977 }
2978 err = dev_change_proto_down(dev,
2979 proto_down);
2980 if (err)
2981 return err;
2982 }
2983
2984 return 0;
2985}
2986
2987#define DO_SETLINK_MODIFIED 0x01
2988/* notify flag means notify + modified. */
2989#define DO_SETLINK_NOTIFY 0x03
2990static int do_setlink(const struct sk_buff *skb, struct net_device *dev,
2991 struct net *tgt_net, struct ifinfomsg *ifm,
2992 struct netlink_ext_ack *extack,
2993 struct nlattr **tb, int status)
2994{
2995 const struct net_device_ops *ops = dev->netdev_ops;
2996 char ifname[IFNAMSIZ];
2997 int err;
2998
2999 err = validate_linkmsg(dev, tb, extack);
3000 if (err < 0)
3001 goto errout;
3002
3003 if (tb[IFLA_IFNAME])
3004 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3005 else
3006 ifname[0] = '\0';
3007
3008 if (!net_eq(tgt_net, dev_net(dev))) {
3009 const char *pat = ifname[0] ? ifname : NULL;
3010 int new_ifindex;
3011
3012 new_ifindex = nla_get_s32_default(tb[IFLA_NEW_IFINDEX], 0);
3013
3014 err = __dev_change_net_namespace(dev, tgt_net, pat, new_ifindex);
3015 if (err)
3016 goto errout;
3017
3018 status |= DO_SETLINK_MODIFIED;
3019 }
3020
3021 if (tb[IFLA_MAP]) {
3022 struct rtnl_link_ifmap *u_map;
3023 struct ifmap k_map;
3024
3025 if (!ops->ndo_set_config) {
3026 err = -EOPNOTSUPP;
3027 goto errout;
3028 }
3029
3030 if (!netif_device_present(dev)) {
3031 err = -ENODEV;
3032 goto errout;
3033 }
3034
3035 u_map = nla_data(tb[IFLA_MAP]);
3036 k_map.mem_start = (unsigned long) u_map->mem_start;
3037 k_map.mem_end = (unsigned long) u_map->mem_end;
3038 k_map.base_addr = (unsigned short) u_map->base_addr;
3039 k_map.irq = (unsigned char) u_map->irq;
3040 k_map.dma = (unsigned char) u_map->dma;
3041 k_map.port = (unsigned char) u_map->port;
3042
3043 err = ops->ndo_set_config(dev, &k_map);
3044 if (err < 0)
3045 goto errout;
3046
3047 status |= DO_SETLINK_NOTIFY;
3048 }
3049
3050 if (tb[IFLA_ADDRESS]) {
3051 struct sockaddr *sa;
3052 int len;
3053
3054 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
3055 sizeof(*sa));
3056 sa = kmalloc(len, GFP_KERNEL);
3057 if (!sa) {
3058 err = -ENOMEM;
3059 goto errout;
3060 }
3061 sa->sa_family = dev->type;
3062 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
3063 dev->addr_len);
3064 err = dev_set_mac_address_user(dev, sa, extack);
3065 kfree(sa);
3066 if (err)
3067 goto errout;
3068 status |= DO_SETLINK_MODIFIED;
3069 }
3070
3071 if (tb[IFLA_MTU]) {
3072 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
3073 if (err < 0)
3074 goto errout;
3075 status |= DO_SETLINK_MODIFIED;
3076 }
3077
3078 if (tb[IFLA_GROUP]) {
3079 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3080 status |= DO_SETLINK_NOTIFY;
3081 }
3082
3083 /*
3084 * Interface selected by interface index but interface
3085 * name provided implies that a name change has been
3086 * requested.
3087 */
3088 if (ifm->ifi_index > 0 && ifname[0]) {
3089 err = dev_change_name(dev, ifname);
3090 if (err < 0)
3091 goto errout;
3092 status |= DO_SETLINK_MODIFIED;
3093 }
3094
3095 if (tb[IFLA_IFALIAS]) {
3096 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
3097 nla_len(tb[IFLA_IFALIAS]));
3098 if (err < 0)
3099 goto errout;
3100 status |= DO_SETLINK_NOTIFY;
3101 }
3102
3103 if (tb[IFLA_BROADCAST]) {
3104 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
3105 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3106 }
3107
3108 if (ifm->ifi_flags || ifm->ifi_change) {
3109 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3110 extack);
3111 if (err < 0)
3112 goto errout;
3113 }
3114
3115 if (tb[IFLA_MASTER]) {
3116 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3117 if (err)
3118 goto errout;
3119 status |= DO_SETLINK_MODIFIED;
3120 }
3121
3122 if (tb[IFLA_CARRIER]) {
3123 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
3124 if (err)
3125 goto errout;
3126 status |= DO_SETLINK_MODIFIED;
3127 }
3128
3129 if (tb[IFLA_TXQLEN]) {
3130 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
3131
3132 err = dev_change_tx_queue_len(dev, value);
3133 if (err)
3134 goto errout;
3135 status |= DO_SETLINK_MODIFIED;
3136 }
3137
3138 if (tb[IFLA_GSO_MAX_SIZE]) {
3139 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
3140
3141 if (dev->gso_max_size ^ max_size) {
3142 netif_set_gso_max_size(dev, max_size);
3143 status |= DO_SETLINK_MODIFIED;
3144 }
3145 }
3146
3147 if (tb[IFLA_GSO_MAX_SEGS]) {
3148 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
3149
3150 if (dev->gso_max_segs ^ max_segs) {
3151 netif_set_gso_max_segs(dev, max_segs);
3152 status |= DO_SETLINK_MODIFIED;
3153 }
3154 }
3155
3156 if (tb[IFLA_GRO_MAX_SIZE]) {
3157 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
3158
3159 if (dev->gro_max_size ^ gro_max_size) {
3160 netif_set_gro_max_size(dev, gro_max_size);
3161 status |= DO_SETLINK_MODIFIED;
3162 }
3163 }
3164
3165 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) {
3166 u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]);
3167
3168 if (dev->gso_ipv4_max_size ^ max_size) {
3169 netif_set_gso_ipv4_max_size(dev, max_size);
3170 status |= DO_SETLINK_MODIFIED;
3171 }
3172 }
3173
3174 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) {
3175 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]);
3176
3177 if (dev->gro_ipv4_max_size ^ gro_max_size) {
3178 netif_set_gro_ipv4_max_size(dev, gro_max_size);
3179 status |= DO_SETLINK_MODIFIED;
3180 }
3181 }
3182
3183 if (tb[IFLA_OPERSTATE])
3184 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3185
3186 if (tb[IFLA_LINKMODE]) {
3187 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
3188
3189 if (dev->link_mode ^ value)
3190 status |= DO_SETLINK_NOTIFY;
3191 WRITE_ONCE(dev->link_mode, value);
3192 }
3193
3194 if (tb[IFLA_VFINFO_LIST]) {
3195 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
3196 struct nlattr *attr;
3197 int rem;
3198
3199 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
3200 if (nla_type(attr) != IFLA_VF_INFO ||
3201 nla_len(attr) < NLA_HDRLEN) {
3202 err = -EINVAL;
3203 goto errout;
3204 }
3205 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
3206 attr,
3207 ifla_vf_policy,
3208 NULL);
3209 if (err < 0)
3210 goto errout;
3211 err = do_setvfinfo(dev, vfinfo);
3212 if (err < 0)
3213 goto errout;
3214 status |= DO_SETLINK_NOTIFY;
3215 }
3216 }
3217 err = 0;
3218
3219 if (tb[IFLA_VF_PORTS]) {
3220 struct nlattr *port[IFLA_PORT_MAX+1];
3221 struct nlattr *attr;
3222 int vf;
3223 int rem;
3224
3225 err = -EOPNOTSUPP;
3226 if (!ops->ndo_set_vf_port)
3227 goto errout;
3228
3229 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
3230 if (nla_type(attr) != IFLA_VF_PORT ||
3231 nla_len(attr) < NLA_HDRLEN) {
3232 err = -EINVAL;
3233 goto errout;
3234 }
3235 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
3236 attr,
3237 ifla_port_policy,
3238 NULL);
3239 if (err < 0)
3240 goto errout;
3241 if (!port[IFLA_PORT_VF]) {
3242 err = -EOPNOTSUPP;
3243 goto errout;
3244 }
3245 vf = nla_get_u32(port[IFLA_PORT_VF]);
3246 err = ops->ndo_set_vf_port(dev, vf, port);
3247 if (err < 0)
3248 goto errout;
3249 status |= DO_SETLINK_NOTIFY;
3250 }
3251 }
3252 err = 0;
3253
3254 if (tb[IFLA_PORT_SELF]) {
3255 struct nlattr *port[IFLA_PORT_MAX+1];
3256
3257 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
3258 tb[IFLA_PORT_SELF],
3259 ifla_port_policy, NULL);
3260 if (err < 0)
3261 goto errout;
3262
3263 err = -EOPNOTSUPP;
3264 if (ops->ndo_set_vf_port)
3265 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
3266 if (err < 0)
3267 goto errout;
3268 status |= DO_SETLINK_NOTIFY;
3269 }
3270
3271 if (tb[IFLA_AF_SPEC]) {
3272 struct nlattr *af;
3273 int rem;
3274
3275 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
3276 struct rtnl_af_ops *af_ops;
3277 int af_ops_srcu_index;
3278
3279 af_ops = rtnl_af_lookup(nla_type(af), &af_ops_srcu_index);
3280 if (!af_ops) {
3281 err = -EAFNOSUPPORT;
3282 goto errout;
3283 }
3284
3285 err = af_ops->set_link_af(dev, af, extack);
3286 rtnl_af_put(af_ops, af_ops_srcu_index);
3287
3288 if (err < 0)
3289 goto errout;
3290
3291 status |= DO_SETLINK_NOTIFY;
3292 }
3293 }
3294 err = 0;
3295
3296 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
3297 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
3298 tb[IFLA_PROTO_DOWN_REASON], extack);
3299 if (err)
3300 goto errout;
3301 status |= DO_SETLINK_NOTIFY;
3302 }
3303
3304 if (tb[IFLA_XDP]) {
3305 struct nlattr *xdp[IFLA_XDP_MAX + 1];
3306 u32 xdp_flags = 0;
3307
3308 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
3309 tb[IFLA_XDP],
3310 ifla_xdp_policy, NULL);
3311 if (err < 0)
3312 goto errout;
3313
3314 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
3315 err = -EINVAL;
3316 goto errout;
3317 }
3318
3319 if (xdp[IFLA_XDP_FLAGS]) {
3320 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
3321 if (xdp_flags & ~XDP_FLAGS_MASK) {
3322 err = -EINVAL;
3323 goto errout;
3324 }
3325 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
3326 err = -EINVAL;
3327 goto errout;
3328 }
3329 }
3330
3331 if (xdp[IFLA_XDP_FD]) {
3332 int expected_fd = -1;
3333
3334 if (xdp_flags & XDP_FLAGS_REPLACE) {
3335 if (!xdp[IFLA_XDP_EXPECTED_FD]) {
3336 err = -EINVAL;
3337 goto errout;
3338 }
3339 expected_fd =
3340 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
3341 }
3342
3343 err = dev_change_xdp_fd(dev, extack,
3344 nla_get_s32(xdp[IFLA_XDP_FD]),
3345 expected_fd,
3346 xdp_flags);
3347 if (err)
3348 goto errout;
3349 status |= DO_SETLINK_NOTIFY;
3350 }
3351 }
3352
3353errout:
3354 if (status & DO_SETLINK_MODIFIED) {
3355 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
3356 netdev_state_change(dev);
3357
3358 if (err < 0)
3359 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
3360 dev->name);
3361 }
3362
3363 return err;
3364}
3365
3366static struct net_device *rtnl_dev_get(struct net *net,
3367 struct nlattr *tb[])
3368{
3369 char ifname[ALTIFNAMSIZ];
3370
3371 if (tb[IFLA_IFNAME])
3372 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3373 else if (tb[IFLA_ALT_IFNAME])
3374 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
3375 else
3376 return NULL;
3377
3378 return __dev_get_by_name(net, ifname);
3379}
3380
3381static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3382 struct netlink_ext_ack *extack)
3383{
3384 struct ifinfomsg *ifm = nlmsg_data(nlh);
3385 struct net *net = sock_net(skb->sk);
3386 struct nlattr *tb[IFLA_MAX+1];
3387 struct net_device *dev = NULL;
3388 struct rtnl_nets rtnl_nets;
3389 struct net *tgt_net;
3390 int err;
3391
3392 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3393 ifla_policy, extack);
3394 if (err < 0)
3395 goto errout;
3396
3397 err = rtnl_ensure_unique_netns(tb, extack, false);
3398 if (err < 0)
3399 goto errout;
3400
3401 tgt_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3402 if (IS_ERR(tgt_net)) {
3403 err = PTR_ERR(tgt_net);
3404 goto errout;
3405 }
3406
3407 rtnl_nets_init(&rtnl_nets);
3408 rtnl_nets_add(&rtnl_nets, get_net(net));
3409 rtnl_nets_add(&rtnl_nets, tgt_net);
3410
3411 rtnl_nets_lock(&rtnl_nets);
3412
3413 if (ifm->ifi_index > 0)
3414 dev = __dev_get_by_index(net, ifm->ifi_index);
3415 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3416 dev = rtnl_dev_get(net, tb);
3417 else
3418 err = -EINVAL;
3419
3420 if (dev)
3421 err = do_setlink(skb, dev, tgt_net, ifm, extack, tb, 0);
3422 else if (!err)
3423 err = -ENODEV;
3424
3425 rtnl_nets_unlock(&rtnl_nets);
3426 rtnl_nets_destroy(&rtnl_nets);
3427errout:
3428 return err;
3429}
3430
3431static int rtnl_group_dellink(const struct net *net, int group)
3432{
3433 struct net_device *dev, *aux;
3434 LIST_HEAD(list_kill);
3435 bool found = false;
3436
3437 if (!group)
3438 return -EPERM;
3439
3440 for_each_netdev(net, dev) {
3441 if (dev->group == group) {
3442 const struct rtnl_link_ops *ops;
3443
3444 found = true;
3445 ops = dev->rtnl_link_ops;
3446 if (!ops || !ops->dellink)
3447 return -EOPNOTSUPP;
3448 }
3449 }
3450
3451 if (!found)
3452 return -ENODEV;
3453
3454 for_each_netdev_safe(net, dev, aux) {
3455 if (dev->group == group) {
3456 const struct rtnl_link_ops *ops;
3457
3458 ops = dev->rtnl_link_ops;
3459 ops->dellink(dev, &list_kill);
3460 }
3461 }
3462 unregister_netdevice_many(&list_kill);
3463
3464 return 0;
3465}
3466
3467int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh)
3468{
3469 const struct rtnl_link_ops *ops;
3470 LIST_HEAD(list_kill);
3471
3472 ops = dev->rtnl_link_ops;
3473 if (!ops || !ops->dellink)
3474 return -EOPNOTSUPP;
3475
3476 ops->dellink(dev, &list_kill);
3477 unregister_netdevice_many_notify(&list_kill, portid, nlh);
3478
3479 return 0;
3480}
3481EXPORT_SYMBOL_GPL(rtnl_delete_link);
3482
3483static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3484 struct netlink_ext_ack *extack)
3485{
3486 struct ifinfomsg *ifm = nlmsg_data(nlh);
3487 struct net *net = sock_net(skb->sk);
3488 u32 portid = NETLINK_CB(skb).portid;
3489 struct nlattr *tb[IFLA_MAX+1];
3490 struct net_device *dev = NULL;
3491 struct net *tgt_net = net;
3492 int netnsid = -1;
3493 int err;
3494
3495 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3496 ifla_policy, extack);
3497 if (err < 0)
3498 return err;
3499
3500 err = rtnl_ensure_unique_netns(tb, extack, true);
3501 if (err < 0)
3502 return err;
3503
3504 if (tb[IFLA_TARGET_NETNSID]) {
3505 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3506 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3507 if (IS_ERR(tgt_net))
3508 return PTR_ERR(tgt_net);
3509 }
3510
3511 rtnl_net_lock(tgt_net);
3512
3513 if (ifm->ifi_index > 0)
3514 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3515 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3516 dev = rtnl_dev_get(tgt_net, tb);
3517
3518 if (dev)
3519 err = rtnl_delete_link(dev, portid, nlh);
3520 else if (ifm->ifi_index > 0 || tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3521 err = -ENODEV;
3522 else if (tb[IFLA_GROUP])
3523 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
3524 else
3525 err = -EINVAL;
3526
3527 rtnl_net_unlock(tgt_net);
3528
3529 if (netnsid >= 0)
3530 put_net(tgt_net);
3531
3532 return err;
3533}
3534
3535int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
3536 u32 portid, const struct nlmsghdr *nlh)
3537{
3538 unsigned int old_flags;
3539 int err;
3540
3541 old_flags = dev->flags;
3542 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
3543 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3544 NULL);
3545 if (err < 0)
3546 return err;
3547 }
3548
3549 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
3550 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh);
3551 } else {
3552 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
3553 __dev_notify_flags(dev, old_flags, ~0U, portid, nlh);
3554 }
3555 return 0;
3556}
3557EXPORT_SYMBOL(rtnl_configure_link);
3558
3559struct net_device *rtnl_create_link(struct net *net, const char *ifname,
3560 unsigned char name_assign_type,
3561 const struct rtnl_link_ops *ops,
3562 struct nlattr *tb[],
3563 struct netlink_ext_ack *extack)
3564{
3565 struct net_device *dev;
3566 unsigned int num_tx_queues = 1;
3567 unsigned int num_rx_queues = 1;
3568 int err;
3569
3570 if (tb[IFLA_NUM_TX_QUEUES])
3571 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
3572 else if (ops->get_num_tx_queues)
3573 num_tx_queues = ops->get_num_tx_queues();
3574
3575 if (tb[IFLA_NUM_RX_QUEUES])
3576 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
3577 else if (ops->get_num_rx_queues)
3578 num_rx_queues = ops->get_num_rx_queues();
3579
3580 if (num_tx_queues < 1 || num_tx_queues > 4096) {
3581 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
3582 return ERR_PTR(-EINVAL);
3583 }
3584
3585 if (num_rx_queues < 1 || num_rx_queues > 4096) {
3586 NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
3587 return ERR_PTR(-EINVAL);
3588 }
3589
3590 if (ops->alloc) {
3591 dev = ops->alloc(tb, ifname, name_assign_type,
3592 num_tx_queues, num_rx_queues);
3593 if (IS_ERR(dev))
3594 return dev;
3595 } else {
3596 dev = alloc_netdev_mqs(ops->priv_size, ifname,
3597 name_assign_type, ops->setup,
3598 num_tx_queues, num_rx_queues);
3599 }
3600
3601 if (!dev)
3602 return ERR_PTR(-ENOMEM);
3603
3604 err = validate_linkmsg(dev, tb, extack);
3605 if (err < 0) {
3606 free_netdev(dev);
3607 return ERR_PTR(err);
3608 }
3609
3610 dev_net_set(dev, net);
3611 dev->rtnl_link_ops = ops;
3612 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
3613
3614 if (tb[IFLA_MTU]) {
3615 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
3616
3617 err = dev_validate_mtu(dev, mtu, extack);
3618 if (err) {
3619 free_netdev(dev);
3620 return ERR_PTR(err);
3621 }
3622 dev->mtu = mtu;
3623 }
3624 if (tb[IFLA_ADDRESS]) {
3625 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
3626 nla_len(tb[IFLA_ADDRESS]));
3627 dev->addr_assign_type = NET_ADDR_SET;
3628 }
3629 if (tb[IFLA_BROADCAST])
3630 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3631 nla_len(tb[IFLA_BROADCAST]));
3632 if (tb[IFLA_TXQLEN])
3633 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3634 if (tb[IFLA_OPERSTATE])
3635 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3636 if (tb[IFLA_LINKMODE])
3637 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
3638 if (tb[IFLA_GROUP])
3639 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3640 if (tb[IFLA_GSO_MAX_SIZE])
3641 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3642 if (tb[IFLA_GSO_MAX_SEGS])
3643 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
3644 if (tb[IFLA_GRO_MAX_SIZE])
3645 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
3646 if (tb[IFLA_GSO_IPV4_MAX_SIZE])
3647 netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]));
3648 if (tb[IFLA_GRO_IPV4_MAX_SIZE])
3649 netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]));
3650
3651 return dev;
3652}
3653EXPORT_SYMBOL(rtnl_create_link);
3654
3655struct rtnl_newlink_tbs {
3656 struct nlattr *tb[IFLA_MAX + 1];
3657 struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
3658 struct nlattr *attr[RTNL_MAX_TYPE + 1];
3659 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
3660};
3661
3662static int rtnl_changelink(const struct sk_buff *skb, struct nlmsghdr *nlh,
3663 const struct rtnl_link_ops *ops,
3664 struct net_device *dev, struct net *tgt_net,
3665 struct rtnl_newlink_tbs *tbs,
3666 struct nlattr **data,
3667 struct netlink_ext_ack *extack)
3668{
3669 struct nlattr ** const linkinfo = tbs->linkinfo;
3670 struct nlattr ** const tb = tbs->tb;
3671 int status = 0;
3672 int err;
3673
3674 if (nlh->nlmsg_flags & NLM_F_EXCL)
3675 return -EEXIST;
3676
3677 if (nlh->nlmsg_flags & NLM_F_REPLACE)
3678 return -EOPNOTSUPP;
3679
3680 if (linkinfo[IFLA_INFO_DATA]) {
3681 if (!ops || ops != dev->rtnl_link_ops || !ops->changelink)
3682 return -EOPNOTSUPP;
3683
3684 err = ops->changelink(dev, tb, data, extack);
3685 if (err < 0)
3686 return err;
3687
3688 status |= DO_SETLINK_NOTIFY;
3689 }
3690
3691 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3692 const struct rtnl_link_ops *m_ops = NULL;
3693 struct nlattr **slave_data = NULL;
3694 struct net_device *master_dev;
3695
3696 master_dev = netdev_master_upper_dev_get(dev);
3697 if (master_dev)
3698 m_ops = master_dev->rtnl_link_ops;
3699
3700 if (!m_ops || !m_ops->slave_changelink)
3701 return -EOPNOTSUPP;
3702
3703 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3704 return -EINVAL;
3705
3706 if (m_ops->slave_maxtype) {
3707 err = nla_parse_nested_deprecated(tbs->slave_attr,
3708 m_ops->slave_maxtype,
3709 linkinfo[IFLA_INFO_SLAVE_DATA],
3710 m_ops->slave_policy, extack);
3711 if (err < 0)
3712 return err;
3713
3714 slave_data = tbs->slave_attr;
3715 }
3716
3717 err = m_ops->slave_changelink(master_dev, dev, tb, slave_data, extack);
3718 if (err < 0)
3719 return err;
3720
3721 status |= DO_SETLINK_NOTIFY;
3722 }
3723
3724 return do_setlink(skb, dev, tgt_net, nlmsg_data(nlh), extack, tb, status);
3725}
3726
3727static int rtnl_group_changelink(const struct sk_buff *skb,
3728 struct net *net, struct net *tgt_net,
3729 int group, struct ifinfomsg *ifm,
3730 struct netlink_ext_ack *extack,
3731 struct nlattr **tb)
3732{
3733 struct net_device *dev, *aux;
3734 int err;
3735
3736 for_each_netdev_safe(net, dev, aux) {
3737 if (dev->group == group) {
3738 err = do_setlink(skb, dev, tgt_net, ifm, extack, tb, 0);
3739 if (err < 0)
3740 return err;
3741 }
3742 }
3743
3744 return 0;
3745}
3746
3747static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
3748 const struct rtnl_link_ops *ops,
3749 struct net *tgt_net, struct net *link_net,
3750 struct net *peer_net,
3751 const struct nlmsghdr *nlh,
3752 struct nlattr **tb, struct nlattr **data,
3753 struct netlink_ext_ack *extack)
3754{
3755 unsigned char name_assign_type = NET_NAME_USER;
3756 struct net *net = sock_net(skb->sk);
3757 u32 portid = NETLINK_CB(skb).portid;
3758 struct net_device *dev;
3759 char ifname[IFNAMSIZ];
3760 int err;
3761
3762 if (!ops->alloc && !ops->setup)
3763 return -EOPNOTSUPP;
3764
3765 if (tb[IFLA_IFNAME]) {
3766 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3767 } else {
3768 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3769 name_assign_type = NET_NAME_ENUM;
3770 }
3771
3772 dev = rtnl_create_link(link_net ? : tgt_net, ifname,
3773 name_assign_type, ops, tb, extack);
3774 if (IS_ERR(dev)) {
3775 err = PTR_ERR(dev);
3776 goto out;
3777 }
3778
3779 dev->ifindex = ifm->ifi_index;
3780
3781 if (link_net)
3782 net = link_net;
3783 if (peer_net)
3784 net = peer_net;
3785
3786 if (ops->newlink)
3787 err = ops->newlink(net, dev, tb, data, extack);
3788 else
3789 err = register_netdevice(dev);
3790 if (err < 0) {
3791 free_netdev(dev);
3792 goto out;
3793 }
3794
3795 err = rtnl_configure_link(dev, ifm, portid, nlh);
3796 if (err < 0)
3797 goto out_unregister;
3798 if (link_net) {
3799 err = dev_change_net_namespace(dev, tgt_net, ifname);
3800 if (err < 0)
3801 goto out_unregister;
3802 }
3803 if (tb[IFLA_MASTER]) {
3804 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3805 if (err)
3806 goto out_unregister;
3807 }
3808out:
3809 return err;
3810out_unregister:
3811 if (ops->newlink) {
3812 LIST_HEAD(list_kill);
3813
3814 ops->dellink(dev, &list_kill);
3815 unregister_netdevice_many(&list_kill);
3816 } else {
3817 unregister_netdevice(dev);
3818 }
3819 goto out;
3820}
3821
3822static struct net *rtnl_get_peer_net(const struct rtnl_link_ops *ops,
3823 struct nlattr *tbp[],
3824 struct nlattr *data[],
3825 struct netlink_ext_ack *extack)
3826{
3827 struct nlattr *tb[IFLA_MAX + 1];
3828 int err;
3829
3830 if (!data || !data[ops->peer_type])
3831 return rtnl_link_get_net_ifla(tbp);
3832
3833 err = rtnl_nla_parse_ifinfomsg(tb, data[ops->peer_type], extack);
3834 if (err < 0)
3835 return ERR_PTR(err);
3836
3837 if (ops->validate) {
3838 err = ops->validate(tb, NULL, extack);
3839 if (err < 0)
3840 return ERR_PTR(err);
3841 }
3842
3843 return rtnl_link_get_net_ifla(tb);
3844}
3845
3846static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3847 const struct rtnl_link_ops *ops,
3848 struct net *tgt_net, struct net *link_net,
3849 struct net *peer_net,
3850 struct rtnl_newlink_tbs *tbs,
3851 struct nlattr **data,
3852 struct netlink_ext_ack *extack)
3853{
3854 struct nlattr ** const tb = tbs->tb;
3855 struct net *net = sock_net(skb->sk);
3856 struct net_device *dev;
3857 struct ifinfomsg *ifm;
3858 bool link_specified;
3859
3860 ifm = nlmsg_data(nlh);
3861 if (ifm->ifi_index > 0) {
3862 link_specified = true;
3863 dev = __dev_get_by_index(net, ifm->ifi_index);
3864 } else if (ifm->ifi_index < 0) {
3865 NL_SET_ERR_MSG(extack, "ifindex can't be negative");
3866 return -EINVAL;
3867 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
3868 link_specified = true;
3869 dev = rtnl_dev_get(net, tb);
3870 } else {
3871 link_specified = false;
3872 dev = NULL;
3873 }
3874
3875 if (dev)
3876 return rtnl_changelink(skb, nlh, ops, dev, tgt_net, tbs, data, extack);
3877
3878 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
3879 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
3880 * or it's for a group
3881 */
3882 if (link_specified || !tb[IFLA_GROUP])
3883 return -ENODEV;
3884
3885 return rtnl_group_changelink(skb, net, tgt_net,
3886 nla_get_u32(tb[IFLA_GROUP]),
3887 ifm, extack, tb);
3888 }
3889
3890 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3891 return -EOPNOTSUPP;
3892
3893 if (!ops) {
3894 NL_SET_ERR_MSG(extack, "Unknown device type");
3895 return -EOPNOTSUPP;
3896 }
3897
3898 return rtnl_newlink_create(skb, ifm, ops, tgt_net, link_net, peer_net, nlh,
3899 tb, data, extack);
3900}
3901
3902static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3903 struct netlink_ext_ack *extack)
3904{
3905 struct net *tgt_net, *link_net = NULL, *peer_net = NULL;
3906 struct nlattr **tb, **linkinfo, **data = NULL;
3907 struct rtnl_link_ops *ops = NULL;
3908 struct rtnl_newlink_tbs *tbs;
3909 struct rtnl_nets rtnl_nets;
3910 int ops_srcu_index;
3911 int ret;
3912
3913 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
3914 if (!tbs)
3915 return -ENOMEM;
3916
3917 tb = tbs->tb;
3918 ret = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), tb,
3919 IFLA_MAX, ifla_policy, extack);
3920 if (ret < 0)
3921 goto free;
3922
3923 ret = rtnl_ensure_unique_netns(tb, extack, false);
3924 if (ret < 0)
3925 goto free;
3926
3927 linkinfo = tbs->linkinfo;
3928 if (tb[IFLA_LINKINFO]) {
3929 ret = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
3930 tb[IFLA_LINKINFO],
3931 ifla_info_policy, NULL);
3932 if (ret < 0)
3933 goto free;
3934 } else {
3935 memset(linkinfo, 0, sizeof(tbs->linkinfo));
3936 }
3937
3938 if (linkinfo[IFLA_INFO_KIND]) {
3939 char kind[MODULE_NAME_LEN];
3940
3941 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
3942 ops = rtnl_link_ops_get(kind, &ops_srcu_index);
3943#ifdef CONFIG_MODULES
3944 if (!ops) {
3945 request_module("rtnl-link-%s", kind);
3946 ops = rtnl_link_ops_get(kind, &ops_srcu_index);
3947 }
3948#endif
3949 }
3950
3951 rtnl_nets_init(&rtnl_nets);
3952
3953 if (ops) {
3954 if (ops->maxtype > RTNL_MAX_TYPE) {
3955 ret = -EINVAL;
3956 goto put_ops;
3957 }
3958
3959 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
3960 ret = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
3961 linkinfo[IFLA_INFO_DATA],
3962 ops->policy, extack);
3963 if (ret < 0)
3964 goto put_ops;
3965
3966 data = tbs->attr;
3967 }
3968
3969 if (ops->validate) {
3970 ret = ops->validate(tb, data, extack);
3971 if (ret < 0)
3972 goto put_ops;
3973 }
3974
3975 if (ops->peer_type) {
3976 peer_net = rtnl_get_peer_net(ops, tb, data, extack);
3977 if (IS_ERR(peer_net)) {
3978 ret = PTR_ERR(peer_net);
3979 goto put_ops;
3980 }
3981 if (peer_net)
3982 rtnl_nets_add(&rtnl_nets, peer_net);
3983 }
3984 }
3985
3986 tgt_net = rtnl_link_get_net_capable(skb, sock_net(skb->sk), tb, CAP_NET_ADMIN);
3987 if (IS_ERR(tgt_net)) {
3988 ret = PTR_ERR(tgt_net);
3989 goto put_net;
3990 }
3991
3992 rtnl_nets_add(&rtnl_nets, tgt_net);
3993
3994 if (tb[IFLA_LINK_NETNSID]) {
3995 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
3996
3997 link_net = get_net_ns_by_id(tgt_net, id);
3998 if (!link_net) {
3999 NL_SET_ERR_MSG(extack, "Unknown network namespace id");
4000 ret = -EINVAL;
4001 goto put_net;
4002 }
4003
4004 rtnl_nets_add(&rtnl_nets, link_net);
4005
4006 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) {
4007 ret = -EPERM;
4008 goto put_net;
4009 }
4010 }
4011
4012 rtnl_nets_lock(&rtnl_nets);
4013 ret = __rtnl_newlink(skb, nlh, ops, tgt_net, link_net, peer_net, tbs, data, extack);
4014 rtnl_nets_unlock(&rtnl_nets);
4015
4016put_net:
4017 rtnl_nets_destroy(&rtnl_nets);
4018put_ops:
4019 if (ops)
4020 rtnl_link_ops_put(ops, ops_srcu_index);
4021free:
4022 kfree(tbs);
4023 return ret;
4024}
4025
4026static int rtnl_valid_getlink_req(struct sk_buff *skb,
4027 const struct nlmsghdr *nlh,
4028 struct nlattr **tb,
4029 struct netlink_ext_ack *extack)
4030{
4031 struct ifinfomsg *ifm;
4032 int i, err;
4033
4034 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
4035 NL_SET_ERR_MSG(extack, "Invalid header for get link");
4036 return -EINVAL;
4037 }
4038
4039 if (!netlink_strict_get_check(skb))
4040 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
4041 ifla_policy, extack);
4042
4043 ifm = nlmsg_data(nlh);
4044 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
4045 ifm->ifi_change) {
4046 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
4047 return -EINVAL;
4048 }
4049
4050 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
4051 ifla_policy, extack);
4052 if (err)
4053 return err;
4054
4055 for (i = 0; i <= IFLA_MAX; i++) {
4056 if (!tb[i])
4057 continue;
4058
4059 switch (i) {
4060 case IFLA_IFNAME:
4061 case IFLA_ALT_IFNAME:
4062 case IFLA_EXT_MASK:
4063 case IFLA_TARGET_NETNSID:
4064 break;
4065 default:
4066 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
4067 return -EINVAL;
4068 }
4069 }
4070
4071 return 0;
4072}
4073
4074static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
4075 struct netlink_ext_ack *extack)
4076{
4077 struct net *net = sock_net(skb->sk);
4078 struct net *tgt_net = net;
4079 struct ifinfomsg *ifm;
4080 struct nlattr *tb[IFLA_MAX+1];
4081 struct net_device *dev = NULL;
4082 struct sk_buff *nskb;
4083 int netnsid = -1;
4084 int err;
4085 u32 ext_filter_mask = 0;
4086
4087 err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
4088 if (err < 0)
4089 return err;
4090
4091 err = rtnl_ensure_unique_netns(tb, extack, true);
4092 if (err < 0)
4093 return err;
4094
4095 if (tb[IFLA_TARGET_NETNSID]) {
4096 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
4097 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
4098 if (IS_ERR(tgt_net))
4099 return PTR_ERR(tgt_net);
4100 }
4101
4102 if (tb[IFLA_EXT_MASK])
4103 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
4104
4105 err = -EINVAL;
4106 ifm = nlmsg_data(nlh);
4107 if (ifm->ifi_index > 0)
4108 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
4109 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
4110 dev = rtnl_dev_get(tgt_net, tb);
4111 else
4112 goto out;
4113
4114 err = -ENODEV;
4115 if (dev == NULL)
4116 goto out;
4117
4118 err = -ENOBUFS;
4119 nskb = nlmsg_new_large(if_nlmsg_size(dev, ext_filter_mask));
4120 if (nskb == NULL)
4121 goto out;
4122
4123 /* Synchronize the carrier state so we don't report a state
4124 * that we're not actually going to honour immediately; if
4125 * the driver just did a carrier off->on transition, we can
4126 * only TX if link watch work has run, but without this we'd
4127 * already report carrier on, even if it doesn't work yet.
4128 */
4129 linkwatch_sync_dev(dev);
4130
4131 err = rtnl_fill_ifinfo(nskb, dev, net,
4132 RTM_NEWLINK, NETLINK_CB(skb).portid,
4133 nlh->nlmsg_seq, 0, 0, ext_filter_mask,
4134 0, NULL, 0, netnsid, GFP_KERNEL);
4135 if (err < 0) {
4136 /* -EMSGSIZE implies BUG in if_nlmsg_size */
4137 WARN_ON(err == -EMSGSIZE);
4138 kfree_skb(nskb);
4139 } else
4140 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
4141out:
4142 if (netnsid >= 0)
4143 put_net(tgt_net);
4144
4145 return err;
4146}
4147
4148static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
4149 bool *changed, struct netlink_ext_ack *extack)
4150{
4151 char *alt_ifname;
4152 size_t size;
4153 int err;
4154
4155 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
4156 if (err)
4157 return err;
4158
4159 if (cmd == RTM_NEWLINKPROP) {
4160 size = rtnl_prop_list_size(dev);
4161 size += nla_total_size(ALTIFNAMSIZ);
4162 if (size >= U16_MAX) {
4163 NL_SET_ERR_MSG(extack,
4164 "effective property list too long");
4165 return -EINVAL;
4166 }
4167 }
4168
4169 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
4170 if (!alt_ifname)
4171 return -ENOMEM;
4172
4173 if (cmd == RTM_NEWLINKPROP) {
4174 err = netdev_name_node_alt_create(dev, alt_ifname);
4175 if (!err)
4176 alt_ifname = NULL;
4177 } else if (cmd == RTM_DELLINKPROP) {
4178 err = netdev_name_node_alt_destroy(dev, alt_ifname);
4179 } else {
4180 WARN_ON_ONCE(1);
4181 err = -EINVAL;
4182 }
4183
4184 kfree(alt_ifname);
4185 if (!err)
4186 *changed = true;
4187 return err;
4188}
4189
4190static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
4191 struct netlink_ext_ack *extack)
4192{
4193 struct net *net = sock_net(skb->sk);
4194 struct nlattr *tb[IFLA_MAX + 1];
4195 struct net_device *dev;
4196 struct ifinfomsg *ifm;
4197 bool changed = false;
4198 struct nlattr *attr;
4199 int err, rem;
4200
4201 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
4202 if (err)
4203 return err;
4204
4205 err = rtnl_ensure_unique_netns(tb, extack, true);
4206 if (err)
4207 return err;
4208
4209 ifm = nlmsg_data(nlh);
4210 if (ifm->ifi_index > 0)
4211 dev = __dev_get_by_index(net, ifm->ifi_index);
4212 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
4213 dev = rtnl_dev_get(net, tb);
4214 else
4215 return -EINVAL;
4216
4217 if (!dev)
4218 return -ENODEV;
4219
4220 if (!tb[IFLA_PROP_LIST])
4221 return 0;
4222
4223 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
4224 switch (nla_type(attr)) {
4225 case IFLA_ALT_IFNAME:
4226 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
4227 if (err)
4228 return err;
4229 break;
4230 }
4231 }
4232
4233 if (changed)
4234 netdev_state_change(dev);
4235 return 0;
4236}
4237
4238static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
4239 struct netlink_ext_ack *extack)
4240{
4241 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
4242}
4243
4244static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
4245 struct netlink_ext_ack *extack)
4246{
4247 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
4248}
4249
4250static noinline_for_stack u32 rtnl_calcit(struct sk_buff *skb,
4251 struct nlmsghdr *nlh)
4252{
4253 struct net *net = sock_net(skb->sk);
4254 size_t min_ifinfo_dump_size = 0;
4255 u32 ext_filter_mask = 0;
4256 struct net_device *dev;
4257 struct nlattr *nla;
4258 int hdrlen, rem;
4259
4260 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
4261 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
4262 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
4263
4264 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
4265 return NLMSG_GOODSIZE;
4266
4267 nla_for_each_attr_type(nla, IFLA_EXT_MASK,
4268 nlmsg_attrdata(nlh, hdrlen),
4269 nlmsg_attrlen(nlh, hdrlen), rem) {
4270 if (nla_len(nla) == sizeof(u32))
4271 ext_filter_mask = nla_get_u32(nla);
4272 }
4273
4274 if (!ext_filter_mask)
4275 return NLMSG_GOODSIZE;
4276 /*
4277 * traverse the list of net devices and compute the minimum
4278 * buffer size based upon the filter mask.
4279 */
4280 rcu_read_lock();
4281 for_each_netdev_rcu(net, dev) {
4282 min_ifinfo_dump_size = max(min_ifinfo_dump_size,
4283 if_nlmsg_size(dev, ext_filter_mask));
4284 }
4285 rcu_read_unlock();
4286
4287 return nlmsg_total_size(min_ifinfo_dump_size);
4288}
4289
4290static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
4291{
4292 int idx;
4293 int s_idx = cb->family;
4294 int type = cb->nlh->nlmsg_type - RTM_BASE;
4295 int ret = 0;
4296
4297 if (s_idx == 0)
4298 s_idx = 1;
4299
4300 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
4301 struct rtnl_link __rcu **tab;
4302 struct rtnl_link *link;
4303 rtnl_dumpit_func dumpit;
4304
4305 if (idx < s_idx || idx == PF_PACKET)
4306 continue;
4307
4308 if (type < 0 || type >= RTM_NR_MSGTYPES)
4309 continue;
4310
4311 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
4312 if (!tab)
4313 continue;
4314
4315 link = rcu_dereference_rtnl(tab[type]);
4316 if (!link)
4317 continue;
4318
4319 dumpit = link->dumpit;
4320 if (!dumpit)
4321 continue;
4322
4323 if (idx > s_idx) {
4324 memset(&cb->args[0], 0, sizeof(cb->args));
4325 cb->prev_seq = 0;
4326 cb->seq = 0;
4327 }
4328 ret = dumpit(skb, cb);
4329 if (ret)
4330 break;
4331 }
4332 cb->family = idx;
4333
4334 return skb->len ? : ret;
4335}
4336
4337struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
4338 unsigned int change,
4339 u32 event, gfp_t flags, int *new_nsid,
4340 int new_ifindex, u32 portid,
4341 const struct nlmsghdr *nlh)
4342{
4343 struct net *net = dev_net(dev);
4344 struct sk_buff *skb;
4345 int err = -ENOBUFS;
4346 u32 seq = 0;
4347
4348 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
4349 if (skb == NULL)
4350 goto errout;
4351
4352 if (nlmsg_report(nlh))
4353 seq = nlmsg_seq(nlh);
4354 else
4355 portid = 0;
4356
4357 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
4358 type, portid, seq, change, 0, 0, event,
4359 new_nsid, new_ifindex, -1, flags);
4360 if (err < 0) {
4361 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
4362 WARN_ON(err == -EMSGSIZE);
4363 kfree_skb(skb);
4364 goto errout;
4365 }
4366 return skb;
4367errout:
4368 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
4369 return NULL;
4370}
4371
4372void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags,
4373 u32 portid, const struct nlmsghdr *nlh)
4374{
4375 struct net *net = dev_net(dev);
4376
4377 rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags);
4378}
4379
4380static void rtmsg_ifinfo_event(int type, struct net_device *dev,
4381 unsigned int change, u32 event,
4382 gfp_t flags, int *new_nsid, int new_ifindex,
4383 u32 portid, const struct nlmsghdr *nlh)
4384{
4385 struct sk_buff *skb;
4386
4387 if (dev->reg_state != NETREG_REGISTERED)
4388 return;
4389
4390 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
4391 new_ifindex, portid, nlh);
4392 if (skb)
4393 rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
4394}
4395
4396void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
4397 gfp_t flags, u32 portid, const struct nlmsghdr *nlh)
4398{
4399 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
4400 NULL, 0, portid, nlh);
4401}
4402
4403void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
4404 gfp_t flags, int *new_nsid, int new_ifindex)
4405{
4406 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
4407 new_nsid, new_ifindex, 0, NULL);
4408}
4409
4410static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
4411 struct net_device *dev,
4412 u8 *addr, u16 vid, u32 pid, u32 seq,
4413 int type, unsigned int flags,
4414 int nlflags, u16 ndm_state)
4415{
4416 struct nlmsghdr *nlh;
4417 struct ndmsg *ndm;
4418
4419 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
4420 if (!nlh)
4421 return -EMSGSIZE;
4422
4423 ndm = nlmsg_data(nlh);
4424 ndm->ndm_family = AF_BRIDGE;
4425 ndm->ndm_pad1 = 0;
4426 ndm->ndm_pad2 = 0;
4427 ndm->ndm_flags = flags;
4428 ndm->ndm_type = 0;
4429 ndm->ndm_ifindex = dev->ifindex;
4430 ndm->ndm_state = ndm_state;
4431
4432 if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr))
4433 goto nla_put_failure;
4434 if (vid)
4435 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
4436 goto nla_put_failure;
4437
4438 nlmsg_end(skb, nlh);
4439 return 0;
4440
4441nla_put_failure:
4442 nlmsg_cancel(skb, nlh);
4443 return -EMSGSIZE;
4444}
4445
4446static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev)
4447{
4448 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
4449 nla_total_size(dev->addr_len) + /* NDA_LLADDR */
4450 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
4451 0;
4452}
4453
4454static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
4455 u16 ndm_state)
4456{
4457 struct net *net = dev_net(dev);
4458 struct sk_buff *skb;
4459 int err = -ENOBUFS;
4460
4461 skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC);
4462 if (!skb)
4463 goto errout;
4464
4465 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
4466 0, 0, type, NTF_SELF, 0, ndm_state);
4467 if (err < 0) {
4468 kfree_skb(skb);
4469 goto errout;
4470 }
4471
4472 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
4473 return;
4474errout:
4475 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
4476}
4477
4478/*
4479 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
4480 */
4481int ndo_dflt_fdb_add(struct ndmsg *ndm,
4482 struct nlattr *tb[],
4483 struct net_device *dev,
4484 const unsigned char *addr, u16 vid,
4485 u16 flags)
4486{
4487 int err = -EINVAL;
4488
4489 /* If aging addresses are supported device will need to
4490 * implement its own handler for this.
4491 */
4492 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
4493 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4494 return err;
4495 }
4496
4497 if (tb[NDA_FLAGS_EXT]) {
4498 netdev_info(dev, "invalid flags given to default FDB implementation\n");
4499 return err;
4500 }
4501
4502 if (vid) {
4503 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
4504 return err;
4505 }
4506
4507 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4508 err = dev_uc_add_excl(dev, addr);
4509 else if (is_multicast_ether_addr(addr))
4510 err = dev_mc_add_excl(dev, addr);
4511
4512 /* Only return duplicate errors if NLM_F_EXCL is set */
4513 if (err == -EEXIST && !(flags & NLM_F_EXCL))
4514 err = 0;
4515
4516 return err;
4517}
4518EXPORT_SYMBOL(ndo_dflt_fdb_add);
4519
4520static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
4521 struct netlink_ext_ack *extack)
4522{
4523 u16 vid = 0;
4524
4525 if (vlan_attr) {
4526 if (nla_len(vlan_attr) != sizeof(u16)) {
4527 NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
4528 return -EINVAL;
4529 }
4530
4531 vid = nla_get_u16(vlan_attr);
4532
4533 if (!vid || vid >= VLAN_VID_MASK) {
4534 NL_SET_ERR_MSG(extack, "invalid vlan id");
4535 return -EINVAL;
4536 }
4537 }
4538 *p_vid = vid;
4539 return 0;
4540}
4541
4542static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
4543 struct netlink_ext_ack *extack)
4544{
4545 struct net *net = sock_net(skb->sk);
4546 struct ndmsg *ndm;
4547 struct nlattr *tb[NDA_MAX+1];
4548 struct net_device *dev;
4549 u8 *addr;
4550 u16 vid;
4551 int err;
4552
4553 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4554 extack);
4555 if (err < 0)
4556 return err;
4557
4558 ndm = nlmsg_data(nlh);
4559 if (ndm->ndm_ifindex == 0) {
4560 NL_SET_ERR_MSG(extack, "invalid ifindex");
4561 return -EINVAL;
4562 }
4563
4564 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4565 if (dev == NULL) {
4566 NL_SET_ERR_MSG(extack, "unknown ifindex");
4567 return -ENODEV;
4568 }
4569
4570 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4571 NL_SET_ERR_MSG(extack, "invalid address");
4572 return -EINVAL;
4573 }
4574
4575 if (dev->type != ARPHRD_ETHER) {
4576 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
4577 return -EINVAL;
4578 }
4579
4580 addr = nla_data(tb[NDA_LLADDR]);
4581
4582 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4583 if (err)
4584 return err;
4585
4586 err = -EOPNOTSUPP;
4587
4588 /* Support fdb on master device the net/bridge default case */
4589 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4590 netif_is_bridge_port(dev)) {
4591 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4592 const struct net_device_ops *ops = br_dev->netdev_ops;
4593 bool notified = false;
4594
4595 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
4596 nlh->nlmsg_flags, ¬ified, extack);
4597 if (err)
4598 goto out;
4599 else
4600 ndm->ndm_flags &= ~NTF_MASTER;
4601 }
4602
4603 /* Embedded bridge, macvlan, and any other device support */
4604 if ((ndm->ndm_flags & NTF_SELF)) {
4605 bool notified = false;
4606
4607 if (dev->netdev_ops->ndo_fdb_add)
4608 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
4609 vid,
4610 nlh->nlmsg_flags,
4611 ¬ified, extack);
4612 else
4613 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
4614 nlh->nlmsg_flags);
4615
4616 if (!err && !notified) {
4617 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4618 ndm->ndm_state);
4619 ndm->ndm_flags &= ~NTF_SELF;
4620 }
4621 }
4622out:
4623 return err;
4624}
4625
4626/*
4627 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
4628 */
4629int ndo_dflt_fdb_del(struct ndmsg *ndm,
4630 struct nlattr *tb[],
4631 struct net_device *dev,
4632 const unsigned char *addr, u16 vid)
4633{
4634 int err = -EINVAL;
4635
4636 /* If aging addresses are supported device will need to
4637 * implement its own handler for this.
4638 */
4639 if (!(ndm->ndm_state & NUD_PERMANENT)) {
4640 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4641 return err;
4642 }
4643
4644 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4645 err = dev_uc_del(dev, addr);
4646 else if (is_multicast_ether_addr(addr))
4647 err = dev_mc_del(dev, addr);
4648
4649 return err;
4650}
4651EXPORT_SYMBOL(ndo_dflt_fdb_del);
4652
4653static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
4654 struct netlink_ext_ack *extack)
4655{
4656 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
4657 struct net *net = sock_net(skb->sk);
4658 const struct net_device_ops *ops;
4659 struct ndmsg *ndm;
4660 struct nlattr *tb[NDA_MAX+1];
4661 struct net_device *dev;
4662 __u8 *addr = NULL;
4663 int err;
4664 u16 vid;
4665
4666 if (!netlink_capable(skb, CAP_NET_ADMIN))
4667 return -EPERM;
4668
4669 if (!del_bulk) {
4670 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
4671 NULL, extack);
4672 } else {
4673 /* For bulk delete, the drivers will parse the message with
4674 * policy.
4675 */
4676 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
4677 }
4678 if (err < 0)
4679 return err;
4680
4681 ndm = nlmsg_data(nlh);
4682 if (ndm->ndm_ifindex == 0) {
4683 NL_SET_ERR_MSG(extack, "invalid ifindex");
4684 return -EINVAL;
4685 }
4686
4687 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4688 if (dev == NULL) {
4689 NL_SET_ERR_MSG(extack, "unknown ifindex");
4690 return -ENODEV;
4691 }
4692
4693 if (!del_bulk) {
4694 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4695 NL_SET_ERR_MSG(extack, "invalid address");
4696 return -EINVAL;
4697 }
4698 addr = nla_data(tb[NDA_LLADDR]);
4699
4700 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4701 if (err)
4702 return err;
4703 }
4704
4705 if (dev->type != ARPHRD_ETHER) {
4706 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
4707 return -EINVAL;
4708 }
4709
4710 err = -EOPNOTSUPP;
4711
4712 /* Support fdb on master device the net/bridge default case */
4713 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4714 netif_is_bridge_port(dev)) {
4715 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4716 bool notified = false;
4717
4718 ops = br_dev->netdev_ops;
4719 if (!del_bulk) {
4720 if (ops->ndo_fdb_del)
4721 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid,
4722 ¬ified, extack);
4723 } else {
4724 if (ops->ndo_fdb_del_bulk)
4725 err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
4726 }
4727
4728 if (err)
4729 goto out;
4730 else
4731 ndm->ndm_flags &= ~NTF_MASTER;
4732 }
4733
4734 /* Embedded bridge, macvlan, and any other device support */
4735 if (ndm->ndm_flags & NTF_SELF) {
4736 bool notified = false;
4737
4738 ops = dev->netdev_ops;
4739 if (!del_bulk) {
4740 if (ops->ndo_fdb_del)
4741 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid,
4742 ¬ified, extack);
4743 else
4744 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4745 } else {
4746 /* in case err was cleared by NTF_MASTER call */
4747 err = -EOPNOTSUPP;
4748 if (ops->ndo_fdb_del_bulk)
4749 err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
4750 }
4751
4752 if (!err) {
4753 if (!del_bulk && !notified)
4754 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4755 ndm->ndm_state);
4756 ndm->ndm_flags &= ~NTF_SELF;
4757 }
4758 }
4759out:
4760 return err;
4761}
4762
4763static int nlmsg_populate_fdb(struct sk_buff *skb,
4764 struct netlink_callback *cb,
4765 struct net_device *dev,
4766 int *idx,
4767 struct netdev_hw_addr_list *list)
4768{
4769 struct netdev_hw_addr *ha;
4770 int err;
4771 u32 portid, seq;
4772
4773 portid = NETLINK_CB(cb->skb).portid;
4774 seq = cb->nlh->nlmsg_seq;
4775
4776 list_for_each_entry(ha, &list->list, list) {
4777 if (*idx < cb->args[2])
4778 goto skip;
4779
4780 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
4781 portid, seq,
4782 RTM_NEWNEIGH, NTF_SELF,
4783 NLM_F_MULTI, NUD_PERMANENT);
4784 if (err < 0)
4785 return err;
4786skip:
4787 *idx += 1;
4788 }
4789 return 0;
4790}
4791
4792/**
4793 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
4794 * @skb: socket buffer to store message in
4795 * @cb: netlink callback
4796 * @dev: netdevice
4797 * @filter_dev: ignored
4798 * @idx: the number of FDB table entries dumped is added to *@idx
4799 *
4800 * Default netdevice operation to dump the existing unicast address list.
4801 * Returns number of addresses from list put in skb.
4802 */
4803int ndo_dflt_fdb_dump(struct sk_buff *skb,
4804 struct netlink_callback *cb,
4805 struct net_device *dev,
4806 struct net_device *filter_dev,
4807 int *idx)
4808{
4809 int err;
4810
4811 if (dev->type != ARPHRD_ETHER)
4812 return -EINVAL;
4813
4814 netif_addr_lock_bh(dev);
4815 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
4816 if (err)
4817 goto out;
4818 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
4819out:
4820 netif_addr_unlock_bh(dev);
4821 return err;
4822}
4823EXPORT_SYMBOL(ndo_dflt_fdb_dump);
4824
4825static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
4826 int *br_idx, int *brport_idx,
4827 struct netlink_ext_ack *extack)
4828{
4829 struct nlattr *tb[NDA_MAX + 1];
4830 struct ndmsg *ndm;
4831 int err, i;
4832
4833 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4834 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
4835 return -EINVAL;
4836 }
4837
4838 ndm = nlmsg_data(nlh);
4839 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4840 ndm->ndm_flags || ndm->ndm_type) {
4841 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
4842 return -EINVAL;
4843 }
4844
4845 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4846 NDA_MAX, NULL, extack);
4847 if (err < 0)
4848 return err;
4849
4850 *brport_idx = ndm->ndm_ifindex;
4851 for (i = 0; i <= NDA_MAX; ++i) {
4852 if (!tb[i])
4853 continue;
4854
4855 switch (i) {
4856 case NDA_IFINDEX:
4857 if (nla_len(tb[i]) != sizeof(u32)) {
4858 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
4859 return -EINVAL;
4860 }
4861 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
4862 break;
4863 case NDA_MASTER:
4864 if (nla_len(tb[i]) != sizeof(u32)) {
4865 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
4866 return -EINVAL;
4867 }
4868 *br_idx = nla_get_u32(tb[NDA_MASTER]);
4869 break;
4870 default:
4871 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
4872 return -EINVAL;
4873 }
4874 }
4875
4876 return 0;
4877}
4878
4879static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
4880 int *br_idx, int *brport_idx,
4881 struct netlink_ext_ack *extack)
4882{
4883 struct nlattr *tb[IFLA_MAX+1];
4884 int err;
4885
4886 /* A hack to preserve kernel<->userspace interface.
4887 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
4888 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
4889 * So, check for ndmsg with an optional u32 attribute (not used here).
4890 * Fortunately these sizes don't conflict with the size of ifinfomsg
4891 * with an optional attribute.
4892 */
4893 if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
4894 (nlmsg_len(nlh) != sizeof(struct ndmsg) +
4895 nla_attr_size(sizeof(u32)))) {
4896 struct ifinfomsg *ifm;
4897
4898 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4899 tb, IFLA_MAX, ifla_policy,
4900 extack);
4901 if (err < 0) {
4902 return -EINVAL;
4903 } else if (err == 0) {
4904 if (tb[IFLA_MASTER])
4905 *br_idx = nla_get_u32(tb[IFLA_MASTER]);
4906 }
4907
4908 ifm = nlmsg_data(nlh);
4909 *brport_idx = ifm->ifi_index;
4910 }
4911 return 0;
4912}
4913
4914static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
4915{
4916 struct net_device *dev;
4917 struct net_device *br_dev = NULL;
4918 const struct net_device_ops *ops = NULL;
4919 const struct net_device_ops *cops = NULL;
4920 struct net *net = sock_net(skb->sk);
4921 struct hlist_head *head;
4922 int brport_idx = 0;
4923 int br_idx = 0;
4924 int h, s_h;
4925 int idx = 0, s_idx;
4926 int err = 0;
4927 int fidx = 0;
4928
4929 if (cb->strict_check)
4930 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
4931 cb->extack);
4932 else
4933 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
4934 cb->extack);
4935 if (err < 0)
4936 return err;
4937
4938 if (br_idx) {
4939 br_dev = __dev_get_by_index(net, br_idx);
4940 if (!br_dev)
4941 return -ENODEV;
4942
4943 ops = br_dev->netdev_ops;
4944 }
4945
4946 s_h = cb->args[0];
4947 s_idx = cb->args[1];
4948
4949 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4950 idx = 0;
4951 head = &net->dev_index_head[h];
4952 hlist_for_each_entry(dev, head, index_hlist) {
4953
4954 if (brport_idx && (dev->ifindex != brport_idx))
4955 continue;
4956
4957 if (!br_idx) { /* user did not specify a specific bridge */
4958 if (netif_is_bridge_port(dev)) {
4959 br_dev = netdev_master_upper_dev_get(dev);
4960 cops = br_dev->netdev_ops;
4961 }
4962 } else {
4963 if (dev != br_dev &&
4964 !netif_is_bridge_port(dev))
4965 continue;
4966
4967 if (br_dev != netdev_master_upper_dev_get(dev) &&
4968 !netif_is_bridge_master(dev))
4969 continue;
4970 cops = ops;
4971 }
4972
4973 if (idx < s_idx)
4974 goto cont;
4975
4976 if (netif_is_bridge_port(dev)) {
4977 if (cops && cops->ndo_fdb_dump) {
4978 err = cops->ndo_fdb_dump(skb, cb,
4979 br_dev, dev,
4980 &fidx);
4981 if (err == -EMSGSIZE)
4982 goto out;
4983 }
4984 }
4985
4986 if (dev->netdev_ops->ndo_fdb_dump)
4987 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4988 dev, NULL,
4989 &fidx);
4990 else
4991 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4992 &fidx);
4993 if (err == -EMSGSIZE)
4994 goto out;
4995
4996 cops = NULL;
4997
4998 /* reset fdb offset to 0 for rest of the interfaces */
4999 cb->args[2] = 0;
5000 fidx = 0;
5001cont:
5002 idx++;
5003 }
5004 }
5005
5006out:
5007 cb->args[0] = h;
5008 cb->args[1] = idx;
5009 cb->args[2] = fidx;
5010
5011 return skb->len;
5012}
5013
5014static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
5015 struct nlattr **tb, u8 *ndm_flags,
5016 int *br_idx, int *brport_idx, u8 **addr,
5017 u16 *vid, struct netlink_ext_ack *extack)
5018{
5019 struct ndmsg *ndm;
5020 int err, i;
5021
5022 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
5023 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
5024 return -EINVAL;
5025 }
5026
5027 ndm = nlmsg_data(nlh);
5028 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
5029 ndm->ndm_type) {
5030 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
5031 return -EINVAL;
5032 }
5033
5034 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
5035 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
5036 return -EINVAL;
5037 }
5038
5039 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
5040 NDA_MAX, nda_policy, extack);
5041 if (err < 0)
5042 return err;
5043
5044 *ndm_flags = ndm->ndm_flags;
5045 *brport_idx = ndm->ndm_ifindex;
5046 for (i = 0; i <= NDA_MAX; ++i) {
5047 if (!tb[i])
5048 continue;
5049
5050 switch (i) {
5051 case NDA_MASTER:
5052 *br_idx = nla_get_u32(tb[i]);
5053 break;
5054 case NDA_LLADDR:
5055 if (nla_len(tb[i]) != ETH_ALEN) {
5056 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
5057 return -EINVAL;
5058 }
5059 *addr = nla_data(tb[i]);
5060 break;
5061 case NDA_VLAN:
5062 err = fdb_vid_parse(tb[i], vid, extack);
5063 if (err)
5064 return err;
5065 break;
5066 case NDA_VNI:
5067 break;
5068 default:
5069 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
5070 return -EINVAL;
5071 }
5072 }
5073
5074 return 0;
5075}
5076
5077static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5078 struct netlink_ext_ack *extack)
5079{
5080 struct net_device *dev = NULL, *br_dev = NULL;
5081 const struct net_device_ops *ops = NULL;
5082 struct net *net = sock_net(in_skb->sk);
5083 struct nlattr *tb[NDA_MAX + 1];
5084 struct sk_buff *skb;
5085 int brport_idx = 0;
5086 u8 ndm_flags = 0;
5087 int br_idx = 0;
5088 u8 *addr = NULL;
5089 u16 vid = 0;
5090 int err;
5091
5092 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
5093 &brport_idx, &addr, &vid, extack);
5094 if (err < 0)
5095 return err;
5096
5097 if (!addr) {
5098 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
5099 return -EINVAL;
5100 }
5101
5102 if (brport_idx) {
5103 dev = __dev_get_by_index(net, brport_idx);
5104 if (!dev) {
5105 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
5106 return -ENODEV;
5107 }
5108 }
5109
5110 if (br_idx) {
5111 if (dev) {
5112 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
5113 return -EINVAL;
5114 }
5115
5116 br_dev = __dev_get_by_index(net, br_idx);
5117 if (!br_dev) {
5118 NL_SET_ERR_MSG(extack, "Invalid master ifindex");
5119 return -EINVAL;
5120 }
5121 ops = br_dev->netdev_ops;
5122 }
5123
5124 if (dev) {
5125 if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
5126 if (!netif_is_bridge_port(dev)) {
5127 NL_SET_ERR_MSG(extack, "Device is not a bridge port");
5128 return -EINVAL;
5129 }
5130 br_dev = netdev_master_upper_dev_get(dev);
5131 if (!br_dev) {
5132 NL_SET_ERR_MSG(extack, "Master of device not found");
5133 return -EINVAL;
5134 }
5135 ops = br_dev->netdev_ops;
5136 } else {
5137 if (!(ndm_flags & NTF_SELF)) {
5138 NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
5139 return -EINVAL;
5140 }
5141 ops = dev->netdev_ops;
5142 }
5143 }
5144
5145 if (!br_dev && !dev) {
5146 NL_SET_ERR_MSG(extack, "No device specified");
5147 return -ENODEV;
5148 }
5149
5150 if (!ops || !ops->ndo_fdb_get) {
5151 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
5152 return -EOPNOTSUPP;
5153 }
5154
5155 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
5156 if (!skb)
5157 return -ENOBUFS;
5158
5159 if (br_dev)
5160 dev = br_dev;
5161 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
5162 NETLINK_CB(in_skb).portid,
5163 nlh->nlmsg_seq, extack);
5164 if (err)
5165 goto out;
5166
5167 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
5168out:
5169 kfree_skb(skb);
5170 return err;
5171}
5172
5173static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
5174 unsigned int attrnum, unsigned int flag)
5175{
5176 if (mask & flag)
5177 return nla_put_u8(skb, attrnum, !!(flags & flag));
5178 return 0;
5179}
5180
5181int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
5182 struct net_device *dev, u16 mode,
5183 u32 flags, u32 mask, int nlflags,
5184 u32 filter_mask,
5185 int (*vlan_fill)(struct sk_buff *skb,
5186 struct net_device *dev,
5187 u32 filter_mask))
5188{
5189 struct nlmsghdr *nlh;
5190 struct ifinfomsg *ifm;
5191 struct nlattr *br_afspec;
5192 struct nlattr *protinfo;
5193 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
5194 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5195 int err = 0;
5196
5197 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
5198 if (nlh == NULL)
5199 return -EMSGSIZE;
5200
5201 ifm = nlmsg_data(nlh);
5202 ifm->ifi_family = AF_BRIDGE;
5203 ifm->__ifi_pad = 0;
5204 ifm->ifi_type = dev->type;
5205 ifm->ifi_index = dev->ifindex;
5206 ifm->ifi_flags = dev_get_flags(dev);
5207 ifm->ifi_change = 0;
5208
5209
5210 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5211 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
5212 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
5213 (br_dev &&
5214 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
5215 (dev->addr_len &&
5216 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
5217 (dev->ifindex != dev_get_iflink(dev) &&
5218 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
5219 goto nla_put_failure;
5220
5221 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
5222 if (!br_afspec)
5223 goto nla_put_failure;
5224
5225 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
5226 nla_nest_cancel(skb, br_afspec);
5227 goto nla_put_failure;
5228 }
5229
5230 if (mode != BRIDGE_MODE_UNDEF) {
5231 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
5232 nla_nest_cancel(skb, br_afspec);
5233 goto nla_put_failure;
5234 }
5235 }
5236 if (vlan_fill) {
5237 err = vlan_fill(skb, dev, filter_mask);
5238 if (err) {
5239 nla_nest_cancel(skb, br_afspec);
5240 goto nla_put_failure;
5241 }
5242 }
5243 nla_nest_end(skb, br_afspec);
5244
5245 protinfo = nla_nest_start(skb, IFLA_PROTINFO);
5246 if (!protinfo)
5247 goto nla_put_failure;
5248
5249 if (brport_nla_put_flag(skb, flags, mask,
5250 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
5251 brport_nla_put_flag(skb, flags, mask,
5252 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
5253 brport_nla_put_flag(skb, flags, mask,
5254 IFLA_BRPORT_FAST_LEAVE,
5255 BR_MULTICAST_FAST_LEAVE) ||
5256 brport_nla_put_flag(skb, flags, mask,
5257 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
5258 brport_nla_put_flag(skb, flags, mask,
5259 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
5260 brport_nla_put_flag(skb, flags, mask,
5261 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
5262 brport_nla_put_flag(skb, flags, mask,
5263 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
5264 brport_nla_put_flag(skb, flags, mask,
5265 IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
5266 brport_nla_put_flag(skb, flags, mask,
5267 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
5268 brport_nla_put_flag(skb, flags, mask,
5269 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
5270 nla_nest_cancel(skb, protinfo);
5271 goto nla_put_failure;
5272 }
5273
5274 nla_nest_end(skb, protinfo);
5275
5276 nlmsg_end(skb, nlh);
5277 return 0;
5278nla_put_failure:
5279 nlmsg_cancel(skb, nlh);
5280 return err ? err : -EMSGSIZE;
5281}
5282EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
5283
5284static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
5285 bool strict_check, u32 *filter_mask,
5286 struct netlink_ext_ack *extack)
5287{
5288 struct nlattr *tb[IFLA_MAX+1];
5289 int err, i;
5290
5291 if (strict_check) {
5292 struct ifinfomsg *ifm;
5293
5294 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5295 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
5296 return -EINVAL;
5297 }
5298
5299 ifm = nlmsg_data(nlh);
5300 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
5301 ifm->ifi_change || ifm->ifi_index) {
5302 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
5303 return -EINVAL;
5304 }
5305
5306 err = nlmsg_parse_deprecated_strict(nlh,
5307 sizeof(struct ifinfomsg),
5308 tb, IFLA_MAX, ifla_policy,
5309 extack);
5310 } else {
5311 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
5312 tb, IFLA_MAX, ifla_policy,
5313 extack);
5314 }
5315 if (err < 0)
5316 return err;
5317
5318 /* new attributes should only be added with strict checking */
5319 for (i = 0; i <= IFLA_MAX; ++i) {
5320 if (!tb[i])
5321 continue;
5322
5323 switch (i) {
5324 case IFLA_EXT_MASK:
5325 *filter_mask = nla_get_u32(tb[i]);
5326 break;
5327 default:
5328 if (strict_check) {
5329 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
5330 return -EINVAL;
5331 }
5332 }
5333 }
5334
5335 return 0;
5336}
5337
5338static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
5339{
5340 const struct nlmsghdr *nlh = cb->nlh;
5341 struct net *net = sock_net(skb->sk);
5342 struct net_device *dev;
5343 int idx = 0;
5344 u32 portid = NETLINK_CB(cb->skb).portid;
5345 u32 seq = nlh->nlmsg_seq;
5346 u32 filter_mask = 0;
5347 int err;
5348
5349 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
5350 cb->extack);
5351 if (err < 0 && cb->strict_check)
5352 return err;
5353
5354 rcu_read_lock();
5355 for_each_netdev_rcu(net, dev) {
5356 const struct net_device_ops *ops = dev->netdev_ops;
5357 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5358
5359 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
5360 if (idx >= cb->args[0]) {
5361 err = br_dev->netdev_ops->ndo_bridge_getlink(
5362 skb, portid, seq, dev,
5363 filter_mask, NLM_F_MULTI);
5364 if (err < 0 && err != -EOPNOTSUPP) {
5365 if (likely(skb->len))
5366 break;
5367
5368 goto out_err;
5369 }
5370 }
5371 idx++;
5372 }
5373
5374 if (ops->ndo_bridge_getlink) {
5375 if (idx >= cb->args[0]) {
5376 err = ops->ndo_bridge_getlink(skb, portid,
5377 seq, dev,
5378 filter_mask,
5379 NLM_F_MULTI);
5380 if (err < 0 && err != -EOPNOTSUPP) {
5381 if (likely(skb->len))
5382 break;
5383
5384 goto out_err;
5385 }
5386 }
5387 idx++;
5388 }
5389 }
5390 err = skb->len;
5391out_err:
5392 rcu_read_unlock();
5393 cb->args[0] = idx;
5394
5395 return err;
5396}
5397
5398static inline size_t bridge_nlmsg_size(void)
5399{
5400 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5401 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5402 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5403 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
5404 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
5405 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
5406 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
5407 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
5408 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
5409 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
5410 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
5411}
5412
5413static int rtnl_bridge_notify(struct net_device *dev)
5414{
5415 struct net *net = dev_net(dev);
5416 struct sk_buff *skb;
5417 int err = -EOPNOTSUPP;
5418
5419 if (!dev->netdev_ops->ndo_bridge_getlink)
5420 return 0;
5421
5422 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
5423 if (!skb) {
5424 err = -ENOMEM;
5425 goto errout;
5426 }
5427
5428 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
5429 if (err < 0)
5430 goto errout;
5431
5432 /* Notification info is only filled for bridge ports, not the bridge
5433 * device itself. Therefore, a zero notification length is valid and
5434 * should not result in an error.
5435 */
5436 if (!skb->len)
5437 goto errout;
5438
5439 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
5440 return 0;
5441errout:
5442 WARN_ON(err == -EMSGSIZE);
5443 kfree_skb(skb);
5444 if (err)
5445 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
5446 return err;
5447}
5448
5449static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
5450 struct netlink_ext_ack *extack)
5451{
5452 struct net *net = sock_net(skb->sk);
5453 struct ifinfomsg *ifm;
5454 struct net_device *dev;
5455 struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
5456 int rem, err = -EOPNOTSUPP;
5457 u16 flags = 0;
5458
5459 if (nlmsg_len(nlh) < sizeof(*ifm))
5460 return -EINVAL;
5461
5462 ifm = nlmsg_data(nlh);
5463 if (ifm->ifi_family != AF_BRIDGE)
5464 return -EPFNOSUPPORT;
5465
5466 dev = __dev_get_by_index(net, ifm->ifi_index);
5467 if (!dev) {
5468 NL_SET_ERR_MSG(extack, "unknown ifindex");
5469 return -ENODEV;
5470 }
5471
5472 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5473 if (br_spec) {
5474 nla_for_each_nested(attr, br_spec, rem) {
5475 if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
5476 if (nla_len(attr) < sizeof(flags))
5477 return -EINVAL;
5478
5479 br_flags_attr = attr;
5480 flags = nla_get_u16(attr);
5481 }
5482
5483 if (nla_type(attr) == IFLA_BRIDGE_MODE) {
5484 if (nla_len(attr) < sizeof(u16))
5485 return -EINVAL;
5486 }
5487 }
5488 }
5489
5490 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5491 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5492
5493 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
5494 err = -EOPNOTSUPP;
5495 goto out;
5496 }
5497
5498 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
5499 extack);
5500 if (err)
5501 goto out;
5502
5503 flags &= ~BRIDGE_FLAGS_MASTER;
5504 }
5505
5506 if ((flags & BRIDGE_FLAGS_SELF)) {
5507 if (!dev->netdev_ops->ndo_bridge_setlink)
5508 err = -EOPNOTSUPP;
5509 else
5510 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
5511 flags,
5512 extack);
5513 if (!err) {
5514 flags &= ~BRIDGE_FLAGS_SELF;
5515
5516 /* Generate event to notify upper layer of bridge
5517 * change
5518 */
5519 err = rtnl_bridge_notify(dev);
5520 }
5521 }
5522
5523 if (br_flags_attr)
5524 memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
5525out:
5526 return err;
5527}
5528
5529static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
5530 struct netlink_ext_ack *extack)
5531{
5532 struct net *net = sock_net(skb->sk);
5533 struct ifinfomsg *ifm;
5534 struct net_device *dev;
5535 struct nlattr *br_spec, *attr = NULL;
5536 int rem, err = -EOPNOTSUPP;
5537 u16 flags = 0;
5538 bool have_flags = false;
5539
5540 if (nlmsg_len(nlh) < sizeof(*ifm))
5541 return -EINVAL;
5542
5543 ifm = nlmsg_data(nlh);
5544 if (ifm->ifi_family != AF_BRIDGE)
5545 return -EPFNOSUPPORT;
5546
5547 dev = __dev_get_by_index(net, ifm->ifi_index);
5548 if (!dev) {
5549 NL_SET_ERR_MSG(extack, "unknown ifindex");
5550 return -ENODEV;
5551 }
5552
5553 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5554 if (br_spec) {
5555 nla_for_each_nested_type(attr, IFLA_BRIDGE_FLAGS, br_spec,
5556 rem) {
5557 if (nla_len(attr) < sizeof(flags))
5558 return -EINVAL;
5559
5560 have_flags = true;
5561 flags = nla_get_u16(attr);
5562 break;
5563 }
5564 }
5565
5566 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5567 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5568
5569 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
5570 err = -EOPNOTSUPP;
5571 goto out;
5572 }
5573
5574 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
5575 if (err)
5576 goto out;
5577
5578 flags &= ~BRIDGE_FLAGS_MASTER;
5579 }
5580
5581 if ((flags & BRIDGE_FLAGS_SELF)) {
5582 if (!dev->netdev_ops->ndo_bridge_dellink)
5583 err = -EOPNOTSUPP;
5584 else
5585 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5586 flags);
5587
5588 if (!err) {
5589 flags &= ~BRIDGE_FLAGS_SELF;
5590
5591 /* Generate event to notify upper layer of bridge
5592 * change
5593 */
5594 err = rtnl_bridge_notify(dev);
5595 }
5596 }
5597
5598 if (have_flags)
5599 memcpy(nla_data(attr), &flags, sizeof(flags));
5600out:
5601 return err;
5602}
5603
5604static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
5605{
5606 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
5607 (!idxattr || idxattr == attrid);
5608}
5609
5610static bool
5611rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id)
5612{
5613 return dev->netdev_ops &&
5614 dev->netdev_ops->ndo_has_offload_stats &&
5615 dev->netdev_ops->ndo_get_offload_stats &&
5616 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id);
5617}
5618
5619static unsigned int
5620rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id)
5621{
5622 return rtnl_offload_xstats_have_ndo(dev, attr_id) ?
5623 sizeof(struct rtnl_link_stats64) : 0;
5624}
5625
5626static int
5627rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id,
5628 struct sk_buff *skb)
5629{
5630 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id);
5631 struct nlattr *attr = NULL;
5632 void *attr_data;
5633 int err;
5634
5635 if (!size)
5636 return -ENODATA;
5637
5638 attr = nla_reserve_64bit(skb, attr_id, size,
5639 IFLA_OFFLOAD_XSTATS_UNSPEC);
5640 if (!attr)
5641 return -EMSGSIZE;
5642
5643 attr_data = nla_data(attr);
5644 memset(attr_data, 0, size);
5645
5646 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data);
5647 if (err)
5648 return err;
5649
5650 return 0;
5651}
5652
5653static unsigned int
5654rtnl_offload_xstats_get_size_stats(const struct net_device *dev,
5655 enum netdev_offload_xstats_type type)
5656{
5657 bool enabled = netdev_offload_xstats_enabled(dev, type);
5658
5659 return enabled ? sizeof(struct rtnl_hw_stats64) : 0;
5660}
5661
5662struct rtnl_offload_xstats_request_used {
5663 bool request;
5664 bool used;
5665};
5666
5667static int
5668rtnl_offload_xstats_get_stats(struct net_device *dev,
5669 enum netdev_offload_xstats_type type,
5670 struct rtnl_offload_xstats_request_used *ru,
5671 struct rtnl_hw_stats64 *stats,
5672 struct netlink_ext_ack *extack)
5673{
5674 bool request;
5675 bool used;
5676 int err;
5677
5678 request = netdev_offload_xstats_enabled(dev, type);
5679 if (!request) {
5680 used = false;
5681 goto out;
5682 }
5683
5684 err = netdev_offload_xstats_get(dev, type, stats, &used, extack);
5685 if (err)
5686 return err;
5687
5688out:
5689 if (ru) {
5690 ru->request = request;
5691 ru->used = used;
5692 }
5693 return 0;
5694}
5695
5696static int
5697rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id,
5698 struct rtnl_offload_xstats_request_used *ru)
5699{
5700 struct nlattr *nest;
5701
5702 nest = nla_nest_start(skb, attr_id);
5703 if (!nest)
5704 return -EMSGSIZE;
5705
5706 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request))
5707 goto nla_put_failure;
5708
5709 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used))
5710 goto nla_put_failure;
5711
5712 nla_nest_end(skb, nest);
5713 return 0;
5714
5715nla_put_failure:
5716 nla_nest_cancel(skb, nest);
5717 return -EMSGSIZE;
5718}
5719
5720static int
5721rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev,
5722 struct netlink_ext_ack *extack)
5723{
5724 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5725 struct rtnl_offload_xstats_request_used ru_l3;
5726 struct nlattr *nest;
5727 int err;
5728
5729 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack);
5730 if (err)
5731 return err;
5732
5733 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5734 if (!nest)
5735 return -EMSGSIZE;
5736
5737 if (rtnl_offload_xstats_fill_hw_s_info_one(skb,
5738 IFLA_OFFLOAD_XSTATS_L3_STATS,
5739 &ru_l3))
5740 goto nla_put_failure;
5741
5742 nla_nest_end(skb, nest);
5743 return 0;
5744
5745nla_put_failure:
5746 nla_nest_cancel(skb, nest);
5747 return -EMSGSIZE;
5748}
5749
5750static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
5751 int *prividx, u32 off_filter_mask,
5752 struct netlink_ext_ack *extack)
5753{
5754 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5755 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO;
5756 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS;
5757 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5758 bool have_data = false;
5759 int err;
5760
5761 if (*prividx <= attr_id_cpu_hit &&
5762 (off_filter_mask &
5763 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) {
5764 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb);
5765 if (!err) {
5766 have_data = true;
5767 } else if (err != -ENODATA) {
5768 *prividx = attr_id_cpu_hit;
5769 return err;
5770 }
5771 }
5772
5773 if (*prividx <= attr_id_hw_s_info &&
5774 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) {
5775 *prividx = attr_id_hw_s_info;
5776
5777 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack);
5778 if (err)
5779 return err;
5780
5781 have_data = true;
5782 *prividx = 0;
5783 }
5784
5785 if (*prividx <= attr_id_l3_stats &&
5786 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) {
5787 unsigned int size_l3;
5788 struct nlattr *attr;
5789
5790 *prividx = attr_id_l3_stats;
5791
5792 size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5793 if (!size_l3)
5794 goto skip_l3_stats;
5795 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
5796 IFLA_OFFLOAD_XSTATS_UNSPEC);
5797 if (!attr)
5798 return -EMSGSIZE;
5799
5800 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL,
5801 nla_data(attr), extack);
5802 if (err)
5803 return err;
5804
5805 have_data = true;
5806skip_l3_stats:
5807 *prividx = 0;
5808 }
5809
5810 if (!have_data)
5811 return -ENODATA;
5812
5813 *prividx = 0;
5814 return 0;
5815}
5816
5817static unsigned int
5818rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
5819 enum netdev_offload_xstats_type type)
5820{
5821 return nla_total_size(0) +
5822 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
5823 nla_total_size(sizeof(u8)) +
5824 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
5825 nla_total_size(sizeof(u8)) +
5826 0;
5827}
5828
5829static unsigned int
5830rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev)
5831{
5832 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5833
5834 return nla_total_size(0) +
5835 /* IFLA_OFFLOAD_XSTATS_L3_STATS */
5836 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) +
5837 0;
5838}
5839
5840static int rtnl_offload_xstats_get_size(const struct net_device *dev,
5841 u32 off_filter_mask)
5842{
5843 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5844 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5845 int nla_size = 0;
5846 int size;
5847
5848 if (off_filter_mask &
5849 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) {
5850 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit);
5851 nla_size += nla_total_size_64bit(size);
5852 }
5853
5854 if (off_filter_mask &
5855 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO))
5856 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev);
5857
5858 if (off_filter_mask &
5859 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) {
5860 size = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5861 nla_size += nla_total_size_64bit(size);
5862 }
5863
5864 if (nla_size != 0)
5865 nla_size += nla_total_size(0);
5866
5867 return nla_size;
5868}
5869
5870struct rtnl_stats_dump_filters {
5871 /* mask[0] filters outer attributes. Then individual nests have their
5872 * filtering mask at the index of the nested attribute.
5873 */
5874 u32 mask[IFLA_STATS_MAX + 1];
5875};
5876
5877static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
5878 int type, u32 pid, u32 seq, u32 change,
5879 unsigned int flags,
5880 const struct rtnl_stats_dump_filters *filters,
5881 int *idxattr, int *prividx,
5882 struct netlink_ext_ack *extack)
5883{
5884 unsigned int filter_mask = filters->mask[0];
5885 struct if_stats_msg *ifsm;
5886 struct nlmsghdr *nlh;
5887 struct nlattr *attr;
5888 int s_prividx = *prividx;
5889 int err;
5890
5891 ASSERT_RTNL();
5892
5893 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
5894 if (!nlh)
5895 return -EMSGSIZE;
5896
5897 ifsm = nlmsg_data(nlh);
5898 ifsm->family = PF_UNSPEC;
5899 ifsm->pad1 = 0;
5900 ifsm->pad2 = 0;
5901 ifsm->ifindex = dev->ifindex;
5902 ifsm->filter_mask = filter_mask;
5903
5904 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
5905 struct rtnl_link_stats64 *sp;
5906
5907 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
5908 sizeof(struct rtnl_link_stats64),
5909 IFLA_STATS_UNSPEC);
5910 if (!attr) {
5911 err = -EMSGSIZE;
5912 goto nla_put_failure;
5913 }
5914
5915 sp = nla_data(attr);
5916 dev_get_stats(dev, sp);
5917 }
5918
5919 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
5920 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5921
5922 if (ops && ops->fill_linkxstats) {
5923 *idxattr = IFLA_STATS_LINK_XSTATS;
5924 attr = nla_nest_start_noflag(skb,
5925 IFLA_STATS_LINK_XSTATS);
5926 if (!attr) {
5927 err = -EMSGSIZE;
5928 goto nla_put_failure;
5929 }
5930
5931 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5932 nla_nest_end(skb, attr);
5933 if (err)
5934 goto nla_put_failure;
5935 *idxattr = 0;
5936 }
5937 }
5938
5939 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
5940 *idxattr)) {
5941 const struct rtnl_link_ops *ops = NULL;
5942 const struct net_device *master;
5943
5944 master = netdev_master_upper_dev_get(dev);
5945 if (master)
5946 ops = master->rtnl_link_ops;
5947 if (ops && ops->fill_linkxstats) {
5948 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
5949 attr = nla_nest_start_noflag(skb,
5950 IFLA_STATS_LINK_XSTATS_SLAVE);
5951 if (!attr) {
5952 err = -EMSGSIZE;
5953 goto nla_put_failure;
5954 }
5955
5956 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5957 nla_nest_end(skb, attr);
5958 if (err)
5959 goto nla_put_failure;
5960 *idxattr = 0;
5961 }
5962 }
5963
5964 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
5965 *idxattr)) {
5966 u32 off_filter_mask;
5967
5968 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5969 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
5970 attr = nla_nest_start_noflag(skb,
5971 IFLA_STATS_LINK_OFFLOAD_XSTATS);
5972 if (!attr) {
5973 err = -EMSGSIZE;
5974 goto nla_put_failure;
5975 }
5976
5977 err = rtnl_offload_xstats_fill(skb, dev, prividx,
5978 off_filter_mask, extack);
5979 if (err == -ENODATA)
5980 nla_nest_cancel(skb, attr);
5981 else
5982 nla_nest_end(skb, attr);
5983
5984 if (err && err != -ENODATA)
5985 goto nla_put_failure;
5986 *idxattr = 0;
5987 }
5988
5989 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
5990 struct rtnl_af_ops *af_ops;
5991
5992 *idxattr = IFLA_STATS_AF_SPEC;
5993 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
5994 if (!attr) {
5995 err = -EMSGSIZE;
5996 goto nla_put_failure;
5997 }
5998
5999 rcu_read_lock();
6000 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
6001 if (af_ops->fill_stats_af) {
6002 struct nlattr *af;
6003
6004 af = nla_nest_start_noflag(skb,
6005 af_ops->family);
6006 if (!af) {
6007 rcu_read_unlock();
6008 err = -EMSGSIZE;
6009 goto nla_put_failure;
6010 }
6011 err = af_ops->fill_stats_af(skb, dev);
6012
6013 if (err == -ENODATA) {
6014 nla_nest_cancel(skb, af);
6015 } else if (err < 0) {
6016 rcu_read_unlock();
6017 goto nla_put_failure;
6018 }
6019
6020 nla_nest_end(skb, af);
6021 }
6022 }
6023 rcu_read_unlock();
6024
6025 nla_nest_end(skb, attr);
6026
6027 *idxattr = 0;
6028 }
6029
6030 nlmsg_end(skb, nlh);
6031
6032 return 0;
6033
6034nla_put_failure:
6035 /* not a multi message or no progress mean a real error */
6036 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
6037 nlmsg_cancel(skb, nlh);
6038 else
6039 nlmsg_end(skb, nlh);
6040
6041 return err;
6042}
6043
6044static size_t if_nlmsg_stats_size(const struct net_device *dev,
6045 const struct rtnl_stats_dump_filters *filters)
6046{
6047 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
6048 unsigned int filter_mask = filters->mask[0];
6049
6050 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
6051 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
6052
6053 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
6054 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
6055 int attr = IFLA_STATS_LINK_XSTATS;
6056
6057 if (ops && ops->get_linkxstats_size) {
6058 size += nla_total_size(ops->get_linkxstats_size(dev,
6059 attr));
6060 /* for IFLA_STATS_LINK_XSTATS */
6061 size += nla_total_size(0);
6062 }
6063 }
6064
6065 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
6066 struct net_device *_dev = (struct net_device *)dev;
6067 const struct rtnl_link_ops *ops = NULL;
6068 const struct net_device *master;
6069
6070 /* netdev_master_upper_dev_get can't take const */
6071 master = netdev_master_upper_dev_get(_dev);
6072 if (master)
6073 ops = master->rtnl_link_ops;
6074 if (ops && ops->get_linkxstats_size) {
6075 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
6076
6077 size += nla_total_size(ops->get_linkxstats_size(dev,
6078 attr));
6079 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
6080 size += nla_total_size(0);
6081 }
6082 }
6083
6084 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) {
6085 u32 off_filter_mask;
6086
6087 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
6088 size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
6089 }
6090
6091 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
6092 struct rtnl_af_ops *af_ops;
6093
6094 /* for IFLA_STATS_AF_SPEC */
6095 size += nla_total_size(0);
6096
6097 rcu_read_lock();
6098 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
6099 if (af_ops->get_stats_af_size) {
6100 size += nla_total_size(
6101 af_ops->get_stats_af_size(dev));
6102
6103 /* for AF_* */
6104 size += nla_total_size(0);
6105 }
6106 }
6107 rcu_read_unlock();
6108 }
6109
6110 return size;
6111}
6112
6113#define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1)
6114
6115static const struct nla_policy
6116rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
6117 [IFLA_STATS_LINK_OFFLOAD_XSTATS] =
6118 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID),
6119};
6120
6121static const struct nla_policy
6122rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = {
6123 [IFLA_STATS_GET_FILTERS] =
6124 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters),
6125};
6126
6127static const struct nla_policy
6128ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
6129 [IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1),
6130};
6131
6132static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters,
6133 struct rtnl_stats_dump_filters *filters,
6134 struct netlink_ext_ack *extack)
6135{
6136 struct nlattr *tb[IFLA_STATS_MAX + 1];
6137 int err;
6138 int at;
6139
6140 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters,
6141 rtnl_stats_get_policy_filters, extack);
6142 if (err < 0)
6143 return err;
6144
6145 for (at = 1; at <= IFLA_STATS_MAX; at++) {
6146 if (tb[at]) {
6147 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) {
6148 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask");
6149 return -EINVAL;
6150 }
6151 filters->mask[at] = nla_get_u32(tb[at]);
6152 }
6153 }
6154
6155 return 0;
6156}
6157
6158static int rtnl_stats_get_parse(const struct nlmsghdr *nlh,
6159 u32 filter_mask,
6160 struct rtnl_stats_dump_filters *filters,
6161 struct netlink_ext_ack *extack)
6162{
6163 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
6164 int err;
6165 int i;
6166
6167 filters->mask[0] = filter_mask;
6168 for (i = 1; i < ARRAY_SIZE(filters->mask); i++)
6169 filters->mask[i] = -1U;
6170
6171 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb,
6172 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack);
6173 if (err < 0)
6174 return err;
6175
6176 if (tb[IFLA_STATS_GET_FILTERS]) {
6177 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS],
6178 filters, extack);
6179 if (err)
6180 return err;
6181 }
6182
6183 return 0;
6184}
6185
6186static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
6187 bool is_dump, struct netlink_ext_ack *extack)
6188{
6189 struct if_stats_msg *ifsm;
6190
6191 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
6192 NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
6193 return -EINVAL;
6194 }
6195
6196 if (!strict_check)
6197 return 0;
6198
6199 ifsm = nlmsg_data(nlh);
6200
6201 /* only requests using strict checks can pass data to influence
6202 * the dump. The legacy exception is filter_mask.
6203 */
6204 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
6205 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
6206 return -EINVAL;
6207 }
6208 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
6209 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
6210 return -EINVAL;
6211 }
6212
6213 return 0;
6214}
6215
6216static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
6217 struct netlink_ext_ack *extack)
6218{
6219 struct rtnl_stats_dump_filters filters;
6220 struct net *net = sock_net(skb->sk);
6221 struct net_device *dev = NULL;
6222 int idxattr = 0, prividx = 0;
6223 struct if_stats_msg *ifsm;
6224 struct sk_buff *nskb;
6225 int err;
6226
6227 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
6228 false, extack);
6229 if (err)
6230 return err;
6231
6232 ifsm = nlmsg_data(nlh);
6233 if (ifsm->ifindex > 0)
6234 dev = __dev_get_by_index(net, ifsm->ifindex);
6235 else
6236 return -EINVAL;
6237
6238 if (!dev)
6239 return -ENODEV;
6240
6241 if (!ifsm->filter_mask) {
6242 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get");
6243 return -EINVAL;
6244 }
6245
6246 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack);
6247 if (err)
6248 return err;
6249
6250 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL);
6251 if (!nskb)
6252 return -ENOBUFS;
6253
6254 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
6255 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
6256 0, &filters, &idxattr, &prividx, extack);
6257 if (err < 0) {
6258 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
6259 WARN_ON(err == -EMSGSIZE);
6260 kfree_skb(nskb);
6261 } else {
6262 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
6263 }
6264
6265 return err;
6266}
6267
6268static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
6269{
6270 struct netlink_ext_ack *extack = cb->extack;
6271 struct rtnl_stats_dump_filters filters;
6272 struct net *net = sock_net(skb->sk);
6273 unsigned int flags = NLM_F_MULTI;
6274 struct if_stats_msg *ifsm;
6275 struct {
6276 unsigned long ifindex;
6277 int idxattr;
6278 int prividx;
6279 } *ctx = (void *)cb->ctx;
6280 struct net_device *dev;
6281 int err;
6282
6283 cb->seq = net->dev_base_seq;
6284
6285 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
6286 if (err)
6287 return err;
6288
6289 ifsm = nlmsg_data(cb->nlh);
6290 if (!ifsm->filter_mask) {
6291 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
6292 return -EINVAL;
6293 }
6294
6295 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters,
6296 extack);
6297 if (err)
6298 return err;
6299
6300 for_each_netdev_dump(net, dev, ctx->ifindex) {
6301 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
6302 NETLINK_CB(cb->skb).portid,
6303 cb->nlh->nlmsg_seq, 0,
6304 flags, &filters,
6305 &ctx->idxattr, &ctx->prividx,
6306 extack);
6307 /* If we ran out of room on the first message,
6308 * we're in trouble.
6309 */
6310 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
6311
6312 if (err < 0)
6313 break;
6314 ctx->prividx = 0;
6315 ctx->idxattr = 0;
6316 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
6317 }
6318
6319 return err;
6320}
6321
6322void rtnl_offload_xstats_notify(struct net_device *dev)
6323{
6324 struct rtnl_stats_dump_filters response_filters = {};
6325 struct net *net = dev_net(dev);
6326 int idxattr = 0, prividx = 0;
6327 struct sk_buff *skb;
6328 int err = -ENOBUFS;
6329
6330 ASSERT_RTNL();
6331
6332 response_filters.mask[0] |=
6333 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
6334 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
6335 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
6336
6337 skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
6338 GFP_KERNEL);
6339 if (!skb)
6340 goto errout;
6341
6342 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
6343 &response_filters, &idxattr, &prividx, NULL);
6344 if (err < 0) {
6345 kfree_skb(skb);
6346 goto errout;
6347 }
6348
6349 rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL);
6350 return;
6351
6352errout:
6353 rtnl_set_sk_err(net, RTNLGRP_STATS, err);
6354}
6355EXPORT_SYMBOL(rtnl_offload_xstats_notify);
6356
6357static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
6358 struct netlink_ext_ack *extack)
6359{
6360 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
6361 struct rtnl_stats_dump_filters response_filters = {};
6362 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
6363 struct net *net = sock_net(skb->sk);
6364 struct net_device *dev = NULL;
6365 struct if_stats_msg *ifsm;
6366 bool notify = false;
6367 int err;
6368
6369 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
6370 false, extack);
6371 if (err)
6372 return err;
6373
6374 ifsm = nlmsg_data(nlh);
6375 if (ifsm->family != AF_UNSPEC) {
6376 NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC");
6377 return -EINVAL;
6378 }
6379
6380 if (ifsm->ifindex > 0)
6381 dev = __dev_get_by_index(net, ifsm->ifindex);
6382 else
6383 return -EINVAL;
6384
6385 if (!dev)
6386 return -ENODEV;
6387
6388 if (ifsm->filter_mask) {
6389 NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set");
6390 return -EINVAL;
6391 }
6392
6393 err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX,
6394 ifla_stats_set_policy, extack);
6395 if (err < 0)
6396 return err;
6397
6398 if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) {
6399 u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]);
6400
6401 if (req)
6402 err = netdev_offload_xstats_enable(dev, t_l3, extack);
6403 else
6404 err = netdev_offload_xstats_disable(dev, t_l3);
6405
6406 if (!err)
6407 notify = true;
6408 else if (err != -EALREADY)
6409 return err;
6410
6411 response_filters.mask[0] |=
6412 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
6413 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
6414 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
6415 }
6416
6417 if (notify)
6418 rtnl_offload_xstats_notify(dev);
6419
6420 return 0;
6421}
6422
6423static int rtnl_mdb_valid_dump_req(const struct nlmsghdr *nlh,
6424 struct netlink_ext_ack *extack)
6425{
6426 struct br_port_msg *bpm;
6427
6428 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
6429 NL_SET_ERR_MSG(extack, "Invalid header for mdb dump request");
6430 return -EINVAL;
6431 }
6432
6433 bpm = nlmsg_data(nlh);
6434 if (bpm->ifindex) {
6435 NL_SET_ERR_MSG(extack, "Filtering by device index is not supported for mdb dump request");
6436 return -EINVAL;
6437 }
6438 if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
6439 NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
6440 return -EINVAL;
6441 }
6442
6443 return 0;
6444}
6445
6446struct rtnl_mdb_dump_ctx {
6447 long idx;
6448};
6449
6450static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
6451{
6452 struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx;
6453 struct net *net = sock_net(skb->sk);
6454 struct net_device *dev;
6455 int idx, s_idx;
6456 int err;
6457
6458 NL_ASSERT_CTX_FITS(struct rtnl_mdb_dump_ctx);
6459
6460 if (cb->strict_check) {
6461 err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack);
6462 if (err)
6463 return err;
6464 }
6465
6466 s_idx = ctx->idx;
6467 idx = 0;
6468
6469 for_each_netdev(net, dev) {
6470 if (idx < s_idx)
6471 goto skip;
6472 if (!dev->netdev_ops->ndo_mdb_dump)
6473 goto skip;
6474
6475 err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb);
6476 if (err == -EMSGSIZE)
6477 goto out;
6478 /* Moving on to next device, reset markers and sequence
6479 * counters since they are all maintained per-device.
6480 */
6481 memset(cb->ctx, 0, sizeof(cb->ctx));
6482 cb->prev_seq = 0;
6483 cb->seq = 0;
6484skip:
6485 idx++;
6486 }
6487
6488out:
6489 ctx->idx = idx;
6490 return skb->len;
6491}
6492
6493static int rtnl_validate_mdb_entry_get(const struct nlattr *attr,
6494 struct netlink_ext_ack *extack)
6495{
6496 struct br_mdb_entry *entry = nla_data(attr);
6497
6498 if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
6499 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
6500 return -EINVAL;
6501 }
6502
6503 if (entry->ifindex) {
6504 NL_SET_ERR_MSG(extack, "Entry ifindex cannot be specified");
6505 return -EINVAL;
6506 }
6507
6508 if (entry->state) {
6509 NL_SET_ERR_MSG(extack, "Entry state cannot be specified");
6510 return -EINVAL;
6511 }
6512
6513 if (entry->flags) {
6514 NL_SET_ERR_MSG(extack, "Entry flags cannot be specified");
6515 return -EINVAL;
6516 }
6517
6518 if (entry->vid >= VLAN_VID_MASK) {
6519 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
6520 return -EINVAL;
6521 }
6522
6523 if (entry->addr.proto != htons(ETH_P_IP) &&
6524 entry->addr.proto != htons(ETH_P_IPV6) &&
6525 entry->addr.proto != 0) {
6526 NL_SET_ERR_MSG(extack, "Unknown entry protocol");
6527 return -EINVAL;
6528 }
6529
6530 return 0;
6531}
6532
6533static const struct nla_policy mdba_get_policy[MDBA_GET_ENTRY_MAX + 1] = {
6534 [MDBA_GET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
6535 rtnl_validate_mdb_entry_get,
6536 sizeof(struct br_mdb_entry)),
6537 [MDBA_GET_ENTRY_ATTRS] = { .type = NLA_NESTED },
6538};
6539
6540static int rtnl_mdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
6541 struct netlink_ext_ack *extack)
6542{
6543 struct nlattr *tb[MDBA_GET_ENTRY_MAX + 1];
6544 struct net *net = sock_net(in_skb->sk);
6545 struct br_port_msg *bpm;
6546 struct net_device *dev;
6547 int err;
6548
6549 err = nlmsg_parse(nlh, sizeof(struct br_port_msg), tb,
6550 MDBA_GET_ENTRY_MAX, mdba_get_policy, extack);
6551 if (err)
6552 return err;
6553
6554 bpm = nlmsg_data(nlh);
6555 if (!bpm->ifindex) {
6556 NL_SET_ERR_MSG(extack, "Invalid ifindex");
6557 return -EINVAL;
6558 }
6559
6560 dev = __dev_get_by_index(net, bpm->ifindex);
6561 if (!dev) {
6562 NL_SET_ERR_MSG(extack, "Device doesn't exist");
6563 return -ENODEV;
6564 }
6565
6566 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_GET_ENTRY)) {
6567 NL_SET_ERR_MSG(extack, "Missing MDBA_GET_ENTRY attribute");
6568 return -EINVAL;
6569 }
6570
6571 if (!dev->netdev_ops->ndo_mdb_get) {
6572 NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
6573 return -EOPNOTSUPP;
6574 }
6575
6576 return dev->netdev_ops->ndo_mdb_get(dev, tb, NETLINK_CB(in_skb).portid,
6577 nlh->nlmsg_seq, extack);
6578}
6579
6580static int rtnl_validate_mdb_entry(const struct nlattr *attr,
6581 struct netlink_ext_ack *extack)
6582{
6583 struct br_mdb_entry *entry = nla_data(attr);
6584
6585 if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
6586 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
6587 return -EINVAL;
6588 }
6589
6590 if (entry->ifindex == 0) {
6591 NL_SET_ERR_MSG(extack, "Zero entry ifindex is not allowed");
6592 return -EINVAL;
6593 }
6594
6595 if (entry->addr.proto == htons(ETH_P_IP)) {
6596 if (!ipv4_is_multicast(entry->addr.u.ip4) &&
6597 !ipv4_is_zeronet(entry->addr.u.ip4)) {
6598 NL_SET_ERR_MSG(extack, "IPv4 entry group address is not multicast or 0.0.0.0");
6599 return -EINVAL;
6600 }
6601 if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
6602 NL_SET_ERR_MSG(extack, "IPv4 entry group address is local multicast");
6603 return -EINVAL;
6604 }
6605#if IS_ENABLED(CONFIG_IPV6)
6606 } else if (entry->addr.proto == htons(ETH_P_IPV6)) {
6607 if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
6608 NL_SET_ERR_MSG(extack, "IPv6 entry group address is link-local all nodes");
6609 return -EINVAL;
6610 }
6611#endif
6612 } else if (entry->addr.proto == 0) {
6613 /* L2 mdb */
6614 if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
6615 NL_SET_ERR_MSG(extack, "L2 entry group is not multicast");
6616 return -EINVAL;
6617 }
6618 } else {
6619 NL_SET_ERR_MSG(extack, "Unknown entry protocol");
6620 return -EINVAL;
6621 }
6622
6623 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
6624 NL_SET_ERR_MSG(extack, "Unknown entry state");
6625 return -EINVAL;
6626 }
6627 if (entry->vid >= VLAN_VID_MASK) {
6628 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
6629 return -EINVAL;
6630 }
6631
6632 return 0;
6633}
6634
6635static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = {
6636 [MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 },
6637 [MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
6638 rtnl_validate_mdb_entry,
6639 sizeof(struct br_mdb_entry)),
6640 [MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
6641};
6642
6643static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
6644 struct netlink_ext_ack *extack)
6645{
6646 struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
6647 struct net *net = sock_net(skb->sk);
6648 struct br_port_msg *bpm;
6649 struct net_device *dev;
6650 int err;
6651
6652 err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
6653 MDBA_SET_ENTRY_MAX, mdba_policy, extack);
6654 if (err)
6655 return err;
6656
6657 bpm = nlmsg_data(nlh);
6658 if (!bpm->ifindex) {
6659 NL_SET_ERR_MSG(extack, "Invalid ifindex");
6660 return -EINVAL;
6661 }
6662
6663 dev = __dev_get_by_index(net, bpm->ifindex);
6664 if (!dev) {
6665 NL_SET_ERR_MSG(extack, "Device doesn't exist");
6666 return -ENODEV;
6667 }
6668
6669 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
6670 NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
6671 return -EINVAL;
6672 }
6673
6674 if (!dev->netdev_ops->ndo_mdb_add) {
6675 NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
6676 return -EOPNOTSUPP;
6677 }
6678
6679 return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack);
6680}
6681
6682static int rtnl_validate_mdb_entry_del_bulk(const struct nlattr *attr,
6683 struct netlink_ext_ack *extack)
6684{
6685 struct br_mdb_entry *entry = nla_data(attr);
6686 struct br_mdb_entry zero_entry = {};
6687
6688 if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
6689 NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
6690 return -EINVAL;
6691 }
6692
6693 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
6694 NL_SET_ERR_MSG(extack, "Unknown entry state");
6695 return -EINVAL;
6696 }
6697
6698 if (entry->flags) {
6699 NL_SET_ERR_MSG(extack, "Entry flags cannot be set");
6700 return -EINVAL;
6701 }
6702
6703 if (entry->vid >= VLAN_N_VID - 1) {
6704 NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
6705 return -EINVAL;
6706 }
6707
6708 if (memcmp(&entry->addr, &zero_entry.addr, sizeof(entry->addr))) {
6709 NL_SET_ERR_MSG(extack, "Entry address cannot be set");
6710 return -EINVAL;
6711 }
6712
6713 return 0;
6714}
6715
6716static const struct nla_policy mdba_del_bulk_policy[MDBA_SET_ENTRY_MAX + 1] = {
6717 [MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
6718 rtnl_validate_mdb_entry_del_bulk,
6719 sizeof(struct br_mdb_entry)),
6720 [MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
6721};
6722
6723static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
6724 struct netlink_ext_ack *extack)
6725{
6726 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
6727 struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
6728 struct net *net = sock_net(skb->sk);
6729 struct br_port_msg *bpm;
6730 struct net_device *dev;
6731 int err;
6732
6733 if (!del_bulk)
6734 err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
6735 MDBA_SET_ENTRY_MAX, mdba_policy,
6736 extack);
6737 else
6738 err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX,
6739 mdba_del_bulk_policy, extack);
6740 if (err)
6741 return err;
6742
6743 bpm = nlmsg_data(nlh);
6744 if (!bpm->ifindex) {
6745 NL_SET_ERR_MSG(extack, "Invalid ifindex");
6746 return -EINVAL;
6747 }
6748
6749 dev = __dev_get_by_index(net, bpm->ifindex);
6750 if (!dev) {
6751 NL_SET_ERR_MSG(extack, "Device doesn't exist");
6752 return -ENODEV;
6753 }
6754
6755 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
6756 NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
6757 return -EINVAL;
6758 }
6759
6760 if (del_bulk) {
6761 if (!dev->netdev_ops->ndo_mdb_del_bulk) {
6762 NL_SET_ERR_MSG(extack, "Device does not support MDB bulk deletion");
6763 return -EOPNOTSUPP;
6764 }
6765 return dev->netdev_ops->ndo_mdb_del_bulk(dev, tb, extack);
6766 }
6767
6768 if (!dev->netdev_ops->ndo_mdb_del) {
6769 NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
6770 return -EOPNOTSUPP;
6771 }
6772
6773 return dev->netdev_ops->ndo_mdb_del(dev, tb, extack);
6774}
6775
6776/* Process one rtnetlink message. */
6777
6778static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
6779{
6780 const bool needs_lock = !(cb->flags & RTNL_FLAG_DUMP_UNLOCKED);
6781 rtnl_dumpit_func dumpit = cb->data;
6782 int err;
6783
6784 /* Previous iteration have already finished, avoid calling->dumpit()
6785 * again, it may not expect to be called after it reached the end.
6786 */
6787 if (!dumpit)
6788 return 0;
6789
6790 if (needs_lock)
6791 rtnl_lock();
6792 err = dumpit(skb, cb);
6793 if (needs_lock)
6794 rtnl_unlock();
6795
6796 /* Old dump handlers used to send NLM_DONE as in a separate recvmsg().
6797 * Some applications which parse netlink manually depend on this.
6798 */
6799 if (cb->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
6800 if (err < 0 && err != -EMSGSIZE)
6801 return err;
6802 if (!err)
6803 cb->data = NULL;
6804
6805 return skb->len;
6806 }
6807 return err;
6808}
6809
6810static int rtnetlink_dump_start(struct sock *ssk, struct sk_buff *skb,
6811 const struct nlmsghdr *nlh,
6812 struct netlink_dump_control *control)
6813{
6814 if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE ||
6815 !(control->flags & RTNL_FLAG_DUMP_UNLOCKED)) {
6816 WARN_ON(control->data);
6817 control->data = control->dump;
6818 control->dump = rtnl_dumpit;
6819 }
6820
6821 return netlink_dump_start(ssk, skb, nlh, control);
6822}
6823
6824static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
6825 struct netlink_ext_ack *extack)
6826{
6827 struct net *net = sock_net(skb->sk);
6828 struct rtnl_link *link;
6829 enum rtnl_kinds kind;
6830 struct module *owner;
6831 int err = -EOPNOTSUPP;
6832 rtnl_doit_func doit;
6833 unsigned int flags;
6834 int family;
6835 int type;
6836
6837 type = nlh->nlmsg_type;
6838 if (type > RTM_MAX)
6839 return -EOPNOTSUPP;
6840
6841 type -= RTM_BASE;
6842
6843 /* All the messages must have at least 1 byte length */
6844 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
6845 return 0;
6846
6847 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
6848 kind = rtnl_msgtype_kind(type);
6849
6850 if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
6851 return -EPERM;
6852
6853 rcu_read_lock();
6854 if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
6855 struct sock *rtnl;
6856 rtnl_dumpit_func dumpit;
6857 u32 min_dump_alloc = 0;
6858
6859 link = rtnl_get_link(family, type);
6860 if (!link || !link->dumpit) {
6861 family = PF_UNSPEC;
6862 link = rtnl_get_link(family, type);
6863 if (!link || !link->dumpit)
6864 goto err_unlock;
6865 }
6866 owner = link->owner;
6867 dumpit = link->dumpit;
6868 flags = link->flags;
6869
6870 if (type == RTM_GETLINK - RTM_BASE)
6871 min_dump_alloc = rtnl_calcit(skb, nlh);
6872
6873 err = 0;
6874 /* need to do this before rcu_read_unlock() */
6875 if (!try_module_get(owner))
6876 err = -EPROTONOSUPPORT;
6877
6878 rcu_read_unlock();
6879
6880 rtnl = net->rtnl;
6881 if (err == 0) {
6882 struct netlink_dump_control c = {
6883 .dump = dumpit,
6884 .min_dump_alloc = min_dump_alloc,
6885 .module = owner,
6886 .flags = flags,
6887 };
6888 err = rtnetlink_dump_start(rtnl, skb, nlh, &c);
6889 /* netlink_dump_start() will keep a reference on
6890 * module if dump is still in progress.
6891 */
6892 module_put(owner);
6893 }
6894 return err;
6895 }
6896
6897 link = rtnl_get_link(family, type);
6898 if (!link || !link->doit) {
6899 family = PF_UNSPEC;
6900 link = rtnl_get_link(PF_UNSPEC, type);
6901 if (!link || !link->doit)
6902 goto out_unlock;
6903 }
6904
6905 owner = link->owner;
6906 if (!try_module_get(owner)) {
6907 err = -EPROTONOSUPPORT;
6908 goto out_unlock;
6909 }
6910
6911 flags = link->flags;
6912 if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
6913 !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
6914 NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
6915 module_put(owner);
6916 goto err_unlock;
6917 }
6918
6919 if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
6920 doit = link->doit;
6921 rcu_read_unlock();
6922 if (doit)
6923 err = doit(skb, nlh, extack);
6924 module_put(owner);
6925 return err;
6926 }
6927 rcu_read_unlock();
6928
6929 rtnl_lock();
6930 link = rtnl_get_link(family, type);
6931 if (link && link->doit)
6932 err = link->doit(skb, nlh, extack);
6933 rtnl_unlock();
6934
6935 module_put(owner);
6936
6937 return err;
6938
6939out_unlock:
6940 rcu_read_unlock();
6941 return err;
6942
6943err_unlock:
6944 rcu_read_unlock();
6945 return -EOPNOTSUPP;
6946}
6947
6948static void rtnetlink_rcv(struct sk_buff *skb)
6949{
6950 netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
6951}
6952
6953static int rtnetlink_bind(struct net *net, int group)
6954{
6955 switch (group) {
6956 case RTNLGRP_IPV4_MROUTE_R:
6957 case RTNLGRP_IPV6_MROUTE_R:
6958 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
6959 return -EPERM;
6960 break;
6961 }
6962 return 0;
6963}
6964
6965static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
6966{
6967 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6968
6969 switch (event) {
6970 case NETDEV_REBOOT:
6971 case NETDEV_CHANGEMTU:
6972 case NETDEV_CHANGEADDR:
6973 case NETDEV_CHANGENAME:
6974 case NETDEV_FEAT_CHANGE:
6975 case NETDEV_BONDING_FAILOVER:
6976 case NETDEV_POST_TYPE_CHANGE:
6977 case NETDEV_NOTIFY_PEERS:
6978 case NETDEV_CHANGEUPPER:
6979 case NETDEV_RESEND_IGMP:
6980 case NETDEV_CHANGEINFODATA:
6981 case NETDEV_CHANGELOWERSTATE:
6982 case NETDEV_CHANGE_TX_QUEUE_LEN:
6983 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
6984 GFP_KERNEL, NULL, 0, 0, NULL);
6985 break;
6986 default:
6987 break;
6988 }
6989 return NOTIFY_DONE;
6990}
6991
6992static struct notifier_block rtnetlink_dev_notifier = {
6993 .notifier_call = rtnetlink_event,
6994};
6995
6996
6997static int __net_init rtnetlink_net_init(struct net *net)
6998{
6999 struct sock *sk;
7000 struct netlink_kernel_cfg cfg = {
7001 .groups = RTNLGRP_MAX,
7002 .input = rtnetlink_rcv,
7003 .flags = NL_CFG_F_NONROOT_RECV,
7004 .bind = rtnetlink_bind,
7005 };
7006
7007 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
7008 if (!sk)
7009 return -ENOMEM;
7010 net->rtnl = sk;
7011 return 0;
7012}
7013
7014static void __net_exit rtnetlink_net_exit(struct net *net)
7015{
7016 netlink_kernel_release(net->rtnl);
7017 net->rtnl = NULL;
7018}
7019
7020static struct pernet_operations rtnetlink_net_ops = {
7021 .init = rtnetlink_net_init,
7022 .exit = rtnetlink_net_exit,
7023};
7024
7025static const struct rtnl_msg_handler rtnetlink_rtnl_msg_handlers[] __initconst = {
7026 {.msgtype = RTM_NEWLINK, .doit = rtnl_newlink,
7027 .flags = RTNL_FLAG_DOIT_PERNET},
7028 {.msgtype = RTM_DELLINK, .doit = rtnl_dellink,
7029 .flags = RTNL_FLAG_DOIT_PERNET_WIP},
7030 {.msgtype = RTM_GETLINK, .doit = rtnl_getlink,
7031 .dumpit = rtnl_dump_ifinfo, .flags = RTNL_FLAG_DUMP_SPLIT_NLM_DONE},
7032 {.msgtype = RTM_SETLINK, .doit = rtnl_setlink,
7033 .flags = RTNL_FLAG_DOIT_PERNET_WIP},
7034 {.msgtype = RTM_GETADDR, .dumpit = rtnl_dump_all},
7035 {.msgtype = RTM_GETROUTE, .dumpit = rtnl_dump_all},
7036 {.msgtype = RTM_GETNETCONF, .dumpit = rtnl_dump_all},
7037 {.msgtype = RTM_GETSTATS, .doit = rtnl_stats_get,
7038 .dumpit = rtnl_stats_dump},
7039 {.msgtype = RTM_SETSTATS, .doit = rtnl_stats_set},
7040 {.msgtype = RTM_NEWLINKPROP, .doit = rtnl_newlinkprop},
7041 {.msgtype = RTM_DELLINKPROP, .doit = rtnl_dellinkprop},
7042 {.protocol = PF_BRIDGE, .msgtype = RTM_GETLINK,
7043 .dumpit = rtnl_bridge_getlink},
7044 {.protocol = PF_BRIDGE, .msgtype = RTM_DELLINK,
7045 .doit = rtnl_bridge_dellink},
7046 {.protocol = PF_BRIDGE, .msgtype = RTM_SETLINK,
7047 .doit = rtnl_bridge_setlink},
7048 {.protocol = PF_BRIDGE, .msgtype = RTM_NEWNEIGH, .doit = rtnl_fdb_add},
7049 {.protocol = PF_BRIDGE, .msgtype = RTM_DELNEIGH, .doit = rtnl_fdb_del,
7050 .flags = RTNL_FLAG_BULK_DEL_SUPPORTED},
7051 {.protocol = PF_BRIDGE, .msgtype = RTM_GETNEIGH, .doit = rtnl_fdb_get,
7052 .dumpit = rtnl_fdb_dump},
7053 {.protocol = PF_BRIDGE, .msgtype = RTM_NEWMDB, .doit = rtnl_mdb_add},
7054 {.protocol = PF_BRIDGE, .msgtype = RTM_DELMDB, .doit = rtnl_mdb_del,
7055 .flags = RTNL_FLAG_BULK_DEL_SUPPORTED},
7056 {.protocol = PF_BRIDGE, .msgtype = RTM_GETMDB, .doit = rtnl_mdb_get,
7057 .dumpit = rtnl_mdb_dump},
7058};
7059
7060void __init rtnetlink_init(void)
7061{
7062 if (register_pernet_subsys(&rtnetlink_net_ops))
7063 panic("rtnetlink_init: cannot initialize rtnetlink\n");
7064
7065 register_netdevice_notifier(&rtnetlink_dev_notifier);
7066
7067 rtnl_register_many(rtnetlink_rtnl_msg_handlers);
7068}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Routing netlink socket interface: protocol independent part.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Fixes:
12 * Vitaly E. Lavrov RTA_OK arithmetic was wrong.
13 */
14
15#include <linux/bitops.h>
16#include <linux/errno.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/socket.h>
20#include <linux/kernel.h>
21#include <linux/timer.h>
22#include <linux/string.h>
23#include <linux/sockios.h>
24#include <linux/net.h>
25#include <linux/fcntl.h>
26#include <linux/mm.h>
27#include <linux/slab.h>
28#include <linux/interrupt.h>
29#include <linux/capability.h>
30#include <linux/skbuff.h>
31#include <linux/init.h>
32#include <linux/security.h>
33#include <linux/mutex.h>
34#include <linux/if_addr.h>
35#include <linux/if_bridge.h>
36#include <linux/if_vlan.h>
37#include <linux/pci.h>
38#include <linux/etherdevice.h>
39#include <linux/bpf.h>
40
41#include <linux/uaccess.h>
42
43#include <linux/inet.h>
44#include <linux/netdevice.h>
45#include <net/ip.h>
46#include <net/protocol.h>
47#include <net/arp.h>
48#include <net/route.h>
49#include <net/udp.h>
50#include <net/tcp.h>
51#include <net/sock.h>
52#include <net/pkt_sched.h>
53#include <net/fib_rules.h>
54#include <net/rtnetlink.h>
55#include <net/net_namespace.h>
56#include <net/devlink.h>
57
58#include "dev.h"
59
60#define RTNL_MAX_TYPE 50
61#define RTNL_SLAVE_MAX_TYPE 40
62
63struct rtnl_link {
64 rtnl_doit_func doit;
65 rtnl_dumpit_func dumpit;
66 struct module *owner;
67 unsigned int flags;
68 struct rcu_head rcu;
69};
70
71static DEFINE_MUTEX(rtnl_mutex);
72
73void rtnl_lock(void)
74{
75 mutex_lock(&rtnl_mutex);
76}
77EXPORT_SYMBOL(rtnl_lock);
78
79int rtnl_lock_killable(void)
80{
81 return mutex_lock_killable(&rtnl_mutex);
82}
83EXPORT_SYMBOL(rtnl_lock_killable);
84
85static struct sk_buff *defer_kfree_skb_list;
86void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
87{
88 if (head && tail) {
89 tail->next = defer_kfree_skb_list;
90 defer_kfree_skb_list = head;
91 }
92}
93EXPORT_SYMBOL(rtnl_kfree_skbs);
94
95void __rtnl_unlock(void)
96{
97 struct sk_buff *head = defer_kfree_skb_list;
98
99 defer_kfree_skb_list = NULL;
100
101 /* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
102 * is used. In some places, e.g. in cfg80211, we have code that will do
103 * something like
104 * rtnl_lock()
105 * wiphy_lock()
106 * ...
107 * rtnl_unlock()
108 *
109 * and because netdev_run_todo() acquires the RTNL for items on the list
110 * we could cause a situation such as this:
111 * Thread 1 Thread 2
112 * rtnl_lock()
113 * unregister_netdevice()
114 * __rtnl_unlock()
115 * rtnl_lock()
116 * wiphy_lock()
117 * rtnl_unlock()
118 * netdev_run_todo()
119 * __rtnl_unlock()
120 *
121 * // list not empty now
122 * // because of thread 2
123 * rtnl_lock()
124 * while (!list_empty(...))
125 * rtnl_lock()
126 * wiphy_lock()
127 * **** DEADLOCK ****
128 *
129 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
130 * it's not used in cases where something is added to do the list.
131 */
132 WARN_ON(!list_empty(&net_todo_list));
133
134 mutex_unlock(&rtnl_mutex);
135
136 while (head) {
137 struct sk_buff *next = head->next;
138
139 kfree_skb(head);
140 cond_resched();
141 head = next;
142 }
143}
144
145void rtnl_unlock(void)
146{
147 /* This fellow will unlock it for us. */
148 netdev_run_todo();
149}
150EXPORT_SYMBOL(rtnl_unlock);
151
152int rtnl_trylock(void)
153{
154 return mutex_trylock(&rtnl_mutex);
155}
156EXPORT_SYMBOL(rtnl_trylock);
157
158int rtnl_is_locked(void)
159{
160 return mutex_is_locked(&rtnl_mutex);
161}
162EXPORT_SYMBOL(rtnl_is_locked);
163
164bool refcount_dec_and_rtnl_lock(refcount_t *r)
165{
166 return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
167}
168EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
169
170#ifdef CONFIG_PROVE_LOCKING
171bool lockdep_rtnl_is_held(void)
172{
173 return lockdep_is_held(&rtnl_mutex);
174}
175EXPORT_SYMBOL(lockdep_rtnl_is_held);
176#endif /* #ifdef CONFIG_PROVE_LOCKING */
177
178static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
179
180static inline int rtm_msgindex(int msgtype)
181{
182 int msgindex = msgtype - RTM_BASE;
183
184 /*
185 * msgindex < 0 implies someone tried to register a netlink
186 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
187 * the message type has not been added to linux/rtnetlink.h
188 */
189 BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
190
191 return msgindex;
192}
193
194static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
195{
196 struct rtnl_link __rcu **tab;
197
198 if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
199 protocol = PF_UNSPEC;
200
201 tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
202 if (!tab)
203 tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);
204
205 return rcu_dereference_rtnl(tab[msgtype]);
206}
207
208static int rtnl_register_internal(struct module *owner,
209 int protocol, int msgtype,
210 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
211 unsigned int flags)
212{
213 struct rtnl_link *link, *old;
214 struct rtnl_link __rcu **tab;
215 int msgindex;
216 int ret = -ENOBUFS;
217
218 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
219 msgindex = rtm_msgindex(msgtype);
220
221 rtnl_lock();
222 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
223 if (tab == NULL) {
224 tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
225 if (!tab)
226 goto unlock;
227
228 /* ensures we see the 0 stores */
229 rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
230 }
231
232 old = rtnl_dereference(tab[msgindex]);
233 if (old) {
234 link = kmemdup(old, sizeof(*old), GFP_KERNEL);
235 if (!link)
236 goto unlock;
237 } else {
238 link = kzalloc(sizeof(*link), GFP_KERNEL);
239 if (!link)
240 goto unlock;
241 }
242
243 WARN_ON(link->owner && link->owner != owner);
244 link->owner = owner;
245
246 WARN_ON(doit && link->doit && link->doit != doit);
247 if (doit)
248 link->doit = doit;
249 WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
250 if (dumpit)
251 link->dumpit = dumpit;
252
253 WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
254 (flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
255 link->flags |= flags;
256
257 /* publish protocol:msgtype */
258 rcu_assign_pointer(tab[msgindex], link);
259 ret = 0;
260 if (old)
261 kfree_rcu(old, rcu);
262unlock:
263 rtnl_unlock();
264 return ret;
265}
266
267/**
268 * rtnl_register_module - Register a rtnetlink message type
269 *
270 * @owner: module registering the hook (THIS_MODULE)
271 * @protocol: Protocol family or PF_UNSPEC
272 * @msgtype: rtnetlink message type
273 * @doit: Function pointer called for each request message
274 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
275 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
276 *
277 * Like rtnl_register, but for use by removable modules.
278 */
279int rtnl_register_module(struct module *owner,
280 int protocol, int msgtype,
281 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
282 unsigned int flags)
283{
284 return rtnl_register_internal(owner, protocol, msgtype,
285 doit, dumpit, flags);
286}
287EXPORT_SYMBOL_GPL(rtnl_register_module);
288
289/**
290 * rtnl_register - Register a rtnetlink message type
291 * @protocol: Protocol family or PF_UNSPEC
292 * @msgtype: rtnetlink message type
293 * @doit: Function pointer called for each request message
294 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
295 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
296 *
297 * Registers the specified function pointers (at least one of them has
298 * to be non-NULL) to be called whenever a request message for the
299 * specified protocol family and message type is received.
300 *
301 * The special protocol family PF_UNSPEC may be used to define fallback
302 * function pointers for the case when no entry for the specific protocol
303 * family exists.
304 */
305void rtnl_register(int protocol, int msgtype,
306 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
307 unsigned int flags)
308{
309 int err;
310
311 err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
312 flags);
313 if (err)
314 pr_err("Unable to register rtnetlink message handler, "
315 "protocol = %d, message type = %d\n", protocol, msgtype);
316}
317
318/**
319 * rtnl_unregister - Unregister a rtnetlink message type
320 * @protocol: Protocol family or PF_UNSPEC
321 * @msgtype: rtnetlink message type
322 *
323 * Returns 0 on success or a negative error code.
324 */
325int rtnl_unregister(int protocol, int msgtype)
326{
327 struct rtnl_link __rcu **tab;
328 struct rtnl_link *link;
329 int msgindex;
330
331 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
332 msgindex = rtm_msgindex(msgtype);
333
334 rtnl_lock();
335 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
336 if (!tab) {
337 rtnl_unlock();
338 return -ENOENT;
339 }
340
341 link = rtnl_dereference(tab[msgindex]);
342 RCU_INIT_POINTER(tab[msgindex], NULL);
343 rtnl_unlock();
344
345 kfree_rcu(link, rcu);
346
347 return 0;
348}
349EXPORT_SYMBOL_GPL(rtnl_unregister);
350
351/**
352 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
353 * @protocol : Protocol family or PF_UNSPEC
354 *
355 * Identical to calling rtnl_unregster() for all registered message types
356 * of a certain protocol family.
357 */
358void rtnl_unregister_all(int protocol)
359{
360 struct rtnl_link __rcu **tab;
361 struct rtnl_link *link;
362 int msgindex;
363
364 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
365
366 rtnl_lock();
367 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
368 if (!tab) {
369 rtnl_unlock();
370 return;
371 }
372 RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
373 for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
374 link = rtnl_dereference(tab[msgindex]);
375 if (!link)
376 continue;
377
378 RCU_INIT_POINTER(tab[msgindex], NULL);
379 kfree_rcu(link, rcu);
380 }
381 rtnl_unlock();
382
383 synchronize_net();
384
385 kfree(tab);
386}
387EXPORT_SYMBOL_GPL(rtnl_unregister_all);
388
389static LIST_HEAD(link_ops);
390
391static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
392{
393 const struct rtnl_link_ops *ops;
394
395 list_for_each_entry(ops, &link_ops, list) {
396 if (!strcmp(ops->kind, kind))
397 return ops;
398 }
399 return NULL;
400}
401
402/**
403 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
404 * @ops: struct rtnl_link_ops * to register
405 *
406 * The caller must hold the rtnl_mutex. This function should be used
407 * by drivers that create devices during module initialization. It
408 * must be called before registering the devices.
409 *
410 * Returns 0 on success or a negative error code.
411 */
412int __rtnl_link_register(struct rtnl_link_ops *ops)
413{
414 if (rtnl_link_ops_get(ops->kind))
415 return -EEXIST;
416
417 /* The check for alloc/setup is here because if ops
418 * does not have that filled up, it is not possible
419 * to use the ops for creating device. So do not
420 * fill up dellink as well. That disables rtnl_dellink.
421 */
422 if ((ops->alloc || ops->setup) && !ops->dellink)
423 ops->dellink = unregister_netdevice_queue;
424
425 list_add_tail(&ops->list, &link_ops);
426 return 0;
427}
428EXPORT_SYMBOL_GPL(__rtnl_link_register);
429
430/**
431 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
432 * @ops: struct rtnl_link_ops * to register
433 *
434 * Returns 0 on success or a negative error code.
435 */
436int rtnl_link_register(struct rtnl_link_ops *ops)
437{
438 int err;
439
440 /* Sanity-check max sizes to avoid stack buffer overflow. */
441 if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
442 ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
443 return -EINVAL;
444
445 rtnl_lock();
446 err = __rtnl_link_register(ops);
447 rtnl_unlock();
448 return err;
449}
450EXPORT_SYMBOL_GPL(rtnl_link_register);
451
452static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
453{
454 struct net_device *dev;
455 LIST_HEAD(list_kill);
456
457 for_each_netdev(net, dev) {
458 if (dev->rtnl_link_ops == ops)
459 ops->dellink(dev, &list_kill);
460 }
461 unregister_netdevice_many(&list_kill);
462}
463
464/**
465 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
466 * @ops: struct rtnl_link_ops * to unregister
467 *
468 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
469 * integrity (hold pernet_ops_rwsem for writing to close the race
470 * with setup_net() and cleanup_net()).
471 */
472void __rtnl_link_unregister(struct rtnl_link_ops *ops)
473{
474 struct net *net;
475
476 for_each_net(net) {
477 __rtnl_kill_links(net, ops);
478 }
479 list_del(&ops->list);
480}
481EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
482
483/* Return with the rtnl_lock held when there are no network
484 * devices unregistering in any network namespace.
485 */
486static void rtnl_lock_unregistering_all(void)
487{
488 struct net *net;
489 bool unregistering;
490 DEFINE_WAIT_FUNC(wait, woken_wake_function);
491
492 add_wait_queue(&netdev_unregistering_wq, &wait);
493 for (;;) {
494 unregistering = false;
495 rtnl_lock();
496 /* We held write locked pernet_ops_rwsem, and parallel
497 * setup_net() and cleanup_net() are not possible.
498 */
499 for_each_net(net) {
500 if (atomic_read(&net->dev_unreg_count) > 0) {
501 unregistering = true;
502 break;
503 }
504 }
505 if (!unregistering)
506 break;
507 __rtnl_unlock();
508
509 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
510 }
511 remove_wait_queue(&netdev_unregistering_wq, &wait);
512}
513
514/**
515 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
516 * @ops: struct rtnl_link_ops * to unregister
517 */
518void rtnl_link_unregister(struct rtnl_link_ops *ops)
519{
520 /* Close the race with setup_net() and cleanup_net() */
521 down_write(&pernet_ops_rwsem);
522 rtnl_lock_unregistering_all();
523 __rtnl_link_unregister(ops);
524 rtnl_unlock();
525 up_write(&pernet_ops_rwsem);
526}
527EXPORT_SYMBOL_GPL(rtnl_link_unregister);
528
529static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
530{
531 struct net_device *master_dev;
532 const struct rtnl_link_ops *ops;
533 size_t size = 0;
534
535 rcu_read_lock();
536
537 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
538 if (!master_dev)
539 goto out;
540
541 ops = master_dev->rtnl_link_ops;
542 if (!ops || !ops->get_slave_size)
543 goto out;
544 /* IFLA_INFO_SLAVE_DATA + nested data */
545 size = nla_total_size(sizeof(struct nlattr)) +
546 ops->get_slave_size(master_dev, dev);
547
548out:
549 rcu_read_unlock();
550 return size;
551}
552
553static size_t rtnl_link_get_size(const struct net_device *dev)
554{
555 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
556 size_t size;
557
558 if (!ops)
559 return 0;
560
561 size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
562 nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
563
564 if (ops->get_size)
565 /* IFLA_INFO_DATA + nested data */
566 size += nla_total_size(sizeof(struct nlattr)) +
567 ops->get_size(dev);
568
569 if (ops->get_xstats_size)
570 /* IFLA_INFO_XSTATS */
571 size += nla_total_size(ops->get_xstats_size(dev));
572
573 size += rtnl_link_get_slave_info_data_size(dev);
574
575 return size;
576}
577
578static LIST_HEAD(rtnl_af_ops);
579
580static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
581{
582 const struct rtnl_af_ops *ops;
583
584 ASSERT_RTNL();
585
586 list_for_each_entry(ops, &rtnl_af_ops, list) {
587 if (ops->family == family)
588 return ops;
589 }
590
591 return NULL;
592}
593
594/**
595 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
596 * @ops: struct rtnl_af_ops * to register
597 *
598 * Returns 0 on success or a negative error code.
599 */
600void rtnl_af_register(struct rtnl_af_ops *ops)
601{
602 rtnl_lock();
603 list_add_tail_rcu(&ops->list, &rtnl_af_ops);
604 rtnl_unlock();
605}
606EXPORT_SYMBOL_GPL(rtnl_af_register);
607
608/**
609 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
610 * @ops: struct rtnl_af_ops * to unregister
611 */
612void rtnl_af_unregister(struct rtnl_af_ops *ops)
613{
614 rtnl_lock();
615 list_del_rcu(&ops->list);
616 rtnl_unlock();
617
618 synchronize_rcu();
619}
620EXPORT_SYMBOL_GPL(rtnl_af_unregister);
621
622static size_t rtnl_link_get_af_size(const struct net_device *dev,
623 u32 ext_filter_mask)
624{
625 struct rtnl_af_ops *af_ops;
626 size_t size;
627
628 /* IFLA_AF_SPEC */
629 size = nla_total_size(sizeof(struct nlattr));
630
631 rcu_read_lock();
632 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
633 if (af_ops->get_link_af_size) {
634 /* AF_* + nested data */
635 size += nla_total_size(sizeof(struct nlattr)) +
636 af_ops->get_link_af_size(dev, ext_filter_mask);
637 }
638 }
639 rcu_read_unlock();
640
641 return size;
642}
643
644static bool rtnl_have_link_slave_info(const struct net_device *dev)
645{
646 struct net_device *master_dev;
647 bool ret = false;
648
649 rcu_read_lock();
650
651 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
652 if (master_dev && master_dev->rtnl_link_ops)
653 ret = true;
654 rcu_read_unlock();
655 return ret;
656}
657
658static int rtnl_link_slave_info_fill(struct sk_buff *skb,
659 const struct net_device *dev)
660{
661 struct net_device *master_dev;
662 const struct rtnl_link_ops *ops;
663 struct nlattr *slave_data;
664 int err;
665
666 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
667 if (!master_dev)
668 return 0;
669 ops = master_dev->rtnl_link_ops;
670 if (!ops)
671 return 0;
672 if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
673 return -EMSGSIZE;
674 if (ops->fill_slave_info) {
675 slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
676 if (!slave_data)
677 return -EMSGSIZE;
678 err = ops->fill_slave_info(skb, master_dev, dev);
679 if (err < 0)
680 goto err_cancel_slave_data;
681 nla_nest_end(skb, slave_data);
682 }
683 return 0;
684
685err_cancel_slave_data:
686 nla_nest_cancel(skb, slave_data);
687 return err;
688}
689
690static int rtnl_link_info_fill(struct sk_buff *skb,
691 const struct net_device *dev)
692{
693 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
694 struct nlattr *data;
695 int err;
696
697 if (!ops)
698 return 0;
699 if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
700 return -EMSGSIZE;
701 if (ops->fill_xstats) {
702 err = ops->fill_xstats(skb, dev);
703 if (err < 0)
704 return err;
705 }
706 if (ops->fill_info) {
707 data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
708 if (data == NULL)
709 return -EMSGSIZE;
710 err = ops->fill_info(skb, dev);
711 if (err < 0)
712 goto err_cancel_data;
713 nla_nest_end(skb, data);
714 }
715 return 0;
716
717err_cancel_data:
718 nla_nest_cancel(skb, data);
719 return err;
720}
721
722static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
723{
724 struct nlattr *linkinfo;
725 int err = -EMSGSIZE;
726
727 linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
728 if (linkinfo == NULL)
729 goto out;
730
731 err = rtnl_link_info_fill(skb, dev);
732 if (err < 0)
733 goto err_cancel_link;
734
735 err = rtnl_link_slave_info_fill(skb, dev);
736 if (err < 0)
737 goto err_cancel_link;
738
739 nla_nest_end(skb, linkinfo);
740 return 0;
741
742err_cancel_link:
743 nla_nest_cancel(skb, linkinfo);
744out:
745 return err;
746}
747
748int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
749{
750 struct sock *rtnl = net->rtnl;
751
752 return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
753}
754
755int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
756{
757 struct sock *rtnl = net->rtnl;
758
759 return nlmsg_unicast(rtnl, skb, pid);
760}
761EXPORT_SYMBOL(rtnl_unicast);
762
763void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
764 const struct nlmsghdr *nlh, gfp_t flags)
765{
766 struct sock *rtnl = net->rtnl;
767
768 nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
769}
770EXPORT_SYMBOL(rtnl_notify);
771
772void rtnl_set_sk_err(struct net *net, u32 group, int error)
773{
774 struct sock *rtnl = net->rtnl;
775
776 netlink_set_err(rtnl, 0, group, error);
777}
778EXPORT_SYMBOL(rtnl_set_sk_err);
779
780int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
781{
782 struct nlattr *mx;
783 int i, valid = 0;
784
785 /* nothing is dumped for dst_default_metrics, so just skip the loop */
786 if (metrics == dst_default_metrics.metrics)
787 return 0;
788
789 mx = nla_nest_start_noflag(skb, RTA_METRICS);
790 if (mx == NULL)
791 return -ENOBUFS;
792
793 for (i = 0; i < RTAX_MAX; i++) {
794 if (metrics[i]) {
795 if (i == RTAX_CC_ALGO - 1) {
796 char tmp[TCP_CA_NAME_MAX], *name;
797
798 name = tcp_ca_get_name_by_key(metrics[i], tmp);
799 if (!name)
800 continue;
801 if (nla_put_string(skb, i + 1, name))
802 goto nla_put_failure;
803 } else if (i == RTAX_FEATURES - 1) {
804 u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
805
806 if (!user_features)
807 continue;
808 BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
809 if (nla_put_u32(skb, i + 1, user_features))
810 goto nla_put_failure;
811 } else {
812 if (nla_put_u32(skb, i + 1, metrics[i]))
813 goto nla_put_failure;
814 }
815 valid++;
816 }
817 }
818
819 if (!valid) {
820 nla_nest_cancel(skb, mx);
821 return 0;
822 }
823
824 return nla_nest_end(skb, mx);
825
826nla_put_failure:
827 nla_nest_cancel(skb, mx);
828 return -EMSGSIZE;
829}
830EXPORT_SYMBOL(rtnetlink_put_metrics);
831
832int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
833 long expires, u32 error)
834{
835 struct rta_cacheinfo ci = {
836 .rta_error = error,
837 .rta_id = id,
838 };
839
840 if (dst) {
841 ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
842 ci.rta_used = dst->__use;
843 ci.rta_clntref = atomic_read(&dst->__refcnt);
844 }
845 if (expires) {
846 unsigned long clock;
847
848 clock = jiffies_to_clock_t(abs(expires));
849 clock = min_t(unsigned long, clock, INT_MAX);
850 ci.rta_expires = (expires > 0) ? clock : -clock;
851 }
852 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
853}
854EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
855
856static void set_operstate(struct net_device *dev, unsigned char transition)
857{
858 unsigned char operstate = dev->operstate;
859
860 switch (transition) {
861 case IF_OPER_UP:
862 if ((operstate == IF_OPER_DORMANT ||
863 operstate == IF_OPER_TESTING ||
864 operstate == IF_OPER_UNKNOWN) &&
865 !netif_dormant(dev) && !netif_testing(dev))
866 operstate = IF_OPER_UP;
867 break;
868
869 case IF_OPER_TESTING:
870 if (netif_oper_up(dev))
871 operstate = IF_OPER_TESTING;
872 break;
873
874 case IF_OPER_DORMANT:
875 if (netif_oper_up(dev))
876 operstate = IF_OPER_DORMANT;
877 break;
878 }
879
880 if (dev->operstate != operstate) {
881 write_lock(&dev_base_lock);
882 dev->operstate = operstate;
883 write_unlock(&dev_base_lock);
884 netdev_state_change(dev);
885 }
886}
887
888static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
889{
890 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
891 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
892}
893
894static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
895 const struct ifinfomsg *ifm)
896{
897 unsigned int flags = ifm->ifi_flags;
898
899 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
900 if (ifm->ifi_change)
901 flags = (flags & ifm->ifi_change) |
902 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
903
904 return flags;
905}
906
907static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
908 const struct rtnl_link_stats64 *b)
909{
910 a->rx_packets = b->rx_packets;
911 a->tx_packets = b->tx_packets;
912 a->rx_bytes = b->rx_bytes;
913 a->tx_bytes = b->tx_bytes;
914 a->rx_errors = b->rx_errors;
915 a->tx_errors = b->tx_errors;
916 a->rx_dropped = b->rx_dropped;
917 a->tx_dropped = b->tx_dropped;
918
919 a->multicast = b->multicast;
920 a->collisions = b->collisions;
921
922 a->rx_length_errors = b->rx_length_errors;
923 a->rx_over_errors = b->rx_over_errors;
924 a->rx_crc_errors = b->rx_crc_errors;
925 a->rx_frame_errors = b->rx_frame_errors;
926 a->rx_fifo_errors = b->rx_fifo_errors;
927 a->rx_missed_errors = b->rx_missed_errors;
928
929 a->tx_aborted_errors = b->tx_aborted_errors;
930 a->tx_carrier_errors = b->tx_carrier_errors;
931 a->tx_fifo_errors = b->tx_fifo_errors;
932 a->tx_heartbeat_errors = b->tx_heartbeat_errors;
933 a->tx_window_errors = b->tx_window_errors;
934
935 a->rx_compressed = b->rx_compressed;
936 a->tx_compressed = b->tx_compressed;
937
938 a->rx_nohandler = b->rx_nohandler;
939}
940
941/* All VF info */
942static inline int rtnl_vfinfo_size(const struct net_device *dev,
943 u32 ext_filter_mask)
944{
945 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
946 int num_vfs = dev_num_vf(dev->dev.parent);
947 size_t size = nla_total_size(0);
948 size += num_vfs *
949 (nla_total_size(0) +
950 nla_total_size(sizeof(struct ifla_vf_mac)) +
951 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
952 nla_total_size(sizeof(struct ifla_vf_vlan)) +
953 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
954 nla_total_size(MAX_VLAN_LIST_LEN *
955 sizeof(struct ifla_vf_vlan_info)) +
956 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
957 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
958 nla_total_size(sizeof(struct ifla_vf_rate)) +
959 nla_total_size(sizeof(struct ifla_vf_link_state)) +
960 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
961 nla_total_size(0) + /* nest IFLA_VF_STATS */
962 /* IFLA_VF_STATS_RX_PACKETS */
963 nla_total_size_64bit(sizeof(__u64)) +
964 /* IFLA_VF_STATS_TX_PACKETS */
965 nla_total_size_64bit(sizeof(__u64)) +
966 /* IFLA_VF_STATS_RX_BYTES */
967 nla_total_size_64bit(sizeof(__u64)) +
968 /* IFLA_VF_STATS_TX_BYTES */
969 nla_total_size_64bit(sizeof(__u64)) +
970 /* IFLA_VF_STATS_BROADCAST */
971 nla_total_size_64bit(sizeof(__u64)) +
972 /* IFLA_VF_STATS_MULTICAST */
973 nla_total_size_64bit(sizeof(__u64)) +
974 /* IFLA_VF_STATS_RX_DROPPED */
975 nla_total_size_64bit(sizeof(__u64)) +
976 /* IFLA_VF_STATS_TX_DROPPED */
977 nla_total_size_64bit(sizeof(__u64)) +
978 nla_total_size(sizeof(struct ifla_vf_trust)));
979 return size;
980 } else
981 return 0;
982}
983
984static size_t rtnl_port_size(const struct net_device *dev,
985 u32 ext_filter_mask)
986{
987 size_t port_size = nla_total_size(4) /* PORT_VF */
988 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
989 + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
990 + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
991 + nla_total_size(1) /* PROT_VDP_REQUEST */
992 + nla_total_size(2); /* PORT_VDP_RESPONSE */
993 size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
994 size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
995 + port_size;
996 size_t port_self_size = nla_total_size(sizeof(struct nlattr))
997 + port_size;
998
999 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1000 !(ext_filter_mask & RTEXT_FILTER_VF))
1001 return 0;
1002 if (dev_num_vf(dev->dev.parent))
1003 return port_self_size + vf_ports_size +
1004 vf_port_size * dev_num_vf(dev->dev.parent);
1005 else
1006 return port_self_size;
1007}
1008
1009static size_t rtnl_xdp_size(void)
1010{
1011 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
1012 nla_total_size(1) + /* XDP_ATTACHED */
1013 nla_total_size(4) + /* XDP_PROG_ID (or 1st mode) */
1014 nla_total_size(4); /* XDP_<mode>_PROG_ID */
1015
1016 return xdp_size;
1017}
1018
1019static size_t rtnl_prop_list_size(const struct net_device *dev)
1020{
1021 struct netdev_name_node *name_node;
1022 size_t size;
1023
1024 if (list_empty(&dev->name_node->list))
1025 return 0;
1026 size = nla_total_size(0);
1027 list_for_each_entry(name_node, &dev->name_node->list, list)
1028 size += nla_total_size(ALTIFNAMSIZ);
1029 return size;
1030}
1031
1032static size_t rtnl_proto_down_size(const struct net_device *dev)
1033{
1034 size_t size = nla_total_size(1);
1035
1036 if (dev->proto_down_reason)
1037 size += nla_total_size(0) + nla_total_size(4);
1038
1039 return size;
1040}
1041
1042static size_t rtnl_devlink_port_size(const struct net_device *dev)
1043{
1044 size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */
1045
1046 if (dev->devlink_port)
1047 size += devlink_nl_port_handle_size(dev->devlink_port);
1048
1049 return size;
1050}
1051
1052static noinline size_t if_nlmsg_size(const struct net_device *dev,
1053 u32 ext_filter_mask)
1054{
1055 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
1056 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
1057 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
1058 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
1059 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
1060 + nla_total_size(sizeof(struct rtnl_link_stats))
1061 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
1062 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
1063 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
1064 + nla_total_size(4) /* IFLA_TXQLEN */
1065 + nla_total_size(4) /* IFLA_WEIGHT */
1066 + nla_total_size(4) /* IFLA_MTU */
1067 + nla_total_size(4) /* IFLA_LINK */
1068 + nla_total_size(4) /* IFLA_MASTER */
1069 + nla_total_size(1) /* IFLA_CARRIER */
1070 + nla_total_size(4) /* IFLA_PROMISCUITY */
1071 + nla_total_size(4) /* IFLA_ALLMULTI */
1072 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
1073 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
1074 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
1075 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
1076 + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
1077 + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
1078 + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
1079 + nla_total_size(1) /* IFLA_OPERSTATE */
1080 + nla_total_size(1) /* IFLA_LINKMODE */
1081 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
1082 + nla_total_size(4) /* IFLA_LINK_NETNSID */
1083 + nla_total_size(4) /* IFLA_GROUP */
1084 + nla_total_size(ext_filter_mask
1085 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
1086 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
1087 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
1088 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
1089 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
1090 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
1091 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
1092 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
1093 + rtnl_xdp_size() /* IFLA_XDP */
1094 + nla_total_size(4) /* IFLA_EVENT */
1095 + nla_total_size(4) /* IFLA_NEW_NETNSID */
1096 + nla_total_size(4) /* IFLA_NEW_IFINDEX */
1097 + rtnl_proto_down_size(dev) /* proto down */
1098 + nla_total_size(4) /* IFLA_TARGET_NETNSID */
1099 + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
1100 + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
1101 + nla_total_size(4) /* IFLA_MIN_MTU */
1102 + nla_total_size(4) /* IFLA_MAX_MTU */
1103 + rtnl_prop_list_size(dev)
1104 + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
1105 + rtnl_devlink_port_size(dev)
1106 + 0;
1107}
1108
1109static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
1110{
1111 struct nlattr *vf_ports;
1112 struct nlattr *vf_port;
1113 int vf;
1114 int err;
1115
1116 vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
1117 if (!vf_ports)
1118 return -EMSGSIZE;
1119
1120 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
1121 vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
1122 if (!vf_port)
1123 goto nla_put_failure;
1124 if (nla_put_u32(skb, IFLA_PORT_VF, vf))
1125 goto nla_put_failure;
1126 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
1127 if (err == -EMSGSIZE)
1128 goto nla_put_failure;
1129 if (err) {
1130 nla_nest_cancel(skb, vf_port);
1131 continue;
1132 }
1133 nla_nest_end(skb, vf_port);
1134 }
1135
1136 nla_nest_end(skb, vf_ports);
1137
1138 return 0;
1139
1140nla_put_failure:
1141 nla_nest_cancel(skb, vf_ports);
1142 return -EMSGSIZE;
1143}
1144
1145static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
1146{
1147 struct nlattr *port_self;
1148 int err;
1149
1150 port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
1151 if (!port_self)
1152 return -EMSGSIZE;
1153
1154 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
1155 if (err) {
1156 nla_nest_cancel(skb, port_self);
1157 return (err == -EMSGSIZE) ? err : 0;
1158 }
1159
1160 nla_nest_end(skb, port_self);
1161
1162 return 0;
1163}
1164
1165static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
1166 u32 ext_filter_mask)
1167{
1168 int err;
1169
1170 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1171 !(ext_filter_mask & RTEXT_FILTER_VF))
1172 return 0;
1173
1174 err = rtnl_port_self_fill(skb, dev);
1175 if (err)
1176 return err;
1177
1178 if (dev_num_vf(dev->dev.parent)) {
1179 err = rtnl_vf_ports_fill(skb, dev);
1180 if (err)
1181 return err;
1182 }
1183
1184 return 0;
1185}
1186
1187static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1188{
1189 int err;
1190 struct netdev_phys_item_id ppid;
1191
1192 err = dev_get_phys_port_id(dev, &ppid);
1193 if (err) {
1194 if (err == -EOPNOTSUPP)
1195 return 0;
1196 return err;
1197 }
1198
1199 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1200 return -EMSGSIZE;
1201
1202 return 0;
1203}
1204
1205static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1206{
1207 char name[IFNAMSIZ];
1208 int err;
1209
1210 err = dev_get_phys_port_name(dev, name, sizeof(name));
1211 if (err) {
1212 if (err == -EOPNOTSUPP)
1213 return 0;
1214 return err;
1215 }
1216
1217 if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
1218 return -EMSGSIZE;
1219
1220 return 0;
1221}
1222
1223static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1224{
1225 struct netdev_phys_item_id ppid = { };
1226 int err;
1227
1228 err = dev_get_port_parent_id(dev, &ppid, false);
1229 if (err) {
1230 if (err == -EOPNOTSUPP)
1231 return 0;
1232 return err;
1233 }
1234
1235 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
1236 return -EMSGSIZE;
1237
1238 return 0;
1239}
1240
1241static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1242 struct net_device *dev)
1243{
1244 struct rtnl_link_stats64 *sp;
1245 struct nlattr *attr;
1246
1247 attr = nla_reserve_64bit(skb, IFLA_STATS64,
1248 sizeof(struct rtnl_link_stats64), IFLA_PAD);
1249 if (!attr)
1250 return -EMSGSIZE;
1251
1252 sp = nla_data(attr);
1253 dev_get_stats(dev, sp);
1254
1255 attr = nla_reserve(skb, IFLA_STATS,
1256 sizeof(struct rtnl_link_stats));
1257 if (!attr)
1258 return -EMSGSIZE;
1259
1260 copy_rtnl_link_stats(nla_data(attr), sp);
1261
1262 return 0;
1263}
1264
1265static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1266 struct net_device *dev,
1267 int vfs_num,
1268 struct nlattr *vfinfo)
1269{
1270 struct ifla_vf_rss_query_en vf_rss_query_en;
1271 struct nlattr *vf, *vfstats, *vfvlanlist;
1272 struct ifla_vf_link_state vf_linkstate;
1273 struct ifla_vf_vlan_info vf_vlan_info;
1274 struct ifla_vf_spoofchk vf_spoofchk;
1275 struct ifla_vf_tx_rate vf_tx_rate;
1276 struct ifla_vf_stats vf_stats;
1277 struct ifla_vf_trust vf_trust;
1278 struct ifla_vf_vlan vf_vlan;
1279 struct ifla_vf_rate vf_rate;
1280 struct ifla_vf_mac vf_mac;
1281 struct ifla_vf_broadcast vf_broadcast;
1282 struct ifla_vf_info ivi;
1283 struct ifla_vf_guid node_guid;
1284 struct ifla_vf_guid port_guid;
1285
1286 memset(&ivi, 0, sizeof(ivi));
1287
1288 /* Not all SR-IOV capable drivers support the
1289 * spoofcheck and "RSS query enable" query. Preset to
1290 * -1 so the user space tool can detect that the driver
1291 * didn't report anything.
1292 */
1293 ivi.spoofchk = -1;
1294 ivi.rss_query_en = -1;
1295 ivi.trusted = -1;
1296 /* The default value for VF link state is "auto"
1297 * IFLA_VF_LINK_STATE_AUTO which equals zero
1298 */
1299 ivi.linkstate = 0;
1300 /* VLAN Protocol by default is 802.1Q */
1301 ivi.vlan_proto = htons(ETH_P_8021Q);
1302 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1303 return 0;
1304
1305 memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
1306 memset(&node_guid, 0, sizeof(node_guid));
1307 memset(&port_guid, 0, sizeof(port_guid));
1308
1309 vf_mac.vf =
1310 vf_vlan.vf =
1311 vf_vlan_info.vf =
1312 vf_rate.vf =
1313 vf_tx_rate.vf =
1314 vf_spoofchk.vf =
1315 vf_linkstate.vf =
1316 vf_rss_query_en.vf =
1317 vf_trust.vf =
1318 node_guid.vf =
1319 port_guid.vf = ivi.vf;
1320
1321 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1322 memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
1323 vf_vlan.vlan = ivi.vlan;
1324 vf_vlan.qos = ivi.qos;
1325 vf_vlan_info.vlan = ivi.vlan;
1326 vf_vlan_info.qos = ivi.qos;
1327 vf_vlan_info.vlan_proto = ivi.vlan_proto;
1328 vf_tx_rate.rate = ivi.max_tx_rate;
1329 vf_rate.min_tx_rate = ivi.min_tx_rate;
1330 vf_rate.max_tx_rate = ivi.max_tx_rate;
1331 vf_spoofchk.setting = ivi.spoofchk;
1332 vf_linkstate.link_state = ivi.linkstate;
1333 vf_rss_query_en.setting = ivi.rss_query_en;
1334 vf_trust.setting = ivi.trusted;
1335 vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
1336 if (!vf)
1337 goto nla_put_vfinfo_failure;
1338 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1339 nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
1340 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1341 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1342 &vf_rate) ||
1343 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1344 &vf_tx_rate) ||
1345 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1346 &vf_spoofchk) ||
1347 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1348 &vf_linkstate) ||
1349 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1350 sizeof(vf_rss_query_en),
1351 &vf_rss_query_en) ||
1352 nla_put(skb, IFLA_VF_TRUST,
1353 sizeof(vf_trust), &vf_trust))
1354 goto nla_put_vf_failure;
1355
1356 if (dev->netdev_ops->ndo_get_vf_guid &&
1357 !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
1358 &port_guid)) {
1359 if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
1360 &node_guid) ||
1361 nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
1362 &port_guid))
1363 goto nla_put_vf_failure;
1364 }
1365 vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
1366 if (!vfvlanlist)
1367 goto nla_put_vf_failure;
1368 if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
1369 &vf_vlan_info)) {
1370 nla_nest_cancel(skb, vfvlanlist);
1371 goto nla_put_vf_failure;
1372 }
1373 nla_nest_end(skb, vfvlanlist);
1374 memset(&vf_stats, 0, sizeof(vf_stats));
1375 if (dev->netdev_ops->ndo_get_vf_stats)
1376 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1377 &vf_stats);
1378 vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
1379 if (!vfstats)
1380 goto nla_put_vf_failure;
1381 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1382 vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1383 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1384 vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1385 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1386 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1387 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1388 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1389 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1390 vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1391 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1392 vf_stats.multicast, IFLA_VF_STATS_PAD) ||
1393 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
1394 vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
1395 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
1396 vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
1397 nla_nest_cancel(skb, vfstats);
1398 goto nla_put_vf_failure;
1399 }
1400 nla_nest_end(skb, vfstats);
1401 nla_nest_end(skb, vf);
1402 return 0;
1403
1404nla_put_vf_failure:
1405 nla_nest_cancel(skb, vf);
1406nla_put_vfinfo_failure:
1407 nla_nest_cancel(skb, vfinfo);
1408 return -EMSGSIZE;
1409}
1410
1411static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
1412 struct net_device *dev,
1413 u32 ext_filter_mask)
1414{
1415 struct nlattr *vfinfo;
1416 int i, num_vfs;
1417
1418 if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
1419 return 0;
1420
1421 num_vfs = dev_num_vf(dev->dev.parent);
1422 if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
1423 return -EMSGSIZE;
1424
1425 if (!dev->netdev_ops->ndo_get_vf_config)
1426 return 0;
1427
1428 vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
1429 if (!vfinfo)
1430 return -EMSGSIZE;
1431
1432 for (i = 0; i < num_vfs; i++) {
1433 if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
1434 return -EMSGSIZE;
1435 }
1436
1437 nla_nest_end(skb, vfinfo);
1438 return 0;
1439}
1440
1441static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1442{
1443 struct rtnl_link_ifmap map;
1444
1445 memset(&map, 0, sizeof(map));
1446 map.mem_start = dev->mem_start;
1447 map.mem_end = dev->mem_end;
1448 map.base_addr = dev->base_addr;
1449 map.irq = dev->irq;
1450 map.dma = dev->dma;
1451 map.port = dev->if_port;
1452
1453 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
1454 return -EMSGSIZE;
1455
1456 return 0;
1457}
1458
1459static u32 rtnl_xdp_prog_skb(struct net_device *dev)
1460{
1461 const struct bpf_prog *generic_xdp_prog;
1462
1463 ASSERT_RTNL();
1464
1465 generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
1466 if (!generic_xdp_prog)
1467 return 0;
1468 return generic_xdp_prog->aux->id;
1469}
1470
1471static u32 rtnl_xdp_prog_drv(struct net_device *dev)
1472{
1473 return dev_xdp_prog_id(dev, XDP_MODE_DRV);
1474}
1475
1476static u32 rtnl_xdp_prog_hw(struct net_device *dev)
1477{
1478 return dev_xdp_prog_id(dev, XDP_MODE_HW);
1479}
1480
1481static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
1482 u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
1483 u32 (*get_prog_id)(struct net_device *dev))
1484{
1485 u32 curr_id;
1486 int err;
1487
1488 curr_id = get_prog_id(dev);
1489 if (!curr_id)
1490 return 0;
1491
1492 *prog_id = curr_id;
1493 err = nla_put_u32(skb, attr, curr_id);
1494 if (err)
1495 return err;
1496
1497 if (*mode != XDP_ATTACHED_NONE)
1498 *mode = XDP_ATTACHED_MULTI;
1499 else
1500 *mode = tgt_mode;
1501
1502 return 0;
1503}
1504
1505static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1506{
1507 struct nlattr *xdp;
1508 u32 prog_id;
1509 int err;
1510 u8 mode;
1511
1512 xdp = nla_nest_start_noflag(skb, IFLA_XDP);
1513 if (!xdp)
1514 return -EMSGSIZE;
1515
1516 prog_id = 0;
1517 mode = XDP_ATTACHED_NONE;
1518 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
1519 IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
1520 if (err)
1521 goto err_cancel;
1522 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
1523 IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
1524 if (err)
1525 goto err_cancel;
1526 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
1527 IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
1528 if (err)
1529 goto err_cancel;
1530
1531 err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
1532 if (err)
1533 goto err_cancel;
1534
1535 if (prog_id && mode != XDP_ATTACHED_MULTI) {
1536 err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
1537 if (err)
1538 goto err_cancel;
1539 }
1540
1541 nla_nest_end(skb, xdp);
1542 return 0;
1543
1544err_cancel:
1545 nla_nest_cancel(skb, xdp);
1546 return err;
1547}
1548
1549static u32 rtnl_get_event(unsigned long event)
1550{
1551 u32 rtnl_event_type = IFLA_EVENT_NONE;
1552
1553 switch (event) {
1554 case NETDEV_REBOOT:
1555 rtnl_event_type = IFLA_EVENT_REBOOT;
1556 break;
1557 case NETDEV_FEAT_CHANGE:
1558 rtnl_event_type = IFLA_EVENT_FEATURES;
1559 break;
1560 case NETDEV_BONDING_FAILOVER:
1561 rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
1562 break;
1563 case NETDEV_NOTIFY_PEERS:
1564 rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
1565 break;
1566 case NETDEV_RESEND_IGMP:
1567 rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
1568 break;
1569 case NETDEV_CHANGEINFODATA:
1570 rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
1571 break;
1572 default:
1573 break;
1574 }
1575
1576 return rtnl_event_type;
1577}
1578
1579static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
1580{
1581 const struct net_device *upper_dev;
1582 int ret = 0;
1583
1584 rcu_read_lock();
1585
1586 upper_dev = netdev_master_upper_dev_get_rcu(dev);
1587 if (upper_dev)
1588 ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);
1589
1590 rcu_read_unlock();
1591 return ret;
1592}
1593
1594static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
1595 bool force)
1596{
1597 int ifindex = dev_get_iflink(dev);
1598
1599 if (force || dev->ifindex != ifindex)
1600 return nla_put_u32(skb, IFLA_LINK, ifindex);
1601
1602 return 0;
1603}
1604
1605static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
1606 struct net_device *dev)
1607{
1608 char buf[IFALIASZ];
1609 int ret;
1610
1611 ret = dev_get_alias(dev, buf, sizeof(buf));
1612 return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
1613}
1614
1615static int rtnl_fill_link_netnsid(struct sk_buff *skb,
1616 const struct net_device *dev,
1617 struct net *src_net, gfp_t gfp)
1618{
1619 bool put_iflink = false;
1620
1621 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
1622 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1623
1624 if (!net_eq(dev_net(dev), link_net)) {
1625 int id = peernet2id_alloc(src_net, link_net, gfp);
1626
1627 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1628 return -EMSGSIZE;
1629
1630 put_iflink = true;
1631 }
1632 }
1633
1634 return nla_put_iflink(skb, dev, put_iflink);
1635}
1636
1637static int rtnl_fill_link_af(struct sk_buff *skb,
1638 const struct net_device *dev,
1639 u32 ext_filter_mask)
1640{
1641 const struct rtnl_af_ops *af_ops;
1642 struct nlattr *af_spec;
1643
1644 af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
1645 if (!af_spec)
1646 return -EMSGSIZE;
1647
1648 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
1649 struct nlattr *af;
1650 int err;
1651
1652 if (!af_ops->fill_link_af)
1653 continue;
1654
1655 af = nla_nest_start_noflag(skb, af_ops->family);
1656 if (!af)
1657 return -EMSGSIZE;
1658
1659 err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1660 /*
1661 * Caller may return ENODATA to indicate that there
1662 * was no data to be dumped. This is not an error, it
1663 * means we should trim the attribute header and
1664 * continue.
1665 */
1666 if (err == -ENODATA)
1667 nla_nest_cancel(skb, af);
1668 else if (err < 0)
1669 return -EMSGSIZE;
1670
1671 nla_nest_end(skb, af);
1672 }
1673
1674 nla_nest_end(skb, af_spec);
1675 return 0;
1676}
1677
1678static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
1679 const struct net_device *dev)
1680{
1681 struct netdev_name_node *name_node;
1682 int count = 0;
1683
1684 list_for_each_entry(name_node, &dev->name_node->list, list) {
1685 if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
1686 return -EMSGSIZE;
1687 count++;
1688 }
1689 return count;
1690}
1691
1692static int rtnl_fill_prop_list(struct sk_buff *skb,
1693 const struct net_device *dev)
1694{
1695 struct nlattr *prop_list;
1696 int ret;
1697
1698 prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
1699 if (!prop_list)
1700 return -EMSGSIZE;
1701
1702 ret = rtnl_fill_alt_ifnames(skb, dev);
1703 if (ret <= 0)
1704 goto nest_cancel;
1705
1706 nla_nest_end(skb, prop_list);
1707 return 0;
1708
1709nest_cancel:
1710 nla_nest_cancel(skb, prop_list);
1711 return ret;
1712}
1713
1714static int rtnl_fill_proto_down(struct sk_buff *skb,
1715 const struct net_device *dev)
1716{
1717 struct nlattr *pr;
1718 u32 preason;
1719
1720 if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1721 goto nla_put_failure;
1722
1723 preason = dev->proto_down_reason;
1724 if (!preason)
1725 return 0;
1726
1727 pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
1728 if (!pr)
1729 return -EMSGSIZE;
1730
1731 if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
1732 nla_nest_cancel(skb, pr);
1733 goto nla_put_failure;
1734 }
1735
1736 nla_nest_end(skb, pr);
1737 return 0;
1738
1739nla_put_failure:
1740 return -EMSGSIZE;
1741}
1742
1743static int rtnl_fill_devlink_port(struct sk_buff *skb,
1744 const struct net_device *dev)
1745{
1746 struct nlattr *devlink_port_nest;
1747 int ret;
1748
1749 devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
1750 if (!devlink_port_nest)
1751 return -EMSGSIZE;
1752
1753 if (dev->devlink_port) {
1754 ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
1755 if (ret < 0)
1756 goto nest_cancel;
1757 }
1758
1759 nla_nest_end(skb, devlink_port_nest);
1760 return 0;
1761
1762nest_cancel:
1763 nla_nest_cancel(skb, devlink_port_nest);
1764 return ret;
1765}
1766
1767static int rtnl_fill_ifinfo(struct sk_buff *skb,
1768 struct net_device *dev, struct net *src_net,
1769 int type, u32 pid, u32 seq, u32 change,
1770 unsigned int flags, u32 ext_filter_mask,
1771 u32 event, int *new_nsid, int new_ifindex,
1772 int tgt_netnsid, gfp_t gfp)
1773{
1774 struct ifinfomsg *ifm;
1775 struct nlmsghdr *nlh;
1776 struct Qdisc *qdisc;
1777
1778 ASSERT_RTNL();
1779 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
1780 if (nlh == NULL)
1781 return -EMSGSIZE;
1782
1783 ifm = nlmsg_data(nlh);
1784 ifm->ifi_family = AF_UNSPEC;
1785 ifm->__ifi_pad = 0;
1786 ifm->ifi_type = dev->type;
1787 ifm->ifi_index = dev->ifindex;
1788 ifm->ifi_flags = dev_get_flags(dev);
1789 ifm->ifi_change = change;
1790
1791 if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
1792 goto nla_put_failure;
1793
1794 qdisc = rtnl_dereference(dev->qdisc);
1795 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
1796 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
1797 nla_put_u8(skb, IFLA_OPERSTATE,
1798 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
1799 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
1800 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
1801 nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
1802 nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
1803 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
1804 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
1805 nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) ||
1806 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
1807 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
1808 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
1809 nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
1810 nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
1811 nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
1812#ifdef CONFIG_RPS
1813 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1814#endif
1815 put_master_ifindex(skb, dev) ||
1816 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
1817 (qdisc &&
1818 nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
1819 nla_put_ifalias(skb, dev) ||
1820 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
1821 atomic_read(&dev->carrier_up_count) +
1822 atomic_read(&dev->carrier_down_count)) ||
1823 nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
1824 atomic_read(&dev->carrier_up_count)) ||
1825 nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
1826 atomic_read(&dev->carrier_down_count)))
1827 goto nla_put_failure;
1828
1829 if (rtnl_fill_proto_down(skb, dev))
1830 goto nla_put_failure;
1831
1832 if (event != IFLA_EVENT_NONE) {
1833 if (nla_put_u32(skb, IFLA_EVENT, event))
1834 goto nla_put_failure;
1835 }
1836
1837 if (rtnl_fill_link_ifmap(skb, dev))
1838 goto nla_put_failure;
1839
1840 if (dev->addr_len) {
1841 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1842 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1843 goto nla_put_failure;
1844 }
1845
1846 if (rtnl_phys_port_id_fill(skb, dev))
1847 goto nla_put_failure;
1848
1849 if (rtnl_phys_port_name_fill(skb, dev))
1850 goto nla_put_failure;
1851
1852 if (rtnl_phys_switch_id_fill(skb, dev))
1853 goto nla_put_failure;
1854
1855 if (rtnl_fill_stats(skb, dev))
1856 goto nla_put_failure;
1857
1858 if (rtnl_fill_vf(skb, dev, ext_filter_mask))
1859 goto nla_put_failure;
1860
1861 if (rtnl_port_fill(skb, dev, ext_filter_mask))
1862 goto nla_put_failure;
1863
1864 if (rtnl_xdp_fill(skb, dev))
1865 goto nla_put_failure;
1866
1867 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1868 if (rtnl_link_fill(skb, dev) < 0)
1869 goto nla_put_failure;
1870 }
1871
1872 if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
1873 goto nla_put_failure;
1874
1875 if (new_nsid &&
1876 nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
1877 goto nla_put_failure;
1878 if (new_ifindex &&
1879 nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
1880 goto nla_put_failure;
1881
1882 if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
1883 nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
1884 goto nla_put_failure;
1885
1886 rcu_read_lock();
1887 if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
1888 goto nla_put_failure_rcu;
1889 rcu_read_unlock();
1890
1891 if (rtnl_fill_prop_list(skb, dev))
1892 goto nla_put_failure;
1893
1894 if (dev->dev.parent &&
1895 nla_put_string(skb, IFLA_PARENT_DEV_NAME,
1896 dev_name(dev->dev.parent)))
1897 goto nla_put_failure;
1898
1899 if (dev->dev.parent && dev->dev.parent->bus &&
1900 nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
1901 dev->dev.parent->bus->name))
1902 goto nla_put_failure;
1903
1904 if (rtnl_fill_devlink_port(skb, dev))
1905 goto nla_put_failure;
1906
1907 nlmsg_end(skb, nlh);
1908 return 0;
1909
1910nla_put_failure_rcu:
1911 rcu_read_unlock();
1912nla_put_failure:
1913 nlmsg_cancel(skb, nlh);
1914 return -EMSGSIZE;
1915}
1916
1917static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1918 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
1919 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1920 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1921 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
1922 [IFLA_MTU] = { .type = NLA_U32 },
1923 [IFLA_LINK] = { .type = NLA_U32 },
1924 [IFLA_MASTER] = { .type = NLA_U32 },
1925 [IFLA_CARRIER] = { .type = NLA_U8 },
1926 [IFLA_TXQLEN] = { .type = NLA_U32 },
1927 [IFLA_WEIGHT] = { .type = NLA_U32 },
1928 [IFLA_OPERSTATE] = { .type = NLA_U8 },
1929 [IFLA_LINKMODE] = { .type = NLA_U8 },
1930 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1931 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1932 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
1933 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
1934 * allow 0-length string (needed to remove an alias).
1935 */
1936 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
1937 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
1938 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
1939 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
1940 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
1941 [IFLA_EXT_MASK] = { .type = NLA_U32 },
1942 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
1943 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
1944 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
1945 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 },
1946 [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 },
1947 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1948 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
1949 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1950 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
1951 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
1952 [IFLA_XDP] = { .type = NLA_NESTED },
1953 [IFLA_EVENT] = { .type = NLA_U32 },
1954 [IFLA_GROUP] = { .type = NLA_U32 },
1955 [IFLA_TARGET_NETNSID] = { .type = NLA_S32 },
1956 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 },
1957 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
1958 [IFLA_MIN_MTU] = { .type = NLA_U32 },
1959 [IFLA_MAX_MTU] = { .type = NLA_U32 },
1960 [IFLA_PROP_LIST] = { .type = NLA_NESTED },
1961 [IFLA_ALT_IFNAME] = { .type = NLA_STRING,
1962 .len = ALTIFNAMSIZ - 1 },
1963 [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT },
1964 [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
1965 [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
1966 [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING },
1967 [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 },
1968 [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT },
1969 [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT },
1970 [IFLA_ALLMULTI] = { .type = NLA_REJECT },
1971};
1972
1973static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1974 [IFLA_INFO_KIND] = { .type = NLA_STRING },
1975 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
1976 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
1977 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
1978};
1979
1980static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1981 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
1982 [IFLA_VF_BROADCAST] = { .type = NLA_REJECT },
1983 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
1984 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
1985 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
1986 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
1987 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
1988 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
1989 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
1990 [IFLA_VF_STATS] = { .type = NLA_NESTED },
1991 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
1992 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1993 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1994};
1995
1996static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
1997 [IFLA_PORT_VF] = { .type = NLA_U32 },
1998 [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
1999 .len = PORT_PROFILE_MAX },
2000 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
2001 .len = PORT_UUID_MAX },
2002 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
2003 .len = PORT_UUID_MAX },
2004 [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
2005 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
2006
2007 /* Unused, but we need to keep it here since user space could
2008 * fill it. It's also broken with regard to NLA_BINARY use in
2009 * combination with structs.
2010 */
2011 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
2012 .len = sizeof(struct ifla_port_vsi) },
2013};
2014
2015static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
2016 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD },
2017 [IFLA_XDP_FD] = { .type = NLA_S32 },
2018 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 },
2019 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
2020 [IFLA_XDP_FLAGS] = { .type = NLA_U32 },
2021 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
2022};
2023
2024static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
2025{
2026 const struct rtnl_link_ops *ops = NULL;
2027 struct nlattr *linfo[IFLA_INFO_MAX + 1];
2028
2029 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
2030 return NULL;
2031
2032 if (linfo[IFLA_INFO_KIND]) {
2033 char kind[MODULE_NAME_LEN];
2034
2035 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
2036 ops = rtnl_link_ops_get(kind);
2037 }
2038
2039 return ops;
2040}
2041
2042static bool link_master_filtered(struct net_device *dev, int master_idx)
2043{
2044 struct net_device *master;
2045
2046 if (!master_idx)
2047 return false;
2048
2049 master = netdev_master_upper_dev_get(dev);
2050
2051 /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need
2052 * another invalid value for ifindex to denote "no master".
2053 */
2054 if (master_idx == -1)
2055 return !!master;
2056
2057 if (!master || master->ifindex != master_idx)
2058 return true;
2059
2060 return false;
2061}
2062
2063static bool link_kind_filtered(const struct net_device *dev,
2064 const struct rtnl_link_ops *kind_ops)
2065{
2066 if (kind_ops && dev->rtnl_link_ops != kind_ops)
2067 return true;
2068
2069 return false;
2070}
2071
2072static bool link_dump_filtered(struct net_device *dev,
2073 int master_idx,
2074 const struct rtnl_link_ops *kind_ops)
2075{
2076 if (link_master_filtered(dev, master_idx) ||
2077 link_kind_filtered(dev, kind_ops))
2078 return true;
2079
2080 return false;
2081}
2082
2083/**
2084 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
2085 * @sk: netlink socket
2086 * @netnsid: network namespace identifier
2087 *
2088 * Returns the network namespace identified by netnsid on success or an error
2089 * pointer on failure.
2090 */
2091struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
2092{
2093 struct net *net;
2094
2095 net = get_net_ns_by_id(sock_net(sk), netnsid);
2096 if (!net)
2097 return ERR_PTR(-EINVAL);
2098
2099 /* For now, the caller is required to have CAP_NET_ADMIN in
2100 * the user namespace owning the target net ns.
2101 */
2102 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
2103 put_net(net);
2104 return ERR_PTR(-EACCES);
2105 }
2106 return net;
2107}
2108EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
2109
2110static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
2111 bool strict_check, struct nlattr **tb,
2112 struct netlink_ext_ack *extack)
2113{
2114 int hdrlen;
2115
2116 if (strict_check) {
2117 struct ifinfomsg *ifm;
2118
2119 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
2120 NL_SET_ERR_MSG(extack, "Invalid header for link dump");
2121 return -EINVAL;
2122 }
2123
2124 ifm = nlmsg_data(nlh);
2125 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
2126 ifm->ifi_change) {
2127 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
2128 return -EINVAL;
2129 }
2130 if (ifm->ifi_index) {
2131 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
2132 return -EINVAL;
2133 }
2134
2135 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
2136 IFLA_MAX, ifla_policy,
2137 extack);
2138 }
2139
2140 /* A hack to preserve kernel<->userspace interface.
2141 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
2142 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
2143 * what iproute2 < v3.9.0 used.
2144 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
2145 * attribute, its netlink message is shorter than struct ifinfomsg.
2146 */
2147 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2148 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2149
2150 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
2151 extack);
2152}
2153
2154static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
2155{
2156 struct netlink_ext_ack *extack = cb->extack;
2157 const struct nlmsghdr *nlh = cb->nlh;
2158 struct net *net = sock_net(skb->sk);
2159 struct net *tgt_net = net;
2160 int h, s_h;
2161 int idx = 0, s_idx;
2162 struct net_device *dev;
2163 struct hlist_head *head;
2164 struct nlattr *tb[IFLA_MAX+1];
2165 u32 ext_filter_mask = 0;
2166 const struct rtnl_link_ops *kind_ops = NULL;
2167 unsigned int flags = NLM_F_MULTI;
2168 int master_idx = 0;
2169 int netnsid = -1;
2170 int err, i;
2171
2172 s_h = cb->args[0];
2173 s_idx = cb->args[1];
2174
2175 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
2176 if (err < 0) {
2177 if (cb->strict_check)
2178 return err;
2179
2180 goto walk_entries;
2181 }
2182
2183 for (i = 0; i <= IFLA_MAX; ++i) {
2184 if (!tb[i])
2185 continue;
2186
2187 /* new attributes should only be added with strict checking */
2188 switch (i) {
2189 case IFLA_TARGET_NETNSID:
2190 netnsid = nla_get_s32(tb[i]);
2191 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
2192 if (IS_ERR(tgt_net)) {
2193 NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
2194 return PTR_ERR(tgt_net);
2195 }
2196 break;
2197 case IFLA_EXT_MASK:
2198 ext_filter_mask = nla_get_u32(tb[i]);
2199 break;
2200 case IFLA_MASTER:
2201 master_idx = nla_get_u32(tb[i]);
2202 break;
2203 case IFLA_LINKINFO:
2204 kind_ops = linkinfo_to_kind_ops(tb[i]);
2205 break;
2206 default:
2207 if (cb->strict_check) {
2208 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
2209 return -EINVAL;
2210 }
2211 }
2212 }
2213
2214 if (master_idx || kind_ops)
2215 flags |= NLM_F_DUMP_FILTERED;
2216
2217walk_entries:
2218 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
2219 idx = 0;
2220 head = &tgt_net->dev_index_head[h];
2221 hlist_for_each_entry(dev, head, index_hlist) {
2222 if (link_dump_filtered(dev, master_idx, kind_ops))
2223 goto cont;
2224 if (idx < s_idx)
2225 goto cont;
2226 err = rtnl_fill_ifinfo(skb, dev, net,
2227 RTM_NEWLINK,
2228 NETLINK_CB(cb->skb).portid,
2229 nlh->nlmsg_seq, 0, flags,
2230 ext_filter_mask, 0, NULL, 0,
2231 netnsid, GFP_KERNEL);
2232
2233 if (err < 0) {
2234 if (likely(skb->len))
2235 goto out;
2236
2237 goto out_err;
2238 }
2239cont:
2240 idx++;
2241 }
2242 }
2243out:
2244 err = skb->len;
2245out_err:
2246 cb->args[1] = idx;
2247 cb->args[0] = h;
2248 cb->seq = tgt_net->dev_base_seq;
2249 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2250 if (netnsid >= 0)
2251 put_net(tgt_net);
2252
2253 return err;
2254}
2255
2256int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
2257 struct netlink_ext_ack *exterr)
2258{
2259 return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
2260 exterr);
2261}
2262EXPORT_SYMBOL(rtnl_nla_parse_ifla);
2263
2264struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
2265{
2266 struct net *net;
2267 /* Examine the link attributes and figure out which
2268 * network namespace we are talking about.
2269 */
2270 if (tb[IFLA_NET_NS_PID])
2271 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
2272 else if (tb[IFLA_NET_NS_FD])
2273 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
2274 else
2275 net = get_net(src_net);
2276 return net;
2277}
2278EXPORT_SYMBOL(rtnl_link_get_net);
2279
2280/* Figure out which network namespace we are talking about by
2281 * examining the link attributes in the following order:
2282 *
2283 * 1. IFLA_NET_NS_PID
2284 * 2. IFLA_NET_NS_FD
2285 * 3. IFLA_TARGET_NETNSID
2286 */
2287static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
2288 struct nlattr *tb[])
2289{
2290 struct net *net;
2291
2292 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
2293 return rtnl_link_get_net(src_net, tb);
2294
2295 if (!tb[IFLA_TARGET_NETNSID])
2296 return get_net(src_net);
2297
2298 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
2299 if (!net)
2300 return ERR_PTR(-EINVAL);
2301
2302 return net;
2303}
2304
2305static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
2306 struct net *src_net,
2307 struct nlattr *tb[], int cap)
2308{
2309 struct net *net;
2310
2311 net = rtnl_link_get_net_by_nlattr(src_net, tb);
2312 if (IS_ERR(net))
2313 return net;
2314
2315 if (!netlink_ns_capable(skb, net->user_ns, cap)) {
2316 put_net(net);
2317 return ERR_PTR(-EPERM);
2318 }
2319
2320 return net;
2321}
2322
2323/* Verify that rtnetlink requests do not pass additional properties
2324 * potentially referring to different network namespaces.
2325 */
2326static int rtnl_ensure_unique_netns(struct nlattr *tb[],
2327 struct netlink_ext_ack *extack,
2328 bool netns_id_only)
2329{
2330
2331 if (netns_id_only) {
2332 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
2333 return 0;
2334
2335 NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
2336 return -EOPNOTSUPP;
2337 }
2338
2339 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
2340 goto invalid_attr;
2341
2342 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
2343 goto invalid_attr;
2344
2345 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
2346 goto invalid_attr;
2347
2348 return 0;
2349
2350invalid_attr:
2351 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
2352 return -EINVAL;
2353}
2354
2355static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2356 int max_tx_rate)
2357{
2358 const struct net_device_ops *ops = dev->netdev_ops;
2359
2360 if (!ops->ndo_set_vf_rate)
2361 return -EOPNOTSUPP;
2362 if (max_tx_rate && max_tx_rate < min_tx_rate)
2363 return -EINVAL;
2364
2365 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
2366}
2367
2368static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
2369 struct netlink_ext_ack *extack)
2370{
2371 if (dev) {
2372 if (tb[IFLA_ADDRESS] &&
2373 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2374 return -EINVAL;
2375
2376 if (tb[IFLA_BROADCAST] &&
2377 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2378 return -EINVAL;
2379 }
2380
2381 if (tb[IFLA_AF_SPEC]) {
2382 struct nlattr *af;
2383 int rem, err;
2384
2385 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2386 const struct rtnl_af_ops *af_ops;
2387
2388 af_ops = rtnl_af_lookup(nla_type(af));
2389 if (!af_ops)
2390 return -EAFNOSUPPORT;
2391
2392 if (!af_ops->set_link_af)
2393 return -EOPNOTSUPP;
2394
2395 if (af_ops->validate_link_af) {
2396 err = af_ops->validate_link_af(dev, af, extack);
2397 if (err < 0)
2398 return err;
2399 }
2400 }
2401 }
2402
2403 return 0;
2404}
2405
2406static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2407 int guid_type)
2408{
2409 const struct net_device_ops *ops = dev->netdev_ops;
2410
2411 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2412}
2413
2414static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2415{
2416 if (dev->type != ARPHRD_INFINIBAND)
2417 return -EOPNOTSUPP;
2418
2419 return handle_infiniband_guid(dev, ivt, guid_type);
2420}
2421
2422static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
2423{
2424 const struct net_device_ops *ops = dev->netdev_ops;
2425 int err = -EINVAL;
2426
2427 if (tb[IFLA_VF_MAC]) {
2428 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
2429
2430 if (ivm->vf >= INT_MAX)
2431 return -EINVAL;
2432 err = -EOPNOTSUPP;
2433 if (ops->ndo_set_vf_mac)
2434 err = ops->ndo_set_vf_mac(dev, ivm->vf,
2435 ivm->mac);
2436 if (err < 0)
2437 return err;
2438 }
2439
2440 if (tb[IFLA_VF_VLAN]) {
2441 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
2442
2443 if (ivv->vf >= INT_MAX)
2444 return -EINVAL;
2445 err = -EOPNOTSUPP;
2446 if (ops->ndo_set_vf_vlan)
2447 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
2448 ivv->qos,
2449 htons(ETH_P_8021Q));
2450 if (err < 0)
2451 return err;
2452 }
2453
2454 if (tb[IFLA_VF_VLAN_LIST]) {
2455 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
2456 struct nlattr *attr;
2457 int rem, len = 0;
2458
2459 err = -EOPNOTSUPP;
2460 if (!ops->ndo_set_vf_vlan)
2461 return err;
2462
2463 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
2464 if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
2465 nla_len(attr) < NLA_HDRLEN) {
2466 return -EINVAL;
2467 }
2468 if (len >= MAX_VLAN_LIST_LEN)
2469 return -EOPNOTSUPP;
2470 ivvl[len] = nla_data(attr);
2471
2472 len++;
2473 }
2474 if (len == 0)
2475 return -EINVAL;
2476
2477 if (ivvl[0]->vf >= INT_MAX)
2478 return -EINVAL;
2479 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2480 ivvl[0]->qos, ivvl[0]->vlan_proto);
2481 if (err < 0)
2482 return err;
2483 }
2484
2485 if (tb[IFLA_VF_TX_RATE]) {
2486 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
2487 struct ifla_vf_info ivf;
2488
2489 if (ivt->vf >= INT_MAX)
2490 return -EINVAL;
2491 err = -EOPNOTSUPP;
2492 if (ops->ndo_get_vf_config)
2493 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2494 if (err < 0)
2495 return err;
2496
2497 err = rtnl_set_vf_rate(dev, ivt->vf,
2498 ivf.min_tx_rate, ivt->rate);
2499 if (err < 0)
2500 return err;
2501 }
2502
2503 if (tb[IFLA_VF_RATE]) {
2504 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
2505
2506 if (ivt->vf >= INT_MAX)
2507 return -EINVAL;
2508
2509 err = rtnl_set_vf_rate(dev, ivt->vf,
2510 ivt->min_tx_rate, ivt->max_tx_rate);
2511 if (err < 0)
2512 return err;
2513 }
2514
2515 if (tb[IFLA_VF_SPOOFCHK]) {
2516 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
2517
2518 if (ivs->vf >= INT_MAX)
2519 return -EINVAL;
2520 err = -EOPNOTSUPP;
2521 if (ops->ndo_set_vf_spoofchk)
2522 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2523 ivs->setting);
2524 if (err < 0)
2525 return err;
2526 }
2527
2528 if (tb[IFLA_VF_LINK_STATE]) {
2529 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
2530
2531 if (ivl->vf >= INT_MAX)
2532 return -EINVAL;
2533 err = -EOPNOTSUPP;
2534 if (ops->ndo_set_vf_link_state)
2535 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2536 ivl->link_state);
2537 if (err < 0)
2538 return err;
2539 }
2540
2541 if (tb[IFLA_VF_RSS_QUERY_EN]) {
2542 struct ifla_vf_rss_query_en *ivrssq_en;
2543
2544 err = -EOPNOTSUPP;
2545 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
2546 if (ivrssq_en->vf >= INT_MAX)
2547 return -EINVAL;
2548 if (ops->ndo_set_vf_rss_query_en)
2549 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2550 ivrssq_en->setting);
2551 if (err < 0)
2552 return err;
2553 }
2554
2555 if (tb[IFLA_VF_TRUST]) {
2556 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
2557
2558 if (ivt->vf >= INT_MAX)
2559 return -EINVAL;
2560 err = -EOPNOTSUPP;
2561 if (ops->ndo_set_vf_trust)
2562 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2563 if (err < 0)
2564 return err;
2565 }
2566
2567 if (tb[IFLA_VF_IB_NODE_GUID]) {
2568 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
2569
2570 if (ivt->vf >= INT_MAX)
2571 return -EINVAL;
2572 if (!ops->ndo_set_vf_guid)
2573 return -EOPNOTSUPP;
2574 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2575 }
2576
2577 if (tb[IFLA_VF_IB_PORT_GUID]) {
2578 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
2579
2580 if (ivt->vf >= INT_MAX)
2581 return -EINVAL;
2582 if (!ops->ndo_set_vf_guid)
2583 return -EOPNOTSUPP;
2584
2585 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
2586 }
2587
2588 return err;
2589}
2590
2591static int do_set_master(struct net_device *dev, int ifindex,
2592 struct netlink_ext_ack *extack)
2593{
2594 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2595 const struct net_device_ops *ops;
2596 int err;
2597
2598 if (upper_dev) {
2599 if (upper_dev->ifindex == ifindex)
2600 return 0;
2601 ops = upper_dev->netdev_ops;
2602 if (ops->ndo_del_slave) {
2603 err = ops->ndo_del_slave(upper_dev, dev);
2604 if (err)
2605 return err;
2606 } else {
2607 return -EOPNOTSUPP;
2608 }
2609 }
2610
2611 if (ifindex) {
2612 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2613 if (!upper_dev)
2614 return -EINVAL;
2615 ops = upper_dev->netdev_ops;
2616 if (ops->ndo_add_slave) {
2617 err = ops->ndo_add_slave(upper_dev, dev, extack);
2618 if (err)
2619 return err;
2620 } else {
2621 return -EOPNOTSUPP;
2622 }
2623 }
2624 return 0;
2625}
2626
2627static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
2628 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 },
2629 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 },
2630};
2631
2632static int do_set_proto_down(struct net_device *dev,
2633 struct nlattr *nl_proto_down,
2634 struct nlattr *nl_proto_down_reason,
2635 struct netlink_ext_ack *extack)
2636{
2637 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
2638 unsigned long mask = 0;
2639 u32 value;
2640 bool proto_down;
2641 int err;
2642
2643 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) {
2644 NL_SET_ERR_MSG(extack, "Protodown not supported by device");
2645 return -EOPNOTSUPP;
2646 }
2647
2648 if (nl_proto_down_reason) {
2649 err = nla_parse_nested_deprecated(pdreason,
2650 IFLA_PROTO_DOWN_REASON_MAX,
2651 nl_proto_down_reason,
2652 ifla_proto_down_reason_policy,
2653 NULL);
2654 if (err < 0)
2655 return err;
2656
2657 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
2658 NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
2659 return -EINVAL;
2660 }
2661
2662 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);
2663
2664 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
2665 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);
2666
2667 dev_change_proto_down_reason(dev, mask, value);
2668 }
2669
2670 if (nl_proto_down) {
2671 proto_down = nla_get_u8(nl_proto_down);
2672
2673 /* Don't turn off protodown if there are active reasons */
2674 if (!proto_down && dev->proto_down_reason) {
2675 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
2676 return -EBUSY;
2677 }
2678 err = dev_change_proto_down(dev,
2679 proto_down);
2680 if (err)
2681 return err;
2682 }
2683
2684 return 0;
2685}
2686
2687#define DO_SETLINK_MODIFIED 0x01
2688/* notify flag means notify + modified. */
2689#define DO_SETLINK_NOTIFY 0x03
2690static int do_setlink(const struct sk_buff *skb,
2691 struct net_device *dev, struct ifinfomsg *ifm,
2692 struct netlink_ext_ack *extack,
2693 struct nlattr **tb, int status)
2694{
2695 const struct net_device_ops *ops = dev->netdev_ops;
2696 char ifname[IFNAMSIZ];
2697 int err;
2698
2699 err = validate_linkmsg(dev, tb, extack);
2700 if (err < 0)
2701 return err;
2702
2703 if (tb[IFLA_IFNAME])
2704 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2705 else
2706 ifname[0] = '\0';
2707
2708 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
2709 const char *pat = ifname[0] ? ifname : NULL;
2710 struct net *net;
2711 int new_ifindex;
2712
2713 net = rtnl_link_get_net_capable(skb, dev_net(dev),
2714 tb, CAP_NET_ADMIN);
2715 if (IS_ERR(net)) {
2716 err = PTR_ERR(net);
2717 goto errout;
2718 }
2719
2720 if (tb[IFLA_NEW_IFINDEX])
2721 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]);
2722 else
2723 new_ifindex = 0;
2724
2725 err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
2726 put_net(net);
2727 if (err)
2728 goto errout;
2729 status |= DO_SETLINK_MODIFIED;
2730 }
2731
2732 if (tb[IFLA_MAP]) {
2733 struct rtnl_link_ifmap *u_map;
2734 struct ifmap k_map;
2735
2736 if (!ops->ndo_set_config) {
2737 err = -EOPNOTSUPP;
2738 goto errout;
2739 }
2740
2741 if (!netif_device_present(dev)) {
2742 err = -ENODEV;
2743 goto errout;
2744 }
2745
2746 u_map = nla_data(tb[IFLA_MAP]);
2747 k_map.mem_start = (unsigned long) u_map->mem_start;
2748 k_map.mem_end = (unsigned long) u_map->mem_end;
2749 k_map.base_addr = (unsigned short) u_map->base_addr;
2750 k_map.irq = (unsigned char) u_map->irq;
2751 k_map.dma = (unsigned char) u_map->dma;
2752 k_map.port = (unsigned char) u_map->port;
2753
2754 err = ops->ndo_set_config(dev, &k_map);
2755 if (err < 0)
2756 goto errout;
2757
2758 status |= DO_SETLINK_NOTIFY;
2759 }
2760
2761 if (tb[IFLA_ADDRESS]) {
2762 struct sockaddr *sa;
2763 int len;
2764
2765 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2766 sizeof(*sa));
2767 sa = kmalloc(len, GFP_KERNEL);
2768 if (!sa) {
2769 err = -ENOMEM;
2770 goto errout;
2771 }
2772 sa->sa_family = dev->type;
2773 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2774 dev->addr_len);
2775 err = dev_set_mac_address_user(dev, sa, extack);
2776 kfree(sa);
2777 if (err)
2778 goto errout;
2779 status |= DO_SETLINK_MODIFIED;
2780 }
2781
2782 if (tb[IFLA_MTU]) {
2783 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
2784 if (err < 0)
2785 goto errout;
2786 status |= DO_SETLINK_MODIFIED;
2787 }
2788
2789 if (tb[IFLA_GROUP]) {
2790 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2791 status |= DO_SETLINK_NOTIFY;
2792 }
2793
2794 /*
2795 * Interface selected by interface index but interface
2796 * name provided implies that a name change has been
2797 * requested.
2798 */
2799 if (ifm->ifi_index > 0 && ifname[0]) {
2800 err = dev_change_name(dev, ifname);
2801 if (err < 0)
2802 goto errout;
2803 status |= DO_SETLINK_MODIFIED;
2804 }
2805
2806 if (tb[IFLA_IFALIAS]) {
2807 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2808 nla_len(tb[IFLA_IFALIAS]));
2809 if (err < 0)
2810 goto errout;
2811 status |= DO_SETLINK_NOTIFY;
2812 }
2813
2814 if (tb[IFLA_BROADCAST]) {
2815 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2816 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2817 }
2818
2819 if (tb[IFLA_MASTER]) {
2820 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2821 if (err)
2822 goto errout;
2823 status |= DO_SETLINK_MODIFIED;
2824 }
2825
2826 if (ifm->ifi_flags || ifm->ifi_change) {
2827 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2828 extack);
2829 if (err < 0)
2830 goto errout;
2831 }
2832
2833 if (tb[IFLA_CARRIER]) {
2834 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2835 if (err)
2836 goto errout;
2837 status |= DO_SETLINK_MODIFIED;
2838 }
2839
2840 if (tb[IFLA_TXQLEN]) {
2841 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
2842
2843 err = dev_change_tx_queue_len(dev, value);
2844 if (err)
2845 goto errout;
2846 status |= DO_SETLINK_MODIFIED;
2847 }
2848
2849 if (tb[IFLA_GSO_MAX_SIZE]) {
2850 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
2851
2852 if (max_size > dev->tso_max_size) {
2853 err = -EINVAL;
2854 goto errout;
2855 }
2856
2857 if (dev->gso_max_size ^ max_size) {
2858 netif_set_gso_max_size(dev, max_size);
2859 status |= DO_SETLINK_MODIFIED;
2860 }
2861 }
2862
2863 if (tb[IFLA_GSO_MAX_SEGS]) {
2864 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2865
2866 if (max_segs > GSO_MAX_SEGS || max_segs > dev->tso_max_segs) {
2867 err = -EINVAL;
2868 goto errout;
2869 }
2870
2871 if (dev->gso_max_segs ^ max_segs) {
2872 netif_set_gso_max_segs(dev, max_segs);
2873 status |= DO_SETLINK_MODIFIED;
2874 }
2875 }
2876
2877 if (tb[IFLA_GRO_MAX_SIZE]) {
2878 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
2879
2880 if (dev->gro_max_size ^ gro_max_size) {
2881 netif_set_gro_max_size(dev, gro_max_size);
2882 status |= DO_SETLINK_MODIFIED;
2883 }
2884 }
2885
2886 if (tb[IFLA_OPERSTATE])
2887 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2888
2889 if (tb[IFLA_LINKMODE]) {
2890 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2891
2892 write_lock(&dev_base_lock);
2893 if (dev->link_mode ^ value)
2894 status |= DO_SETLINK_NOTIFY;
2895 dev->link_mode = value;
2896 write_unlock(&dev_base_lock);
2897 }
2898
2899 if (tb[IFLA_VFINFO_LIST]) {
2900 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2901 struct nlattr *attr;
2902 int rem;
2903
2904 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2905 if (nla_type(attr) != IFLA_VF_INFO ||
2906 nla_len(attr) < NLA_HDRLEN) {
2907 err = -EINVAL;
2908 goto errout;
2909 }
2910 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
2911 attr,
2912 ifla_vf_policy,
2913 NULL);
2914 if (err < 0)
2915 goto errout;
2916 err = do_setvfinfo(dev, vfinfo);
2917 if (err < 0)
2918 goto errout;
2919 status |= DO_SETLINK_NOTIFY;
2920 }
2921 }
2922 err = 0;
2923
2924 if (tb[IFLA_VF_PORTS]) {
2925 struct nlattr *port[IFLA_PORT_MAX+1];
2926 struct nlattr *attr;
2927 int vf;
2928 int rem;
2929
2930 err = -EOPNOTSUPP;
2931 if (!ops->ndo_set_vf_port)
2932 goto errout;
2933
2934 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
2935 if (nla_type(attr) != IFLA_VF_PORT ||
2936 nla_len(attr) < NLA_HDRLEN) {
2937 err = -EINVAL;
2938 goto errout;
2939 }
2940 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2941 attr,
2942 ifla_port_policy,
2943 NULL);
2944 if (err < 0)
2945 goto errout;
2946 if (!port[IFLA_PORT_VF]) {
2947 err = -EOPNOTSUPP;
2948 goto errout;
2949 }
2950 vf = nla_get_u32(port[IFLA_PORT_VF]);
2951 err = ops->ndo_set_vf_port(dev, vf, port);
2952 if (err < 0)
2953 goto errout;
2954 status |= DO_SETLINK_NOTIFY;
2955 }
2956 }
2957 err = 0;
2958
2959 if (tb[IFLA_PORT_SELF]) {
2960 struct nlattr *port[IFLA_PORT_MAX+1];
2961
2962 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2963 tb[IFLA_PORT_SELF],
2964 ifla_port_policy, NULL);
2965 if (err < 0)
2966 goto errout;
2967
2968 err = -EOPNOTSUPP;
2969 if (ops->ndo_set_vf_port)
2970 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
2971 if (err < 0)
2972 goto errout;
2973 status |= DO_SETLINK_NOTIFY;
2974 }
2975
2976 if (tb[IFLA_AF_SPEC]) {
2977 struct nlattr *af;
2978 int rem;
2979
2980 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2981 const struct rtnl_af_ops *af_ops;
2982
2983 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
2984
2985 err = af_ops->set_link_af(dev, af, extack);
2986 if (err < 0)
2987 goto errout;
2988
2989 status |= DO_SETLINK_NOTIFY;
2990 }
2991 }
2992 err = 0;
2993
2994 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
2995 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
2996 tb[IFLA_PROTO_DOWN_REASON], extack);
2997 if (err)
2998 goto errout;
2999 status |= DO_SETLINK_NOTIFY;
3000 }
3001
3002 if (tb[IFLA_XDP]) {
3003 struct nlattr *xdp[IFLA_XDP_MAX + 1];
3004 u32 xdp_flags = 0;
3005
3006 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
3007 tb[IFLA_XDP],
3008 ifla_xdp_policy, NULL);
3009 if (err < 0)
3010 goto errout;
3011
3012 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
3013 err = -EINVAL;
3014 goto errout;
3015 }
3016
3017 if (xdp[IFLA_XDP_FLAGS]) {
3018 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
3019 if (xdp_flags & ~XDP_FLAGS_MASK) {
3020 err = -EINVAL;
3021 goto errout;
3022 }
3023 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
3024 err = -EINVAL;
3025 goto errout;
3026 }
3027 }
3028
3029 if (xdp[IFLA_XDP_FD]) {
3030 int expected_fd = -1;
3031
3032 if (xdp_flags & XDP_FLAGS_REPLACE) {
3033 if (!xdp[IFLA_XDP_EXPECTED_FD]) {
3034 err = -EINVAL;
3035 goto errout;
3036 }
3037 expected_fd =
3038 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
3039 }
3040
3041 err = dev_change_xdp_fd(dev, extack,
3042 nla_get_s32(xdp[IFLA_XDP_FD]),
3043 expected_fd,
3044 xdp_flags);
3045 if (err)
3046 goto errout;
3047 status |= DO_SETLINK_NOTIFY;
3048 }
3049 }
3050
3051errout:
3052 if (status & DO_SETLINK_MODIFIED) {
3053 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
3054 netdev_state_change(dev);
3055
3056 if (err < 0)
3057 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
3058 dev->name);
3059 }
3060
3061 return err;
3062}
3063
3064static struct net_device *rtnl_dev_get(struct net *net,
3065 struct nlattr *tb[])
3066{
3067 char ifname[ALTIFNAMSIZ];
3068
3069 if (tb[IFLA_IFNAME])
3070 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3071 else if (tb[IFLA_ALT_IFNAME])
3072 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
3073 else
3074 return NULL;
3075
3076 return __dev_get_by_name(net, ifname);
3077}
3078
3079static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3080 struct netlink_ext_ack *extack)
3081{
3082 struct net *net = sock_net(skb->sk);
3083 struct ifinfomsg *ifm;
3084 struct net_device *dev;
3085 int err;
3086 struct nlattr *tb[IFLA_MAX+1];
3087
3088 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3089 ifla_policy, extack);
3090 if (err < 0)
3091 goto errout;
3092
3093 err = rtnl_ensure_unique_netns(tb, extack, false);
3094 if (err < 0)
3095 goto errout;
3096
3097 err = -EINVAL;
3098 ifm = nlmsg_data(nlh);
3099 if (ifm->ifi_index > 0)
3100 dev = __dev_get_by_index(net, ifm->ifi_index);
3101 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3102 dev = rtnl_dev_get(net, tb);
3103 else
3104 goto errout;
3105
3106 if (dev == NULL) {
3107 err = -ENODEV;
3108 goto errout;
3109 }
3110
3111 err = do_setlink(skb, dev, ifm, extack, tb, 0);
3112errout:
3113 return err;
3114}
3115
3116static int rtnl_group_dellink(const struct net *net, int group)
3117{
3118 struct net_device *dev, *aux;
3119 LIST_HEAD(list_kill);
3120 bool found = false;
3121
3122 if (!group)
3123 return -EPERM;
3124
3125 for_each_netdev(net, dev) {
3126 if (dev->group == group) {
3127 const struct rtnl_link_ops *ops;
3128
3129 found = true;
3130 ops = dev->rtnl_link_ops;
3131 if (!ops || !ops->dellink)
3132 return -EOPNOTSUPP;
3133 }
3134 }
3135
3136 if (!found)
3137 return -ENODEV;
3138
3139 for_each_netdev_safe(net, dev, aux) {
3140 if (dev->group == group) {
3141 const struct rtnl_link_ops *ops;
3142
3143 ops = dev->rtnl_link_ops;
3144 ops->dellink(dev, &list_kill);
3145 }
3146 }
3147 unregister_netdevice_many(&list_kill);
3148
3149 return 0;
3150}
3151
3152int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh)
3153{
3154 const struct rtnl_link_ops *ops;
3155 LIST_HEAD(list_kill);
3156
3157 ops = dev->rtnl_link_ops;
3158 if (!ops || !ops->dellink)
3159 return -EOPNOTSUPP;
3160
3161 ops->dellink(dev, &list_kill);
3162 unregister_netdevice_many_notify(&list_kill, portid, nlh);
3163
3164 return 0;
3165}
3166EXPORT_SYMBOL_GPL(rtnl_delete_link);
3167
3168static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3169 struct netlink_ext_ack *extack)
3170{
3171 struct net *net = sock_net(skb->sk);
3172 u32 portid = NETLINK_CB(skb).portid;
3173 struct net *tgt_net = net;
3174 struct net_device *dev = NULL;
3175 struct ifinfomsg *ifm;
3176 struct nlattr *tb[IFLA_MAX+1];
3177 int err;
3178 int netnsid = -1;
3179
3180 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3181 ifla_policy, extack);
3182 if (err < 0)
3183 return err;
3184
3185 err = rtnl_ensure_unique_netns(tb, extack, true);
3186 if (err < 0)
3187 return err;
3188
3189 if (tb[IFLA_TARGET_NETNSID]) {
3190 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3191 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3192 if (IS_ERR(tgt_net))
3193 return PTR_ERR(tgt_net);
3194 }
3195
3196 err = -EINVAL;
3197 ifm = nlmsg_data(nlh);
3198 if (ifm->ifi_index > 0)
3199 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3200 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3201 dev = rtnl_dev_get(net, tb);
3202 else if (tb[IFLA_GROUP])
3203 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
3204 else
3205 goto out;
3206
3207 if (!dev) {
3208 if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
3209 err = -ENODEV;
3210
3211 goto out;
3212 }
3213
3214 err = rtnl_delete_link(dev, portid, nlh);
3215
3216out:
3217 if (netnsid >= 0)
3218 put_net(tgt_net);
3219
3220 return err;
3221}
3222
3223int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
3224 u32 portid, const struct nlmsghdr *nlh)
3225{
3226 unsigned int old_flags;
3227 int err;
3228
3229 old_flags = dev->flags;
3230 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
3231 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3232 NULL);
3233 if (err < 0)
3234 return err;
3235 }
3236
3237 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
3238 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh);
3239 } else {
3240 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
3241 __dev_notify_flags(dev, old_flags, ~0U, portid, nlh);
3242 }
3243 return 0;
3244}
3245EXPORT_SYMBOL(rtnl_configure_link);
3246
3247struct net_device *rtnl_create_link(struct net *net, const char *ifname,
3248 unsigned char name_assign_type,
3249 const struct rtnl_link_ops *ops,
3250 struct nlattr *tb[],
3251 struct netlink_ext_ack *extack)
3252{
3253 struct net_device *dev;
3254 unsigned int num_tx_queues = 1;
3255 unsigned int num_rx_queues = 1;
3256
3257 if (tb[IFLA_NUM_TX_QUEUES])
3258 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
3259 else if (ops->get_num_tx_queues)
3260 num_tx_queues = ops->get_num_tx_queues();
3261
3262 if (tb[IFLA_NUM_RX_QUEUES])
3263 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
3264 else if (ops->get_num_rx_queues)
3265 num_rx_queues = ops->get_num_rx_queues();
3266
3267 if (num_tx_queues < 1 || num_tx_queues > 4096) {
3268 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
3269 return ERR_PTR(-EINVAL);
3270 }
3271
3272 if (num_rx_queues < 1 || num_rx_queues > 4096) {
3273 NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
3274 return ERR_PTR(-EINVAL);
3275 }
3276
3277 if (ops->alloc) {
3278 dev = ops->alloc(tb, ifname, name_assign_type,
3279 num_tx_queues, num_rx_queues);
3280 if (IS_ERR(dev))
3281 return dev;
3282 } else {
3283 dev = alloc_netdev_mqs(ops->priv_size, ifname,
3284 name_assign_type, ops->setup,
3285 num_tx_queues, num_rx_queues);
3286 }
3287
3288 if (!dev)
3289 return ERR_PTR(-ENOMEM);
3290
3291 dev_net_set(dev, net);
3292 dev->rtnl_link_ops = ops;
3293 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
3294
3295 if (tb[IFLA_MTU]) {
3296 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
3297 int err;
3298
3299 err = dev_validate_mtu(dev, mtu, extack);
3300 if (err) {
3301 free_netdev(dev);
3302 return ERR_PTR(err);
3303 }
3304 dev->mtu = mtu;
3305 }
3306 if (tb[IFLA_ADDRESS]) {
3307 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
3308 nla_len(tb[IFLA_ADDRESS]));
3309 dev->addr_assign_type = NET_ADDR_SET;
3310 }
3311 if (tb[IFLA_BROADCAST])
3312 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3313 nla_len(tb[IFLA_BROADCAST]));
3314 if (tb[IFLA_TXQLEN])
3315 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3316 if (tb[IFLA_OPERSTATE])
3317 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3318 if (tb[IFLA_LINKMODE])
3319 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
3320 if (tb[IFLA_GROUP])
3321 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3322 if (tb[IFLA_GSO_MAX_SIZE])
3323 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3324 if (tb[IFLA_GSO_MAX_SEGS])
3325 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
3326 if (tb[IFLA_GRO_MAX_SIZE])
3327 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
3328
3329 return dev;
3330}
3331EXPORT_SYMBOL(rtnl_create_link);
3332
3333static int rtnl_group_changelink(const struct sk_buff *skb,
3334 struct net *net, int group,
3335 struct ifinfomsg *ifm,
3336 struct netlink_ext_ack *extack,
3337 struct nlattr **tb)
3338{
3339 struct net_device *dev, *aux;
3340 int err;
3341
3342 for_each_netdev_safe(net, dev, aux) {
3343 if (dev->group == group) {
3344 err = do_setlink(skb, dev, ifm, extack, tb, 0);
3345 if (err < 0)
3346 return err;
3347 }
3348 }
3349
3350 return 0;
3351}
3352
3353static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
3354 const struct rtnl_link_ops *ops,
3355 const struct nlmsghdr *nlh,
3356 struct nlattr **tb, struct nlattr **data,
3357 struct netlink_ext_ack *extack)
3358{
3359 unsigned char name_assign_type = NET_NAME_USER;
3360 struct net *net = sock_net(skb->sk);
3361 u32 portid = NETLINK_CB(skb).portid;
3362 struct net *dest_net, *link_net;
3363 struct net_device *dev;
3364 char ifname[IFNAMSIZ];
3365 int err;
3366
3367 if (!ops->alloc && !ops->setup)
3368 return -EOPNOTSUPP;
3369
3370 if (tb[IFLA_IFNAME]) {
3371 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3372 } else {
3373 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3374 name_assign_type = NET_NAME_ENUM;
3375 }
3376
3377 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3378 if (IS_ERR(dest_net))
3379 return PTR_ERR(dest_net);
3380
3381 if (tb[IFLA_LINK_NETNSID]) {
3382 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
3383
3384 link_net = get_net_ns_by_id(dest_net, id);
3385 if (!link_net) {
3386 NL_SET_ERR_MSG(extack, "Unknown network namespace id");
3387 err = -EINVAL;
3388 goto out;
3389 }
3390 err = -EPERM;
3391 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
3392 goto out;
3393 } else {
3394 link_net = NULL;
3395 }
3396
3397 dev = rtnl_create_link(link_net ? : dest_net, ifname,
3398 name_assign_type, ops, tb, extack);
3399 if (IS_ERR(dev)) {
3400 err = PTR_ERR(dev);
3401 goto out;
3402 }
3403
3404 dev->ifindex = ifm->ifi_index;
3405
3406 if (ops->newlink)
3407 err = ops->newlink(link_net ? : net, dev, tb, data, extack);
3408 else
3409 err = register_netdevice(dev);
3410 if (err < 0) {
3411 free_netdev(dev);
3412 goto out;
3413 }
3414
3415 err = rtnl_configure_link(dev, ifm, portid, nlh);
3416 if (err < 0)
3417 goto out_unregister;
3418 if (link_net) {
3419 err = dev_change_net_namespace(dev, dest_net, ifname);
3420 if (err < 0)
3421 goto out_unregister;
3422 }
3423 if (tb[IFLA_MASTER]) {
3424 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3425 if (err)
3426 goto out_unregister;
3427 }
3428out:
3429 if (link_net)
3430 put_net(link_net);
3431 put_net(dest_net);
3432 return err;
3433out_unregister:
3434 if (ops->newlink) {
3435 LIST_HEAD(list_kill);
3436
3437 ops->dellink(dev, &list_kill);
3438 unregister_netdevice_many(&list_kill);
3439 } else {
3440 unregister_netdevice(dev);
3441 }
3442 goto out;
3443}
3444
3445struct rtnl_newlink_tbs {
3446 struct nlattr *tb[IFLA_MAX + 1];
3447 struct nlattr *attr[RTNL_MAX_TYPE + 1];
3448 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
3449};
3450
3451static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3452 struct rtnl_newlink_tbs *tbs,
3453 struct netlink_ext_ack *extack)
3454{
3455 struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
3456 struct nlattr ** const tb = tbs->tb;
3457 const struct rtnl_link_ops *m_ops;
3458 struct net_device *master_dev;
3459 struct net *net = sock_net(skb->sk);
3460 const struct rtnl_link_ops *ops;
3461 struct nlattr **slave_data;
3462 char kind[MODULE_NAME_LEN];
3463 struct net_device *dev;
3464 struct ifinfomsg *ifm;
3465 struct nlattr **data;
3466 bool link_specified;
3467 int err;
3468
3469#ifdef CONFIG_MODULES
3470replay:
3471#endif
3472 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3473 ifla_policy, extack);
3474 if (err < 0)
3475 return err;
3476
3477 err = rtnl_ensure_unique_netns(tb, extack, false);
3478 if (err < 0)
3479 return err;
3480
3481 ifm = nlmsg_data(nlh);
3482 if (ifm->ifi_index > 0) {
3483 link_specified = true;
3484 dev = __dev_get_by_index(net, ifm->ifi_index);
3485 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
3486 link_specified = true;
3487 dev = rtnl_dev_get(net, tb);
3488 } else {
3489 link_specified = false;
3490 dev = NULL;
3491 }
3492
3493 master_dev = NULL;
3494 m_ops = NULL;
3495 if (dev) {
3496 master_dev = netdev_master_upper_dev_get(dev);
3497 if (master_dev)
3498 m_ops = master_dev->rtnl_link_ops;
3499 }
3500
3501 err = validate_linkmsg(dev, tb, extack);
3502 if (err < 0)
3503 return err;
3504
3505 if (tb[IFLA_LINKINFO]) {
3506 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
3507 tb[IFLA_LINKINFO],
3508 ifla_info_policy, NULL);
3509 if (err < 0)
3510 return err;
3511 } else
3512 memset(linkinfo, 0, sizeof(linkinfo));
3513
3514 if (linkinfo[IFLA_INFO_KIND]) {
3515 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
3516 ops = rtnl_link_ops_get(kind);
3517 } else {
3518 kind[0] = '\0';
3519 ops = NULL;
3520 }
3521
3522 data = NULL;
3523 if (ops) {
3524 if (ops->maxtype > RTNL_MAX_TYPE)
3525 return -EINVAL;
3526
3527 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
3528 err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
3529 linkinfo[IFLA_INFO_DATA],
3530 ops->policy, extack);
3531 if (err < 0)
3532 return err;
3533 data = tbs->attr;
3534 }
3535 if (ops->validate) {
3536 err = ops->validate(tb, data, extack);
3537 if (err < 0)
3538 return err;
3539 }
3540 }
3541
3542 slave_data = NULL;
3543 if (m_ops) {
3544 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3545 return -EINVAL;
3546
3547 if (m_ops->slave_maxtype &&
3548 linkinfo[IFLA_INFO_SLAVE_DATA]) {
3549 err = nla_parse_nested_deprecated(tbs->slave_attr,
3550 m_ops->slave_maxtype,
3551 linkinfo[IFLA_INFO_SLAVE_DATA],
3552 m_ops->slave_policy,
3553 extack);
3554 if (err < 0)
3555 return err;
3556 slave_data = tbs->slave_attr;
3557 }
3558 }
3559
3560 if (dev) {
3561 int status = 0;
3562
3563 if (nlh->nlmsg_flags & NLM_F_EXCL)
3564 return -EEXIST;
3565 if (nlh->nlmsg_flags & NLM_F_REPLACE)
3566 return -EOPNOTSUPP;
3567
3568 if (linkinfo[IFLA_INFO_DATA]) {
3569 if (!ops || ops != dev->rtnl_link_ops ||
3570 !ops->changelink)
3571 return -EOPNOTSUPP;
3572
3573 err = ops->changelink(dev, tb, data, extack);
3574 if (err < 0)
3575 return err;
3576 status |= DO_SETLINK_NOTIFY;
3577 }
3578
3579 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3580 if (!m_ops || !m_ops->slave_changelink)
3581 return -EOPNOTSUPP;
3582
3583 err = m_ops->slave_changelink(master_dev, dev, tb,
3584 slave_data, extack);
3585 if (err < 0)
3586 return err;
3587 status |= DO_SETLINK_NOTIFY;
3588 }
3589
3590 return do_setlink(skb, dev, ifm, extack, tb, status);
3591 }
3592
3593 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
3594 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
3595 * or it's for a group
3596 */
3597 if (link_specified)
3598 return -ENODEV;
3599 if (tb[IFLA_GROUP])
3600 return rtnl_group_changelink(skb, net,
3601 nla_get_u32(tb[IFLA_GROUP]),
3602 ifm, extack, tb);
3603 return -ENODEV;
3604 }
3605
3606 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3607 return -EOPNOTSUPP;
3608
3609 if (!ops) {
3610#ifdef CONFIG_MODULES
3611 if (kind[0]) {
3612 __rtnl_unlock();
3613 request_module("rtnl-link-%s", kind);
3614 rtnl_lock();
3615 ops = rtnl_link_ops_get(kind);
3616 if (ops)
3617 goto replay;
3618 }
3619#endif
3620 NL_SET_ERR_MSG(extack, "Unknown device type");
3621 return -EOPNOTSUPP;
3622 }
3623
3624 return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack);
3625}
3626
3627static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3628 struct netlink_ext_ack *extack)
3629{
3630 struct rtnl_newlink_tbs *tbs;
3631 int ret;
3632
3633 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
3634 if (!tbs)
3635 return -ENOMEM;
3636
3637 ret = __rtnl_newlink(skb, nlh, tbs, extack);
3638 kfree(tbs);
3639 return ret;
3640}
3641
3642static int rtnl_valid_getlink_req(struct sk_buff *skb,
3643 const struct nlmsghdr *nlh,
3644 struct nlattr **tb,
3645 struct netlink_ext_ack *extack)
3646{
3647 struct ifinfomsg *ifm;
3648 int i, err;
3649
3650 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
3651 NL_SET_ERR_MSG(extack, "Invalid header for get link");
3652 return -EINVAL;
3653 }
3654
3655 if (!netlink_strict_get_check(skb))
3656 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3657 ifla_policy, extack);
3658
3659 ifm = nlmsg_data(nlh);
3660 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
3661 ifm->ifi_change) {
3662 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
3663 return -EINVAL;
3664 }
3665
3666 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
3667 ifla_policy, extack);
3668 if (err)
3669 return err;
3670
3671 for (i = 0; i <= IFLA_MAX; i++) {
3672 if (!tb[i])
3673 continue;
3674
3675 switch (i) {
3676 case IFLA_IFNAME:
3677 case IFLA_ALT_IFNAME:
3678 case IFLA_EXT_MASK:
3679 case IFLA_TARGET_NETNSID:
3680 break;
3681 default:
3682 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
3683 return -EINVAL;
3684 }
3685 }
3686
3687 return 0;
3688}
3689
3690static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3691 struct netlink_ext_ack *extack)
3692{
3693 struct net *net = sock_net(skb->sk);
3694 struct net *tgt_net = net;
3695 struct ifinfomsg *ifm;
3696 struct nlattr *tb[IFLA_MAX+1];
3697 struct net_device *dev = NULL;
3698 struct sk_buff *nskb;
3699 int netnsid = -1;
3700 int err;
3701 u32 ext_filter_mask = 0;
3702
3703 err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
3704 if (err < 0)
3705 return err;
3706
3707 err = rtnl_ensure_unique_netns(tb, extack, true);
3708 if (err < 0)
3709 return err;
3710
3711 if (tb[IFLA_TARGET_NETNSID]) {
3712 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3713 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3714 if (IS_ERR(tgt_net))
3715 return PTR_ERR(tgt_net);
3716 }
3717
3718 if (tb[IFLA_EXT_MASK])
3719 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3720
3721 err = -EINVAL;
3722 ifm = nlmsg_data(nlh);
3723 if (ifm->ifi_index > 0)
3724 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3725 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3726 dev = rtnl_dev_get(tgt_net, tb);
3727 else
3728 goto out;
3729
3730 err = -ENODEV;
3731 if (dev == NULL)
3732 goto out;
3733
3734 err = -ENOBUFS;
3735 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
3736 if (nskb == NULL)
3737 goto out;
3738
3739 err = rtnl_fill_ifinfo(nskb, dev, net,
3740 RTM_NEWLINK, NETLINK_CB(skb).portid,
3741 nlh->nlmsg_seq, 0, 0, ext_filter_mask,
3742 0, NULL, 0, netnsid, GFP_KERNEL);
3743 if (err < 0) {
3744 /* -EMSGSIZE implies BUG in if_nlmsg_size */
3745 WARN_ON(err == -EMSGSIZE);
3746 kfree_skb(nskb);
3747 } else
3748 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
3749out:
3750 if (netnsid >= 0)
3751 put_net(tgt_net);
3752
3753 return err;
3754}
3755
3756static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
3757 bool *changed, struct netlink_ext_ack *extack)
3758{
3759 char *alt_ifname;
3760 size_t size;
3761 int err;
3762
3763 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
3764 if (err)
3765 return err;
3766
3767 if (cmd == RTM_NEWLINKPROP) {
3768 size = rtnl_prop_list_size(dev);
3769 size += nla_total_size(ALTIFNAMSIZ);
3770 if (size >= U16_MAX) {
3771 NL_SET_ERR_MSG(extack,
3772 "effective property list too long");
3773 return -EINVAL;
3774 }
3775 }
3776
3777 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
3778 if (!alt_ifname)
3779 return -ENOMEM;
3780
3781 if (cmd == RTM_NEWLINKPROP) {
3782 err = netdev_name_node_alt_create(dev, alt_ifname);
3783 if (!err)
3784 alt_ifname = NULL;
3785 } else if (cmd == RTM_DELLINKPROP) {
3786 err = netdev_name_node_alt_destroy(dev, alt_ifname);
3787 } else {
3788 WARN_ON_ONCE(1);
3789 err = -EINVAL;
3790 }
3791
3792 kfree(alt_ifname);
3793 if (!err)
3794 *changed = true;
3795 return err;
3796}
3797
3798static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
3799 struct netlink_ext_ack *extack)
3800{
3801 struct net *net = sock_net(skb->sk);
3802 struct nlattr *tb[IFLA_MAX + 1];
3803 struct net_device *dev;
3804 struct ifinfomsg *ifm;
3805 bool changed = false;
3806 struct nlattr *attr;
3807 int err, rem;
3808
3809 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
3810 if (err)
3811 return err;
3812
3813 err = rtnl_ensure_unique_netns(tb, extack, true);
3814 if (err)
3815 return err;
3816
3817 ifm = nlmsg_data(nlh);
3818 if (ifm->ifi_index > 0)
3819 dev = __dev_get_by_index(net, ifm->ifi_index);
3820 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3821 dev = rtnl_dev_get(net, tb);
3822 else
3823 return -EINVAL;
3824
3825 if (!dev)
3826 return -ENODEV;
3827
3828 if (!tb[IFLA_PROP_LIST])
3829 return 0;
3830
3831 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
3832 switch (nla_type(attr)) {
3833 case IFLA_ALT_IFNAME:
3834 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
3835 if (err)
3836 return err;
3837 break;
3838 }
3839 }
3840
3841 if (changed)
3842 netdev_state_change(dev);
3843 return 0;
3844}
3845
3846static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3847 struct netlink_ext_ack *extack)
3848{
3849 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
3850}
3851
3852static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3853 struct netlink_ext_ack *extack)
3854{
3855 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
3856}
3857
3858static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
3859{
3860 struct net *net = sock_net(skb->sk);
3861 size_t min_ifinfo_dump_size = 0;
3862 struct nlattr *tb[IFLA_MAX+1];
3863 u32 ext_filter_mask = 0;
3864 struct net_device *dev;
3865 int hdrlen;
3866
3867 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
3868 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
3869 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
3870
3871 if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
3872 if (tb[IFLA_EXT_MASK])
3873 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3874 }
3875
3876 if (!ext_filter_mask)
3877 return NLMSG_GOODSIZE;
3878 /*
3879 * traverse the list of net devices and compute the minimum
3880 * buffer size based upon the filter mask.
3881 */
3882 rcu_read_lock();
3883 for_each_netdev_rcu(net, dev) {
3884 min_ifinfo_dump_size = max(min_ifinfo_dump_size,
3885 if_nlmsg_size(dev, ext_filter_mask));
3886 }
3887 rcu_read_unlock();
3888
3889 return nlmsg_total_size(min_ifinfo_dump_size);
3890}
3891
3892static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
3893{
3894 int idx;
3895 int s_idx = cb->family;
3896 int type = cb->nlh->nlmsg_type - RTM_BASE;
3897 int ret = 0;
3898
3899 if (s_idx == 0)
3900 s_idx = 1;
3901
3902 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
3903 struct rtnl_link __rcu **tab;
3904 struct rtnl_link *link;
3905 rtnl_dumpit_func dumpit;
3906
3907 if (idx < s_idx || idx == PF_PACKET)
3908 continue;
3909
3910 if (type < 0 || type >= RTM_NR_MSGTYPES)
3911 continue;
3912
3913 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
3914 if (!tab)
3915 continue;
3916
3917 link = rcu_dereference_rtnl(tab[type]);
3918 if (!link)
3919 continue;
3920
3921 dumpit = link->dumpit;
3922 if (!dumpit)
3923 continue;
3924
3925 if (idx > s_idx) {
3926 memset(&cb->args[0], 0, sizeof(cb->args));
3927 cb->prev_seq = 0;
3928 cb->seq = 0;
3929 }
3930 ret = dumpit(skb, cb);
3931 if (ret)
3932 break;
3933 }
3934 cb->family = idx;
3935
3936 return skb->len ? : ret;
3937}
3938
3939struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3940 unsigned int change,
3941 u32 event, gfp_t flags, int *new_nsid,
3942 int new_ifindex, u32 portid, u32 seq)
3943{
3944 struct net *net = dev_net(dev);
3945 struct sk_buff *skb;
3946 int err = -ENOBUFS;
3947
3948 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
3949 if (skb == NULL)
3950 goto errout;
3951
3952 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
3953 type, portid, seq, change, 0, 0, event,
3954 new_nsid, new_ifindex, -1, flags);
3955 if (err < 0) {
3956 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
3957 WARN_ON(err == -EMSGSIZE);
3958 kfree_skb(skb);
3959 goto errout;
3960 }
3961 return skb;
3962errout:
3963 if (err < 0)
3964 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
3965 return NULL;
3966}
3967
3968void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags,
3969 u32 portid, const struct nlmsghdr *nlh)
3970{
3971 struct net *net = dev_net(dev);
3972
3973 rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags);
3974}
3975
3976static void rtmsg_ifinfo_event(int type, struct net_device *dev,
3977 unsigned int change, u32 event,
3978 gfp_t flags, int *new_nsid, int new_ifindex,
3979 u32 portid, const struct nlmsghdr *nlh)
3980{
3981 struct sk_buff *skb;
3982
3983 if (dev->reg_state != NETREG_REGISTERED)
3984 return;
3985
3986 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
3987 new_ifindex, portid, nlmsg_seq(nlh));
3988 if (skb)
3989 rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
3990}
3991
3992void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
3993 gfp_t flags, u32 portid, const struct nlmsghdr *nlh)
3994{
3995 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3996 NULL, 0, portid, nlh);
3997}
3998
3999void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
4000 gfp_t flags, int *new_nsid, int new_ifindex)
4001{
4002 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
4003 new_nsid, new_ifindex, 0, NULL);
4004}
4005
4006static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
4007 struct net_device *dev,
4008 u8 *addr, u16 vid, u32 pid, u32 seq,
4009 int type, unsigned int flags,
4010 int nlflags, u16 ndm_state)
4011{
4012 struct nlmsghdr *nlh;
4013 struct ndmsg *ndm;
4014
4015 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
4016 if (!nlh)
4017 return -EMSGSIZE;
4018
4019 ndm = nlmsg_data(nlh);
4020 ndm->ndm_family = AF_BRIDGE;
4021 ndm->ndm_pad1 = 0;
4022 ndm->ndm_pad2 = 0;
4023 ndm->ndm_flags = flags;
4024 ndm->ndm_type = 0;
4025 ndm->ndm_ifindex = dev->ifindex;
4026 ndm->ndm_state = ndm_state;
4027
4028 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
4029 goto nla_put_failure;
4030 if (vid)
4031 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
4032 goto nla_put_failure;
4033
4034 nlmsg_end(skb, nlh);
4035 return 0;
4036
4037nla_put_failure:
4038 nlmsg_cancel(skb, nlh);
4039 return -EMSGSIZE;
4040}
4041
4042static inline size_t rtnl_fdb_nlmsg_size(void)
4043{
4044 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
4045 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
4046 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
4047 0;
4048}
4049
4050static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
4051 u16 ndm_state)
4052{
4053 struct net *net = dev_net(dev);
4054 struct sk_buff *skb;
4055 int err = -ENOBUFS;
4056
4057 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
4058 if (!skb)
4059 goto errout;
4060
4061 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
4062 0, 0, type, NTF_SELF, 0, ndm_state);
4063 if (err < 0) {
4064 kfree_skb(skb);
4065 goto errout;
4066 }
4067
4068 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
4069 return;
4070errout:
4071 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
4072}
4073
4074/*
4075 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
4076 */
4077int ndo_dflt_fdb_add(struct ndmsg *ndm,
4078 struct nlattr *tb[],
4079 struct net_device *dev,
4080 const unsigned char *addr, u16 vid,
4081 u16 flags)
4082{
4083 int err = -EINVAL;
4084
4085 /* If aging addresses are supported device will need to
4086 * implement its own handler for this.
4087 */
4088 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
4089 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4090 return err;
4091 }
4092
4093 if (tb[NDA_FLAGS_EXT]) {
4094 netdev_info(dev, "invalid flags given to default FDB implementation\n");
4095 return err;
4096 }
4097
4098 if (vid) {
4099 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
4100 return err;
4101 }
4102
4103 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4104 err = dev_uc_add_excl(dev, addr);
4105 else if (is_multicast_ether_addr(addr))
4106 err = dev_mc_add_excl(dev, addr);
4107
4108 /* Only return duplicate errors if NLM_F_EXCL is set */
4109 if (err == -EEXIST && !(flags & NLM_F_EXCL))
4110 err = 0;
4111
4112 return err;
4113}
4114EXPORT_SYMBOL(ndo_dflt_fdb_add);
4115
4116static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
4117 struct netlink_ext_ack *extack)
4118{
4119 u16 vid = 0;
4120
4121 if (vlan_attr) {
4122 if (nla_len(vlan_attr) != sizeof(u16)) {
4123 NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
4124 return -EINVAL;
4125 }
4126
4127 vid = nla_get_u16(vlan_attr);
4128
4129 if (!vid || vid >= VLAN_VID_MASK) {
4130 NL_SET_ERR_MSG(extack, "invalid vlan id");
4131 return -EINVAL;
4132 }
4133 }
4134 *p_vid = vid;
4135 return 0;
4136}
4137
4138static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
4139 struct netlink_ext_ack *extack)
4140{
4141 struct net *net = sock_net(skb->sk);
4142 struct ndmsg *ndm;
4143 struct nlattr *tb[NDA_MAX+1];
4144 struct net_device *dev;
4145 u8 *addr;
4146 u16 vid;
4147 int err;
4148
4149 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4150 extack);
4151 if (err < 0)
4152 return err;
4153
4154 ndm = nlmsg_data(nlh);
4155 if (ndm->ndm_ifindex == 0) {
4156 NL_SET_ERR_MSG(extack, "invalid ifindex");
4157 return -EINVAL;
4158 }
4159
4160 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4161 if (dev == NULL) {
4162 NL_SET_ERR_MSG(extack, "unknown ifindex");
4163 return -ENODEV;
4164 }
4165
4166 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4167 NL_SET_ERR_MSG(extack, "invalid address");
4168 return -EINVAL;
4169 }
4170
4171 if (dev->type != ARPHRD_ETHER) {
4172 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
4173 return -EINVAL;
4174 }
4175
4176 addr = nla_data(tb[NDA_LLADDR]);
4177
4178 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4179 if (err)
4180 return err;
4181
4182 err = -EOPNOTSUPP;
4183
4184 /* Support fdb on master device the net/bridge default case */
4185 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4186 netif_is_bridge_port(dev)) {
4187 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4188 const struct net_device_ops *ops = br_dev->netdev_ops;
4189
4190 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
4191 nlh->nlmsg_flags, extack);
4192 if (err)
4193 goto out;
4194 else
4195 ndm->ndm_flags &= ~NTF_MASTER;
4196 }
4197
4198 /* Embedded bridge, macvlan, and any other device support */
4199 if ((ndm->ndm_flags & NTF_SELF)) {
4200 if (dev->netdev_ops->ndo_fdb_add)
4201 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
4202 vid,
4203 nlh->nlmsg_flags,
4204 extack);
4205 else
4206 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
4207 nlh->nlmsg_flags);
4208
4209 if (!err) {
4210 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4211 ndm->ndm_state);
4212 ndm->ndm_flags &= ~NTF_SELF;
4213 }
4214 }
4215out:
4216 return err;
4217}
4218
4219/*
4220 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
4221 */
4222int ndo_dflt_fdb_del(struct ndmsg *ndm,
4223 struct nlattr *tb[],
4224 struct net_device *dev,
4225 const unsigned char *addr, u16 vid)
4226{
4227 int err = -EINVAL;
4228
4229 /* If aging addresses are supported device will need to
4230 * implement its own handler for this.
4231 */
4232 if (!(ndm->ndm_state & NUD_PERMANENT)) {
4233 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4234 return err;
4235 }
4236
4237 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4238 err = dev_uc_del(dev, addr);
4239 else if (is_multicast_ether_addr(addr))
4240 err = dev_mc_del(dev, addr);
4241
4242 return err;
4243}
4244EXPORT_SYMBOL(ndo_dflt_fdb_del);
4245
4246static const struct nla_policy fdb_del_bulk_policy[NDA_MAX + 1] = {
4247 [NDA_VLAN] = { .type = NLA_U16 },
4248 [NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
4249 [NDA_NDM_STATE_MASK] = { .type = NLA_U16 },
4250 [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 },
4251};
4252
4253static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
4254 struct netlink_ext_ack *extack)
4255{
4256 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
4257 struct net *net = sock_net(skb->sk);
4258 const struct net_device_ops *ops;
4259 struct ndmsg *ndm;
4260 struct nlattr *tb[NDA_MAX+1];
4261 struct net_device *dev;
4262 __u8 *addr = NULL;
4263 int err;
4264 u16 vid;
4265
4266 if (!netlink_capable(skb, CAP_NET_ADMIN))
4267 return -EPERM;
4268
4269 if (!del_bulk) {
4270 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
4271 NULL, extack);
4272 } else {
4273 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX,
4274 fdb_del_bulk_policy, extack);
4275 }
4276 if (err < 0)
4277 return err;
4278
4279 ndm = nlmsg_data(nlh);
4280 if (ndm->ndm_ifindex == 0) {
4281 NL_SET_ERR_MSG(extack, "invalid ifindex");
4282 return -EINVAL;
4283 }
4284
4285 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4286 if (dev == NULL) {
4287 NL_SET_ERR_MSG(extack, "unknown ifindex");
4288 return -ENODEV;
4289 }
4290
4291 if (!del_bulk) {
4292 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4293 NL_SET_ERR_MSG(extack, "invalid address");
4294 return -EINVAL;
4295 }
4296 addr = nla_data(tb[NDA_LLADDR]);
4297 }
4298
4299 if (dev->type != ARPHRD_ETHER) {
4300 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
4301 return -EINVAL;
4302 }
4303
4304 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4305 if (err)
4306 return err;
4307
4308 err = -EOPNOTSUPP;
4309
4310 /* Support fdb on master device the net/bridge default case */
4311 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4312 netif_is_bridge_port(dev)) {
4313 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4314
4315 ops = br_dev->netdev_ops;
4316 if (!del_bulk) {
4317 if (ops->ndo_fdb_del)
4318 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4319 } else {
4320 if (ops->ndo_fdb_del_bulk)
4321 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
4322 extack);
4323 }
4324
4325 if (err)
4326 goto out;
4327 else
4328 ndm->ndm_flags &= ~NTF_MASTER;
4329 }
4330
4331 /* Embedded bridge, macvlan, and any other device support */
4332 if (ndm->ndm_flags & NTF_SELF) {
4333 ops = dev->netdev_ops;
4334 if (!del_bulk) {
4335 if (ops->ndo_fdb_del)
4336 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4337 else
4338 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4339 } else {
4340 /* in case err was cleared by NTF_MASTER call */
4341 err = -EOPNOTSUPP;
4342 if (ops->ndo_fdb_del_bulk)
4343 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
4344 extack);
4345 }
4346
4347 if (!err) {
4348 if (!del_bulk)
4349 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4350 ndm->ndm_state);
4351 ndm->ndm_flags &= ~NTF_SELF;
4352 }
4353 }
4354out:
4355 return err;
4356}
4357
4358static int nlmsg_populate_fdb(struct sk_buff *skb,
4359 struct netlink_callback *cb,
4360 struct net_device *dev,
4361 int *idx,
4362 struct netdev_hw_addr_list *list)
4363{
4364 struct netdev_hw_addr *ha;
4365 int err;
4366 u32 portid, seq;
4367
4368 portid = NETLINK_CB(cb->skb).portid;
4369 seq = cb->nlh->nlmsg_seq;
4370
4371 list_for_each_entry(ha, &list->list, list) {
4372 if (*idx < cb->args[2])
4373 goto skip;
4374
4375 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
4376 portid, seq,
4377 RTM_NEWNEIGH, NTF_SELF,
4378 NLM_F_MULTI, NUD_PERMANENT);
4379 if (err < 0)
4380 return err;
4381skip:
4382 *idx += 1;
4383 }
4384 return 0;
4385}
4386
4387/**
4388 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
4389 * @skb: socket buffer to store message in
4390 * @cb: netlink callback
4391 * @dev: netdevice
4392 * @filter_dev: ignored
4393 * @idx: the number of FDB table entries dumped is added to *@idx
4394 *
4395 * Default netdevice operation to dump the existing unicast address list.
4396 * Returns number of addresses from list put in skb.
4397 */
4398int ndo_dflt_fdb_dump(struct sk_buff *skb,
4399 struct netlink_callback *cb,
4400 struct net_device *dev,
4401 struct net_device *filter_dev,
4402 int *idx)
4403{
4404 int err;
4405
4406 if (dev->type != ARPHRD_ETHER)
4407 return -EINVAL;
4408
4409 netif_addr_lock_bh(dev);
4410 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
4411 if (err)
4412 goto out;
4413 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
4414out:
4415 netif_addr_unlock_bh(dev);
4416 return err;
4417}
4418EXPORT_SYMBOL(ndo_dflt_fdb_dump);
4419
4420static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
4421 int *br_idx, int *brport_idx,
4422 struct netlink_ext_ack *extack)
4423{
4424 struct nlattr *tb[NDA_MAX + 1];
4425 struct ndmsg *ndm;
4426 int err, i;
4427
4428 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4429 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
4430 return -EINVAL;
4431 }
4432
4433 ndm = nlmsg_data(nlh);
4434 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4435 ndm->ndm_flags || ndm->ndm_type) {
4436 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
4437 return -EINVAL;
4438 }
4439
4440 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4441 NDA_MAX, NULL, extack);
4442 if (err < 0)
4443 return err;
4444
4445 *brport_idx = ndm->ndm_ifindex;
4446 for (i = 0; i <= NDA_MAX; ++i) {
4447 if (!tb[i])
4448 continue;
4449
4450 switch (i) {
4451 case NDA_IFINDEX:
4452 if (nla_len(tb[i]) != sizeof(u32)) {
4453 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
4454 return -EINVAL;
4455 }
4456 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
4457 break;
4458 case NDA_MASTER:
4459 if (nla_len(tb[i]) != sizeof(u32)) {
4460 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
4461 return -EINVAL;
4462 }
4463 *br_idx = nla_get_u32(tb[NDA_MASTER]);
4464 break;
4465 default:
4466 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
4467 return -EINVAL;
4468 }
4469 }
4470
4471 return 0;
4472}
4473
4474static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
4475 int *br_idx, int *brport_idx,
4476 struct netlink_ext_ack *extack)
4477{
4478 struct nlattr *tb[IFLA_MAX+1];
4479 int err;
4480
4481 /* A hack to preserve kernel<->userspace interface.
4482 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
4483 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
4484 * So, check for ndmsg with an optional u32 attribute (not used here).
4485 * Fortunately these sizes don't conflict with the size of ifinfomsg
4486 * with an optional attribute.
4487 */
4488 if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
4489 (nlmsg_len(nlh) != sizeof(struct ndmsg) +
4490 nla_attr_size(sizeof(u32)))) {
4491 struct ifinfomsg *ifm;
4492
4493 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4494 tb, IFLA_MAX, ifla_policy,
4495 extack);
4496 if (err < 0) {
4497 return -EINVAL;
4498 } else if (err == 0) {
4499 if (tb[IFLA_MASTER])
4500 *br_idx = nla_get_u32(tb[IFLA_MASTER]);
4501 }
4502
4503 ifm = nlmsg_data(nlh);
4504 *brport_idx = ifm->ifi_index;
4505 }
4506 return 0;
4507}
4508
4509static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
4510{
4511 struct net_device *dev;
4512 struct net_device *br_dev = NULL;
4513 const struct net_device_ops *ops = NULL;
4514 const struct net_device_ops *cops = NULL;
4515 struct net *net = sock_net(skb->sk);
4516 struct hlist_head *head;
4517 int brport_idx = 0;
4518 int br_idx = 0;
4519 int h, s_h;
4520 int idx = 0, s_idx;
4521 int err = 0;
4522 int fidx = 0;
4523
4524 if (cb->strict_check)
4525 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
4526 cb->extack);
4527 else
4528 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
4529 cb->extack);
4530 if (err < 0)
4531 return err;
4532
4533 if (br_idx) {
4534 br_dev = __dev_get_by_index(net, br_idx);
4535 if (!br_dev)
4536 return -ENODEV;
4537
4538 ops = br_dev->netdev_ops;
4539 }
4540
4541 s_h = cb->args[0];
4542 s_idx = cb->args[1];
4543
4544 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4545 idx = 0;
4546 head = &net->dev_index_head[h];
4547 hlist_for_each_entry(dev, head, index_hlist) {
4548
4549 if (brport_idx && (dev->ifindex != brport_idx))
4550 continue;
4551
4552 if (!br_idx) { /* user did not specify a specific bridge */
4553 if (netif_is_bridge_port(dev)) {
4554 br_dev = netdev_master_upper_dev_get(dev);
4555 cops = br_dev->netdev_ops;
4556 }
4557 } else {
4558 if (dev != br_dev &&
4559 !netif_is_bridge_port(dev))
4560 continue;
4561
4562 if (br_dev != netdev_master_upper_dev_get(dev) &&
4563 !netif_is_bridge_master(dev))
4564 continue;
4565 cops = ops;
4566 }
4567
4568 if (idx < s_idx)
4569 goto cont;
4570
4571 if (netif_is_bridge_port(dev)) {
4572 if (cops && cops->ndo_fdb_dump) {
4573 err = cops->ndo_fdb_dump(skb, cb,
4574 br_dev, dev,
4575 &fidx);
4576 if (err == -EMSGSIZE)
4577 goto out;
4578 }
4579 }
4580
4581 if (dev->netdev_ops->ndo_fdb_dump)
4582 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4583 dev, NULL,
4584 &fidx);
4585 else
4586 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4587 &fidx);
4588 if (err == -EMSGSIZE)
4589 goto out;
4590
4591 cops = NULL;
4592
4593 /* reset fdb offset to 0 for rest of the interfaces */
4594 cb->args[2] = 0;
4595 fidx = 0;
4596cont:
4597 idx++;
4598 }
4599 }
4600
4601out:
4602 cb->args[0] = h;
4603 cb->args[1] = idx;
4604 cb->args[2] = fidx;
4605
4606 return skb->len;
4607}
4608
4609static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
4610 struct nlattr **tb, u8 *ndm_flags,
4611 int *br_idx, int *brport_idx, u8 **addr,
4612 u16 *vid, struct netlink_ext_ack *extack)
4613{
4614 struct ndmsg *ndm;
4615 int err, i;
4616
4617 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4618 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
4619 return -EINVAL;
4620 }
4621
4622 ndm = nlmsg_data(nlh);
4623 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4624 ndm->ndm_type) {
4625 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
4626 return -EINVAL;
4627 }
4628
4629 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
4630 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
4631 return -EINVAL;
4632 }
4633
4634 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4635 NDA_MAX, nda_policy, extack);
4636 if (err < 0)
4637 return err;
4638
4639 *ndm_flags = ndm->ndm_flags;
4640 *brport_idx = ndm->ndm_ifindex;
4641 for (i = 0; i <= NDA_MAX; ++i) {
4642 if (!tb[i])
4643 continue;
4644
4645 switch (i) {
4646 case NDA_MASTER:
4647 *br_idx = nla_get_u32(tb[i]);
4648 break;
4649 case NDA_LLADDR:
4650 if (nla_len(tb[i]) != ETH_ALEN) {
4651 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
4652 return -EINVAL;
4653 }
4654 *addr = nla_data(tb[i]);
4655 break;
4656 case NDA_VLAN:
4657 err = fdb_vid_parse(tb[i], vid, extack);
4658 if (err)
4659 return err;
4660 break;
4661 case NDA_VNI:
4662 break;
4663 default:
4664 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
4665 return -EINVAL;
4666 }
4667 }
4668
4669 return 0;
4670}
4671
4672static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4673 struct netlink_ext_ack *extack)
4674{
4675 struct net_device *dev = NULL, *br_dev = NULL;
4676 const struct net_device_ops *ops = NULL;
4677 struct net *net = sock_net(in_skb->sk);
4678 struct nlattr *tb[NDA_MAX + 1];
4679 struct sk_buff *skb;
4680 int brport_idx = 0;
4681 u8 ndm_flags = 0;
4682 int br_idx = 0;
4683 u8 *addr = NULL;
4684 u16 vid = 0;
4685 int err;
4686
4687 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
4688 &brport_idx, &addr, &vid, extack);
4689 if (err < 0)
4690 return err;
4691
4692 if (!addr) {
4693 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
4694 return -EINVAL;
4695 }
4696
4697 if (brport_idx) {
4698 dev = __dev_get_by_index(net, brport_idx);
4699 if (!dev) {
4700 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
4701 return -ENODEV;
4702 }
4703 }
4704
4705 if (br_idx) {
4706 if (dev) {
4707 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
4708 return -EINVAL;
4709 }
4710
4711 br_dev = __dev_get_by_index(net, br_idx);
4712 if (!br_dev) {
4713 NL_SET_ERR_MSG(extack, "Invalid master ifindex");
4714 return -EINVAL;
4715 }
4716 ops = br_dev->netdev_ops;
4717 }
4718
4719 if (dev) {
4720 if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
4721 if (!netif_is_bridge_port(dev)) {
4722 NL_SET_ERR_MSG(extack, "Device is not a bridge port");
4723 return -EINVAL;
4724 }
4725 br_dev = netdev_master_upper_dev_get(dev);
4726 if (!br_dev) {
4727 NL_SET_ERR_MSG(extack, "Master of device not found");
4728 return -EINVAL;
4729 }
4730 ops = br_dev->netdev_ops;
4731 } else {
4732 if (!(ndm_flags & NTF_SELF)) {
4733 NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
4734 return -EINVAL;
4735 }
4736 ops = dev->netdev_ops;
4737 }
4738 }
4739
4740 if (!br_dev && !dev) {
4741 NL_SET_ERR_MSG(extack, "No device specified");
4742 return -ENODEV;
4743 }
4744
4745 if (!ops || !ops->ndo_fdb_get) {
4746 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
4747 return -EOPNOTSUPP;
4748 }
4749
4750 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4751 if (!skb)
4752 return -ENOBUFS;
4753
4754 if (br_dev)
4755 dev = br_dev;
4756 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
4757 NETLINK_CB(in_skb).portid,
4758 nlh->nlmsg_seq, extack);
4759 if (err)
4760 goto out;
4761
4762 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4763out:
4764 kfree_skb(skb);
4765 return err;
4766}
4767
4768static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
4769 unsigned int attrnum, unsigned int flag)
4770{
4771 if (mask & flag)
4772 return nla_put_u8(skb, attrnum, !!(flags & flag));
4773 return 0;
4774}
4775
4776int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4777 struct net_device *dev, u16 mode,
4778 u32 flags, u32 mask, int nlflags,
4779 u32 filter_mask,
4780 int (*vlan_fill)(struct sk_buff *skb,
4781 struct net_device *dev,
4782 u32 filter_mask))
4783{
4784 struct nlmsghdr *nlh;
4785 struct ifinfomsg *ifm;
4786 struct nlattr *br_afspec;
4787 struct nlattr *protinfo;
4788 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
4789 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4790 int err = 0;
4791
4792 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
4793 if (nlh == NULL)
4794 return -EMSGSIZE;
4795
4796 ifm = nlmsg_data(nlh);
4797 ifm->ifi_family = AF_BRIDGE;
4798 ifm->__ifi_pad = 0;
4799 ifm->ifi_type = dev->type;
4800 ifm->ifi_index = dev->ifindex;
4801 ifm->ifi_flags = dev_get_flags(dev);
4802 ifm->ifi_change = 0;
4803
4804
4805 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4806 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4807 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
4808 (br_dev &&
4809 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
4810 (dev->addr_len &&
4811 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4812 (dev->ifindex != dev_get_iflink(dev) &&
4813 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
4814 goto nla_put_failure;
4815
4816 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
4817 if (!br_afspec)
4818 goto nla_put_failure;
4819
4820 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
4821 nla_nest_cancel(skb, br_afspec);
4822 goto nla_put_failure;
4823 }
4824
4825 if (mode != BRIDGE_MODE_UNDEF) {
4826 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
4827 nla_nest_cancel(skb, br_afspec);
4828 goto nla_put_failure;
4829 }
4830 }
4831 if (vlan_fill) {
4832 err = vlan_fill(skb, dev, filter_mask);
4833 if (err) {
4834 nla_nest_cancel(skb, br_afspec);
4835 goto nla_put_failure;
4836 }
4837 }
4838 nla_nest_end(skb, br_afspec);
4839
4840 protinfo = nla_nest_start(skb, IFLA_PROTINFO);
4841 if (!protinfo)
4842 goto nla_put_failure;
4843
4844 if (brport_nla_put_flag(skb, flags, mask,
4845 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
4846 brport_nla_put_flag(skb, flags, mask,
4847 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
4848 brport_nla_put_flag(skb, flags, mask,
4849 IFLA_BRPORT_FAST_LEAVE,
4850 BR_MULTICAST_FAST_LEAVE) ||
4851 brport_nla_put_flag(skb, flags, mask,
4852 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
4853 brport_nla_put_flag(skb, flags, mask,
4854 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
4855 brport_nla_put_flag(skb, flags, mask,
4856 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
4857 brport_nla_put_flag(skb, flags, mask,
4858 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
4859 brport_nla_put_flag(skb, flags, mask,
4860 IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
4861 brport_nla_put_flag(skb, flags, mask,
4862 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
4863 brport_nla_put_flag(skb, flags, mask,
4864 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
4865 nla_nest_cancel(skb, protinfo);
4866 goto nla_put_failure;
4867 }
4868
4869 nla_nest_end(skb, protinfo);
4870
4871 nlmsg_end(skb, nlh);
4872 return 0;
4873nla_put_failure:
4874 nlmsg_cancel(skb, nlh);
4875 return err ? err : -EMSGSIZE;
4876}
4877EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
4878
4879static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
4880 bool strict_check, u32 *filter_mask,
4881 struct netlink_ext_ack *extack)
4882{
4883 struct nlattr *tb[IFLA_MAX+1];
4884 int err, i;
4885
4886 if (strict_check) {
4887 struct ifinfomsg *ifm;
4888
4889 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
4890 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
4891 return -EINVAL;
4892 }
4893
4894 ifm = nlmsg_data(nlh);
4895 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
4896 ifm->ifi_change || ifm->ifi_index) {
4897 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
4898 return -EINVAL;
4899 }
4900
4901 err = nlmsg_parse_deprecated_strict(nlh,
4902 sizeof(struct ifinfomsg),
4903 tb, IFLA_MAX, ifla_policy,
4904 extack);
4905 } else {
4906 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4907 tb, IFLA_MAX, ifla_policy,
4908 extack);
4909 }
4910 if (err < 0)
4911 return err;
4912
4913 /* new attributes should only be added with strict checking */
4914 for (i = 0; i <= IFLA_MAX; ++i) {
4915 if (!tb[i])
4916 continue;
4917
4918 switch (i) {
4919 case IFLA_EXT_MASK:
4920 *filter_mask = nla_get_u32(tb[i]);
4921 break;
4922 default:
4923 if (strict_check) {
4924 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
4925 return -EINVAL;
4926 }
4927 }
4928 }
4929
4930 return 0;
4931}
4932
4933static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
4934{
4935 const struct nlmsghdr *nlh = cb->nlh;
4936 struct net *net = sock_net(skb->sk);
4937 struct net_device *dev;
4938 int idx = 0;
4939 u32 portid = NETLINK_CB(cb->skb).portid;
4940 u32 seq = nlh->nlmsg_seq;
4941 u32 filter_mask = 0;
4942 int err;
4943
4944 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
4945 cb->extack);
4946 if (err < 0 && cb->strict_check)
4947 return err;
4948
4949 rcu_read_lock();
4950 for_each_netdev_rcu(net, dev) {
4951 const struct net_device_ops *ops = dev->netdev_ops;
4952 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4953
4954 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
4955 if (idx >= cb->args[0]) {
4956 err = br_dev->netdev_ops->ndo_bridge_getlink(
4957 skb, portid, seq, dev,
4958 filter_mask, NLM_F_MULTI);
4959 if (err < 0 && err != -EOPNOTSUPP) {
4960 if (likely(skb->len))
4961 break;
4962
4963 goto out_err;
4964 }
4965 }
4966 idx++;
4967 }
4968
4969 if (ops->ndo_bridge_getlink) {
4970 if (idx >= cb->args[0]) {
4971 err = ops->ndo_bridge_getlink(skb, portid,
4972 seq, dev,
4973 filter_mask,
4974 NLM_F_MULTI);
4975 if (err < 0 && err != -EOPNOTSUPP) {
4976 if (likely(skb->len))
4977 break;
4978
4979 goto out_err;
4980 }
4981 }
4982 idx++;
4983 }
4984 }
4985 err = skb->len;
4986out_err:
4987 rcu_read_unlock();
4988 cb->args[0] = idx;
4989
4990 return err;
4991}
4992
4993static inline size_t bridge_nlmsg_size(void)
4994{
4995 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
4996 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
4997 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
4998 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
4999 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
5000 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
5001 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
5002 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
5003 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
5004 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
5005 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
5006}
5007
5008static int rtnl_bridge_notify(struct net_device *dev)
5009{
5010 struct net *net = dev_net(dev);
5011 struct sk_buff *skb;
5012 int err = -EOPNOTSUPP;
5013
5014 if (!dev->netdev_ops->ndo_bridge_getlink)
5015 return 0;
5016
5017 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
5018 if (!skb) {
5019 err = -ENOMEM;
5020 goto errout;
5021 }
5022
5023 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
5024 if (err < 0)
5025 goto errout;
5026
5027 /* Notification info is only filled for bridge ports, not the bridge
5028 * device itself. Therefore, a zero notification length is valid and
5029 * should not result in an error.
5030 */
5031 if (!skb->len)
5032 goto errout;
5033
5034 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
5035 return 0;
5036errout:
5037 WARN_ON(err == -EMSGSIZE);
5038 kfree_skb(skb);
5039 if (err)
5040 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
5041 return err;
5042}
5043
5044static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
5045 struct netlink_ext_ack *extack)
5046{
5047 struct net *net = sock_net(skb->sk);
5048 struct ifinfomsg *ifm;
5049 struct net_device *dev;
5050 struct nlattr *br_spec, *attr = NULL;
5051 int rem, err = -EOPNOTSUPP;
5052 u16 flags = 0;
5053 bool have_flags = false;
5054
5055 if (nlmsg_len(nlh) < sizeof(*ifm))
5056 return -EINVAL;
5057
5058 ifm = nlmsg_data(nlh);
5059 if (ifm->ifi_family != AF_BRIDGE)
5060 return -EPFNOSUPPORT;
5061
5062 dev = __dev_get_by_index(net, ifm->ifi_index);
5063 if (!dev) {
5064 NL_SET_ERR_MSG(extack, "unknown ifindex");
5065 return -ENODEV;
5066 }
5067
5068 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5069 if (br_spec) {
5070 nla_for_each_nested(attr, br_spec, rem) {
5071 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
5072 if (nla_len(attr) < sizeof(flags))
5073 return -EINVAL;
5074
5075 have_flags = true;
5076 flags = nla_get_u16(attr);
5077 break;
5078 }
5079 }
5080 }
5081
5082 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5083 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5084
5085 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
5086 err = -EOPNOTSUPP;
5087 goto out;
5088 }
5089
5090 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
5091 extack);
5092 if (err)
5093 goto out;
5094
5095 flags &= ~BRIDGE_FLAGS_MASTER;
5096 }
5097
5098 if ((flags & BRIDGE_FLAGS_SELF)) {
5099 if (!dev->netdev_ops->ndo_bridge_setlink)
5100 err = -EOPNOTSUPP;
5101 else
5102 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
5103 flags,
5104 extack);
5105 if (!err) {
5106 flags &= ~BRIDGE_FLAGS_SELF;
5107
5108 /* Generate event to notify upper layer of bridge
5109 * change
5110 */
5111 err = rtnl_bridge_notify(dev);
5112 }
5113 }
5114
5115 if (have_flags)
5116 memcpy(nla_data(attr), &flags, sizeof(flags));
5117out:
5118 return err;
5119}
5120
5121static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
5122 struct netlink_ext_ack *extack)
5123{
5124 struct net *net = sock_net(skb->sk);
5125 struct ifinfomsg *ifm;
5126 struct net_device *dev;
5127 struct nlattr *br_spec, *attr = NULL;
5128 int rem, err = -EOPNOTSUPP;
5129 u16 flags = 0;
5130 bool have_flags = false;
5131
5132 if (nlmsg_len(nlh) < sizeof(*ifm))
5133 return -EINVAL;
5134
5135 ifm = nlmsg_data(nlh);
5136 if (ifm->ifi_family != AF_BRIDGE)
5137 return -EPFNOSUPPORT;
5138
5139 dev = __dev_get_by_index(net, ifm->ifi_index);
5140 if (!dev) {
5141 NL_SET_ERR_MSG(extack, "unknown ifindex");
5142 return -ENODEV;
5143 }
5144
5145 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5146 if (br_spec) {
5147 nla_for_each_nested(attr, br_spec, rem) {
5148 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
5149 if (nla_len(attr) < sizeof(flags))
5150 return -EINVAL;
5151
5152 have_flags = true;
5153 flags = nla_get_u16(attr);
5154 break;
5155 }
5156 }
5157 }
5158
5159 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5160 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5161
5162 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
5163 err = -EOPNOTSUPP;
5164 goto out;
5165 }
5166
5167 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
5168 if (err)
5169 goto out;
5170
5171 flags &= ~BRIDGE_FLAGS_MASTER;
5172 }
5173
5174 if ((flags & BRIDGE_FLAGS_SELF)) {
5175 if (!dev->netdev_ops->ndo_bridge_dellink)
5176 err = -EOPNOTSUPP;
5177 else
5178 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5179 flags);
5180
5181 if (!err) {
5182 flags &= ~BRIDGE_FLAGS_SELF;
5183
5184 /* Generate event to notify upper layer of bridge
5185 * change
5186 */
5187 err = rtnl_bridge_notify(dev);
5188 }
5189 }
5190
5191 if (have_flags)
5192 memcpy(nla_data(attr), &flags, sizeof(flags));
5193out:
5194 return err;
5195}
5196
5197static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
5198{
5199 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
5200 (!idxattr || idxattr == attrid);
5201}
5202
5203static bool
5204rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id)
5205{
5206 return dev->netdev_ops &&
5207 dev->netdev_ops->ndo_has_offload_stats &&
5208 dev->netdev_ops->ndo_get_offload_stats &&
5209 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id);
5210}
5211
5212static unsigned int
5213rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id)
5214{
5215 return rtnl_offload_xstats_have_ndo(dev, attr_id) ?
5216 sizeof(struct rtnl_link_stats64) : 0;
5217}
5218
5219static int
5220rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id,
5221 struct sk_buff *skb)
5222{
5223 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id);
5224 struct nlattr *attr = NULL;
5225 void *attr_data;
5226 int err;
5227
5228 if (!size)
5229 return -ENODATA;
5230
5231 attr = nla_reserve_64bit(skb, attr_id, size,
5232 IFLA_OFFLOAD_XSTATS_UNSPEC);
5233 if (!attr)
5234 return -EMSGSIZE;
5235
5236 attr_data = nla_data(attr);
5237 memset(attr_data, 0, size);
5238
5239 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data);
5240 if (err)
5241 return err;
5242
5243 return 0;
5244}
5245
5246static unsigned int
5247rtnl_offload_xstats_get_size_stats(const struct net_device *dev,
5248 enum netdev_offload_xstats_type type)
5249{
5250 bool enabled = netdev_offload_xstats_enabled(dev, type);
5251
5252 return enabled ? sizeof(struct rtnl_hw_stats64) : 0;
5253}
5254
5255struct rtnl_offload_xstats_request_used {
5256 bool request;
5257 bool used;
5258};
5259
5260static int
5261rtnl_offload_xstats_get_stats(struct net_device *dev,
5262 enum netdev_offload_xstats_type type,
5263 struct rtnl_offload_xstats_request_used *ru,
5264 struct rtnl_hw_stats64 *stats,
5265 struct netlink_ext_ack *extack)
5266{
5267 bool request;
5268 bool used;
5269 int err;
5270
5271 request = netdev_offload_xstats_enabled(dev, type);
5272 if (!request) {
5273 used = false;
5274 goto out;
5275 }
5276
5277 err = netdev_offload_xstats_get(dev, type, stats, &used, extack);
5278 if (err)
5279 return err;
5280
5281out:
5282 if (ru) {
5283 ru->request = request;
5284 ru->used = used;
5285 }
5286 return 0;
5287}
5288
5289static int
5290rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id,
5291 struct rtnl_offload_xstats_request_used *ru)
5292{
5293 struct nlattr *nest;
5294
5295 nest = nla_nest_start(skb, attr_id);
5296 if (!nest)
5297 return -EMSGSIZE;
5298
5299 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request))
5300 goto nla_put_failure;
5301
5302 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used))
5303 goto nla_put_failure;
5304
5305 nla_nest_end(skb, nest);
5306 return 0;
5307
5308nla_put_failure:
5309 nla_nest_cancel(skb, nest);
5310 return -EMSGSIZE;
5311}
5312
5313static int
5314rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev,
5315 struct netlink_ext_ack *extack)
5316{
5317 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5318 struct rtnl_offload_xstats_request_used ru_l3;
5319 struct nlattr *nest;
5320 int err;
5321
5322 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack);
5323 if (err)
5324 return err;
5325
5326 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5327 if (!nest)
5328 return -EMSGSIZE;
5329
5330 if (rtnl_offload_xstats_fill_hw_s_info_one(skb,
5331 IFLA_OFFLOAD_XSTATS_L3_STATS,
5332 &ru_l3))
5333 goto nla_put_failure;
5334
5335 nla_nest_end(skb, nest);
5336 return 0;
5337
5338nla_put_failure:
5339 nla_nest_cancel(skb, nest);
5340 return -EMSGSIZE;
5341}
5342
5343static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
5344 int *prividx, u32 off_filter_mask,
5345 struct netlink_ext_ack *extack)
5346{
5347 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5348 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO;
5349 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS;
5350 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5351 bool have_data = false;
5352 int err;
5353
5354 if (*prividx <= attr_id_cpu_hit &&
5355 (off_filter_mask &
5356 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) {
5357 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb);
5358 if (!err) {
5359 have_data = true;
5360 } else if (err != -ENODATA) {
5361 *prividx = attr_id_cpu_hit;
5362 return err;
5363 }
5364 }
5365
5366 if (*prividx <= attr_id_hw_s_info &&
5367 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) {
5368 *prividx = attr_id_hw_s_info;
5369
5370 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack);
5371 if (err)
5372 return err;
5373
5374 have_data = true;
5375 *prividx = 0;
5376 }
5377
5378 if (*prividx <= attr_id_l3_stats &&
5379 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) {
5380 unsigned int size_l3;
5381 struct nlattr *attr;
5382
5383 *prividx = attr_id_l3_stats;
5384
5385 size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5386 if (!size_l3)
5387 goto skip_l3_stats;
5388 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
5389 IFLA_OFFLOAD_XSTATS_UNSPEC);
5390 if (!attr)
5391 return -EMSGSIZE;
5392
5393 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL,
5394 nla_data(attr), extack);
5395 if (err)
5396 return err;
5397
5398 have_data = true;
5399skip_l3_stats:
5400 *prividx = 0;
5401 }
5402
5403 if (!have_data)
5404 return -ENODATA;
5405
5406 *prividx = 0;
5407 return 0;
5408}
5409
5410static unsigned int
5411rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
5412 enum netdev_offload_xstats_type type)
5413{
5414 bool enabled = netdev_offload_xstats_enabled(dev, type);
5415
5416 return nla_total_size(0) +
5417 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
5418 nla_total_size(sizeof(u8)) +
5419 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
5420 (enabled ? nla_total_size(sizeof(u8)) : 0) +
5421 0;
5422}
5423
5424static unsigned int
5425rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev)
5426{
5427 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5428
5429 return nla_total_size(0) +
5430 /* IFLA_OFFLOAD_XSTATS_L3_STATS */
5431 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) +
5432 0;
5433}
5434
5435static int rtnl_offload_xstats_get_size(const struct net_device *dev,
5436 u32 off_filter_mask)
5437{
5438 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5439 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5440 int nla_size = 0;
5441 int size;
5442
5443 if (off_filter_mask &
5444 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) {
5445 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit);
5446 nla_size += nla_total_size_64bit(size);
5447 }
5448
5449 if (off_filter_mask &
5450 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO))
5451 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev);
5452
5453 if (off_filter_mask &
5454 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) {
5455 size = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5456 nla_size += nla_total_size_64bit(size);
5457 }
5458
5459 if (nla_size != 0)
5460 nla_size += nla_total_size(0);
5461
5462 return nla_size;
5463}
5464
5465struct rtnl_stats_dump_filters {
5466 /* mask[0] filters outer attributes. Then individual nests have their
5467 * filtering mask at the index of the nested attribute.
5468 */
5469 u32 mask[IFLA_STATS_MAX + 1];
5470};
5471
5472static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
5473 int type, u32 pid, u32 seq, u32 change,
5474 unsigned int flags,
5475 const struct rtnl_stats_dump_filters *filters,
5476 int *idxattr, int *prividx,
5477 struct netlink_ext_ack *extack)
5478{
5479 unsigned int filter_mask = filters->mask[0];
5480 struct if_stats_msg *ifsm;
5481 struct nlmsghdr *nlh;
5482 struct nlattr *attr;
5483 int s_prividx = *prividx;
5484 int err;
5485
5486 ASSERT_RTNL();
5487
5488 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
5489 if (!nlh)
5490 return -EMSGSIZE;
5491
5492 ifsm = nlmsg_data(nlh);
5493 ifsm->family = PF_UNSPEC;
5494 ifsm->pad1 = 0;
5495 ifsm->pad2 = 0;
5496 ifsm->ifindex = dev->ifindex;
5497 ifsm->filter_mask = filter_mask;
5498
5499 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
5500 struct rtnl_link_stats64 *sp;
5501
5502 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
5503 sizeof(struct rtnl_link_stats64),
5504 IFLA_STATS_UNSPEC);
5505 if (!attr) {
5506 err = -EMSGSIZE;
5507 goto nla_put_failure;
5508 }
5509
5510 sp = nla_data(attr);
5511 dev_get_stats(dev, sp);
5512 }
5513
5514 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
5515 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5516
5517 if (ops && ops->fill_linkxstats) {
5518 *idxattr = IFLA_STATS_LINK_XSTATS;
5519 attr = nla_nest_start_noflag(skb,
5520 IFLA_STATS_LINK_XSTATS);
5521 if (!attr) {
5522 err = -EMSGSIZE;
5523 goto nla_put_failure;
5524 }
5525
5526 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5527 nla_nest_end(skb, attr);
5528 if (err)
5529 goto nla_put_failure;
5530 *idxattr = 0;
5531 }
5532 }
5533
5534 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
5535 *idxattr)) {
5536 const struct rtnl_link_ops *ops = NULL;
5537 const struct net_device *master;
5538
5539 master = netdev_master_upper_dev_get(dev);
5540 if (master)
5541 ops = master->rtnl_link_ops;
5542 if (ops && ops->fill_linkxstats) {
5543 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
5544 attr = nla_nest_start_noflag(skb,
5545 IFLA_STATS_LINK_XSTATS_SLAVE);
5546 if (!attr) {
5547 err = -EMSGSIZE;
5548 goto nla_put_failure;
5549 }
5550
5551 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5552 nla_nest_end(skb, attr);
5553 if (err)
5554 goto nla_put_failure;
5555 *idxattr = 0;
5556 }
5557 }
5558
5559 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
5560 *idxattr)) {
5561 u32 off_filter_mask;
5562
5563 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5564 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
5565 attr = nla_nest_start_noflag(skb,
5566 IFLA_STATS_LINK_OFFLOAD_XSTATS);
5567 if (!attr) {
5568 err = -EMSGSIZE;
5569 goto nla_put_failure;
5570 }
5571
5572 err = rtnl_offload_xstats_fill(skb, dev, prividx,
5573 off_filter_mask, extack);
5574 if (err == -ENODATA)
5575 nla_nest_cancel(skb, attr);
5576 else
5577 nla_nest_end(skb, attr);
5578
5579 if (err && err != -ENODATA)
5580 goto nla_put_failure;
5581 *idxattr = 0;
5582 }
5583
5584 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
5585 struct rtnl_af_ops *af_ops;
5586
5587 *idxattr = IFLA_STATS_AF_SPEC;
5588 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
5589 if (!attr) {
5590 err = -EMSGSIZE;
5591 goto nla_put_failure;
5592 }
5593
5594 rcu_read_lock();
5595 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5596 if (af_ops->fill_stats_af) {
5597 struct nlattr *af;
5598
5599 af = nla_nest_start_noflag(skb,
5600 af_ops->family);
5601 if (!af) {
5602 rcu_read_unlock();
5603 err = -EMSGSIZE;
5604 goto nla_put_failure;
5605 }
5606 err = af_ops->fill_stats_af(skb, dev);
5607
5608 if (err == -ENODATA) {
5609 nla_nest_cancel(skb, af);
5610 } else if (err < 0) {
5611 rcu_read_unlock();
5612 goto nla_put_failure;
5613 }
5614
5615 nla_nest_end(skb, af);
5616 }
5617 }
5618 rcu_read_unlock();
5619
5620 nla_nest_end(skb, attr);
5621
5622 *idxattr = 0;
5623 }
5624
5625 nlmsg_end(skb, nlh);
5626
5627 return 0;
5628
5629nla_put_failure:
5630 /* not a multi message or no progress mean a real error */
5631 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
5632 nlmsg_cancel(skb, nlh);
5633 else
5634 nlmsg_end(skb, nlh);
5635
5636 return err;
5637}
5638
5639static size_t if_nlmsg_stats_size(const struct net_device *dev,
5640 const struct rtnl_stats_dump_filters *filters)
5641{
5642 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
5643 unsigned int filter_mask = filters->mask[0];
5644
5645 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
5646 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
5647
5648 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
5649 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5650 int attr = IFLA_STATS_LINK_XSTATS;
5651
5652 if (ops && ops->get_linkxstats_size) {
5653 size += nla_total_size(ops->get_linkxstats_size(dev,
5654 attr));
5655 /* for IFLA_STATS_LINK_XSTATS */
5656 size += nla_total_size(0);
5657 }
5658 }
5659
5660 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
5661 struct net_device *_dev = (struct net_device *)dev;
5662 const struct rtnl_link_ops *ops = NULL;
5663 const struct net_device *master;
5664
5665 /* netdev_master_upper_dev_get can't take const */
5666 master = netdev_master_upper_dev_get(_dev);
5667 if (master)
5668 ops = master->rtnl_link_ops;
5669 if (ops && ops->get_linkxstats_size) {
5670 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
5671
5672 size += nla_total_size(ops->get_linkxstats_size(dev,
5673 attr));
5674 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
5675 size += nla_total_size(0);
5676 }
5677 }
5678
5679 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) {
5680 u32 off_filter_mask;
5681
5682 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5683 size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
5684 }
5685
5686 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
5687 struct rtnl_af_ops *af_ops;
5688
5689 /* for IFLA_STATS_AF_SPEC */
5690 size += nla_total_size(0);
5691
5692 rcu_read_lock();
5693 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5694 if (af_ops->get_stats_af_size) {
5695 size += nla_total_size(
5696 af_ops->get_stats_af_size(dev));
5697
5698 /* for AF_* */
5699 size += nla_total_size(0);
5700 }
5701 }
5702 rcu_read_unlock();
5703 }
5704
5705 return size;
5706}
5707
5708#define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1)
5709
5710static const struct nla_policy
5711rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
5712 [IFLA_STATS_LINK_OFFLOAD_XSTATS] =
5713 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID),
5714};
5715
5716static const struct nla_policy
5717rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = {
5718 [IFLA_STATS_GET_FILTERS] =
5719 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters),
5720};
5721
5722static const struct nla_policy
5723ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
5724 [IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1),
5725};
5726
5727static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters,
5728 struct rtnl_stats_dump_filters *filters,
5729 struct netlink_ext_ack *extack)
5730{
5731 struct nlattr *tb[IFLA_STATS_MAX + 1];
5732 int err;
5733 int at;
5734
5735 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters,
5736 rtnl_stats_get_policy_filters, extack);
5737 if (err < 0)
5738 return err;
5739
5740 for (at = 1; at <= IFLA_STATS_MAX; at++) {
5741 if (tb[at]) {
5742 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) {
5743 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask");
5744 return -EINVAL;
5745 }
5746 filters->mask[at] = nla_get_u32(tb[at]);
5747 }
5748 }
5749
5750 return 0;
5751}
5752
5753static int rtnl_stats_get_parse(const struct nlmsghdr *nlh,
5754 u32 filter_mask,
5755 struct rtnl_stats_dump_filters *filters,
5756 struct netlink_ext_ack *extack)
5757{
5758 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
5759 int err;
5760 int i;
5761
5762 filters->mask[0] = filter_mask;
5763 for (i = 1; i < ARRAY_SIZE(filters->mask); i++)
5764 filters->mask[i] = -1U;
5765
5766 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb,
5767 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack);
5768 if (err < 0)
5769 return err;
5770
5771 if (tb[IFLA_STATS_GET_FILTERS]) {
5772 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS],
5773 filters, extack);
5774 if (err)
5775 return err;
5776 }
5777
5778 return 0;
5779}
5780
5781static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
5782 bool is_dump, struct netlink_ext_ack *extack)
5783{
5784 struct if_stats_msg *ifsm;
5785
5786 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
5787 NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
5788 return -EINVAL;
5789 }
5790
5791 if (!strict_check)
5792 return 0;
5793
5794 ifsm = nlmsg_data(nlh);
5795
5796 /* only requests using strict checks can pass data to influence
5797 * the dump. The legacy exception is filter_mask.
5798 */
5799 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
5800 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
5801 return -EINVAL;
5802 }
5803 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
5804 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
5805 return -EINVAL;
5806 }
5807
5808 return 0;
5809}
5810
5811static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
5812 struct netlink_ext_ack *extack)
5813{
5814 struct rtnl_stats_dump_filters filters;
5815 struct net *net = sock_net(skb->sk);
5816 struct net_device *dev = NULL;
5817 int idxattr = 0, prividx = 0;
5818 struct if_stats_msg *ifsm;
5819 struct sk_buff *nskb;
5820 int err;
5821
5822 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
5823 false, extack);
5824 if (err)
5825 return err;
5826
5827 ifsm = nlmsg_data(nlh);
5828 if (ifsm->ifindex > 0)
5829 dev = __dev_get_by_index(net, ifsm->ifindex);
5830 else
5831 return -EINVAL;
5832
5833 if (!dev)
5834 return -ENODEV;
5835
5836 if (!ifsm->filter_mask) {
5837 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get");
5838 return -EINVAL;
5839 }
5840
5841 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack);
5842 if (err)
5843 return err;
5844
5845 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL);
5846 if (!nskb)
5847 return -ENOBUFS;
5848
5849 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
5850 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
5851 0, &filters, &idxattr, &prividx, extack);
5852 if (err < 0) {
5853 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
5854 WARN_ON(err == -EMSGSIZE);
5855 kfree_skb(nskb);
5856 } else {
5857 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
5858 }
5859
5860 return err;
5861}
5862
5863static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
5864{
5865 struct netlink_ext_ack *extack = cb->extack;
5866 int h, s_h, err, s_idx, s_idxattr, s_prividx;
5867 struct rtnl_stats_dump_filters filters;
5868 struct net *net = sock_net(skb->sk);
5869 unsigned int flags = NLM_F_MULTI;
5870 struct if_stats_msg *ifsm;
5871 struct hlist_head *head;
5872 struct net_device *dev;
5873 int idx = 0;
5874
5875 s_h = cb->args[0];
5876 s_idx = cb->args[1];
5877 s_idxattr = cb->args[2];
5878 s_prividx = cb->args[3];
5879
5880 cb->seq = net->dev_base_seq;
5881
5882 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
5883 if (err)
5884 return err;
5885
5886 ifsm = nlmsg_data(cb->nlh);
5887 if (!ifsm->filter_mask) {
5888 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
5889 return -EINVAL;
5890 }
5891
5892 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters,
5893 extack);
5894 if (err)
5895 return err;
5896
5897 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5898 idx = 0;
5899 head = &net->dev_index_head[h];
5900 hlist_for_each_entry(dev, head, index_hlist) {
5901 if (idx < s_idx)
5902 goto cont;
5903 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
5904 NETLINK_CB(cb->skb).portid,
5905 cb->nlh->nlmsg_seq, 0,
5906 flags, &filters,
5907 &s_idxattr, &s_prividx,
5908 extack);
5909 /* If we ran out of room on the first message,
5910 * we're in trouble
5911 */
5912 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
5913
5914 if (err < 0)
5915 goto out;
5916 s_prividx = 0;
5917 s_idxattr = 0;
5918 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
5919cont:
5920 idx++;
5921 }
5922 }
5923out:
5924 cb->args[3] = s_prividx;
5925 cb->args[2] = s_idxattr;
5926 cb->args[1] = idx;
5927 cb->args[0] = h;
5928
5929 return skb->len;
5930}
5931
5932void rtnl_offload_xstats_notify(struct net_device *dev)
5933{
5934 struct rtnl_stats_dump_filters response_filters = {};
5935 struct net *net = dev_net(dev);
5936 int idxattr = 0, prividx = 0;
5937 struct sk_buff *skb;
5938 int err = -ENOBUFS;
5939
5940 ASSERT_RTNL();
5941
5942 response_filters.mask[0] |=
5943 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
5944 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
5945 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5946
5947 skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
5948 GFP_KERNEL);
5949 if (!skb)
5950 goto errout;
5951
5952 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
5953 &response_filters, &idxattr, &prividx, NULL);
5954 if (err < 0) {
5955 kfree_skb(skb);
5956 goto errout;
5957 }
5958
5959 rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL);
5960 return;
5961
5962errout:
5963 rtnl_set_sk_err(net, RTNLGRP_STATS, err);
5964}
5965EXPORT_SYMBOL(rtnl_offload_xstats_notify);
5966
5967static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
5968 struct netlink_ext_ack *extack)
5969{
5970 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5971 struct rtnl_stats_dump_filters response_filters = {};
5972 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
5973 struct net *net = sock_net(skb->sk);
5974 struct net_device *dev = NULL;
5975 struct if_stats_msg *ifsm;
5976 bool notify = false;
5977 int err;
5978
5979 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
5980 false, extack);
5981 if (err)
5982 return err;
5983
5984 ifsm = nlmsg_data(nlh);
5985 if (ifsm->family != AF_UNSPEC) {
5986 NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC");
5987 return -EINVAL;
5988 }
5989
5990 if (ifsm->ifindex > 0)
5991 dev = __dev_get_by_index(net, ifsm->ifindex);
5992 else
5993 return -EINVAL;
5994
5995 if (!dev)
5996 return -ENODEV;
5997
5998 if (ifsm->filter_mask) {
5999 NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set");
6000 return -EINVAL;
6001 }
6002
6003 err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX,
6004 ifla_stats_set_policy, extack);
6005 if (err < 0)
6006 return err;
6007
6008 if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) {
6009 u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]);
6010
6011 if (req)
6012 err = netdev_offload_xstats_enable(dev, t_l3, extack);
6013 else
6014 err = netdev_offload_xstats_disable(dev, t_l3);
6015
6016 if (!err)
6017 notify = true;
6018 else if (err != -EALREADY)
6019 return err;
6020
6021 response_filters.mask[0] |=
6022 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
6023 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
6024 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
6025 }
6026
6027 if (notify)
6028 rtnl_offload_xstats_notify(dev);
6029
6030 return 0;
6031}
6032
6033/* Process one rtnetlink message. */
6034
6035static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
6036 struct netlink_ext_ack *extack)
6037{
6038 struct net *net = sock_net(skb->sk);
6039 struct rtnl_link *link;
6040 enum rtnl_kinds kind;
6041 struct module *owner;
6042 int err = -EOPNOTSUPP;
6043 rtnl_doit_func doit;
6044 unsigned int flags;
6045 int family;
6046 int type;
6047
6048 type = nlh->nlmsg_type;
6049 if (type > RTM_MAX)
6050 return -EOPNOTSUPP;
6051
6052 type -= RTM_BASE;
6053
6054 /* All the messages must have at least 1 byte length */
6055 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
6056 return 0;
6057
6058 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
6059 kind = rtnl_msgtype_kind(type);
6060
6061 if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
6062 return -EPERM;
6063
6064 rcu_read_lock();
6065 if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
6066 struct sock *rtnl;
6067 rtnl_dumpit_func dumpit;
6068 u32 min_dump_alloc = 0;
6069
6070 link = rtnl_get_link(family, type);
6071 if (!link || !link->dumpit) {
6072 family = PF_UNSPEC;
6073 link = rtnl_get_link(family, type);
6074 if (!link || !link->dumpit)
6075 goto err_unlock;
6076 }
6077 owner = link->owner;
6078 dumpit = link->dumpit;
6079
6080 if (type == RTM_GETLINK - RTM_BASE)
6081 min_dump_alloc = rtnl_calcit(skb, nlh);
6082
6083 err = 0;
6084 /* need to do this before rcu_read_unlock() */
6085 if (!try_module_get(owner))
6086 err = -EPROTONOSUPPORT;
6087
6088 rcu_read_unlock();
6089
6090 rtnl = net->rtnl;
6091 if (err == 0) {
6092 struct netlink_dump_control c = {
6093 .dump = dumpit,
6094 .min_dump_alloc = min_dump_alloc,
6095 .module = owner,
6096 };
6097 err = netlink_dump_start(rtnl, skb, nlh, &c);
6098 /* netlink_dump_start() will keep a reference on
6099 * module if dump is still in progress.
6100 */
6101 module_put(owner);
6102 }
6103 return err;
6104 }
6105
6106 link = rtnl_get_link(family, type);
6107 if (!link || !link->doit) {
6108 family = PF_UNSPEC;
6109 link = rtnl_get_link(PF_UNSPEC, type);
6110 if (!link || !link->doit)
6111 goto out_unlock;
6112 }
6113
6114 owner = link->owner;
6115 if (!try_module_get(owner)) {
6116 err = -EPROTONOSUPPORT;
6117 goto out_unlock;
6118 }
6119
6120 flags = link->flags;
6121 if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
6122 !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
6123 NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
6124 module_put(owner);
6125 goto err_unlock;
6126 }
6127
6128 if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
6129 doit = link->doit;
6130 rcu_read_unlock();
6131 if (doit)
6132 err = doit(skb, nlh, extack);
6133 module_put(owner);
6134 return err;
6135 }
6136 rcu_read_unlock();
6137
6138 rtnl_lock();
6139 link = rtnl_get_link(family, type);
6140 if (link && link->doit)
6141 err = link->doit(skb, nlh, extack);
6142 rtnl_unlock();
6143
6144 module_put(owner);
6145
6146 return err;
6147
6148out_unlock:
6149 rcu_read_unlock();
6150 return err;
6151
6152err_unlock:
6153 rcu_read_unlock();
6154 return -EOPNOTSUPP;
6155}
6156
6157static void rtnetlink_rcv(struct sk_buff *skb)
6158{
6159 netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
6160}
6161
6162static int rtnetlink_bind(struct net *net, int group)
6163{
6164 switch (group) {
6165 case RTNLGRP_IPV4_MROUTE_R:
6166 case RTNLGRP_IPV6_MROUTE_R:
6167 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
6168 return -EPERM;
6169 break;
6170 }
6171 return 0;
6172}
6173
6174static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
6175{
6176 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6177
6178 switch (event) {
6179 case NETDEV_REBOOT:
6180 case NETDEV_CHANGEMTU:
6181 case NETDEV_CHANGEADDR:
6182 case NETDEV_CHANGENAME:
6183 case NETDEV_FEAT_CHANGE:
6184 case NETDEV_BONDING_FAILOVER:
6185 case NETDEV_POST_TYPE_CHANGE:
6186 case NETDEV_NOTIFY_PEERS:
6187 case NETDEV_CHANGEUPPER:
6188 case NETDEV_RESEND_IGMP:
6189 case NETDEV_CHANGEINFODATA:
6190 case NETDEV_CHANGELOWERSTATE:
6191 case NETDEV_CHANGE_TX_QUEUE_LEN:
6192 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
6193 GFP_KERNEL, NULL, 0, 0, NULL);
6194 break;
6195 default:
6196 break;
6197 }
6198 return NOTIFY_DONE;
6199}
6200
6201static struct notifier_block rtnetlink_dev_notifier = {
6202 .notifier_call = rtnetlink_event,
6203};
6204
6205
6206static int __net_init rtnetlink_net_init(struct net *net)
6207{
6208 struct sock *sk;
6209 struct netlink_kernel_cfg cfg = {
6210 .groups = RTNLGRP_MAX,
6211 .input = rtnetlink_rcv,
6212 .cb_mutex = &rtnl_mutex,
6213 .flags = NL_CFG_F_NONROOT_RECV,
6214 .bind = rtnetlink_bind,
6215 };
6216
6217 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
6218 if (!sk)
6219 return -ENOMEM;
6220 net->rtnl = sk;
6221 return 0;
6222}
6223
6224static void __net_exit rtnetlink_net_exit(struct net *net)
6225{
6226 netlink_kernel_release(net->rtnl);
6227 net->rtnl = NULL;
6228}
6229
6230static struct pernet_operations rtnetlink_net_ops = {
6231 .init = rtnetlink_net_init,
6232 .exit = rtnetlink_net_exit,
6233};
6234
6235void __init rtnetlink_init(void)
6236{
6237 if (register_pernet_subsys(&rtnetlink_net_ops))
6238 panic("rtnetlink_init: cannot initialize rtnetlink\n");
6239
6240 register_netdevice_notifier(&rtnetlink_dev_notifier);
6241
6242 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
6243 rtnl_dump_ifinfo, 0);
6244 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
6245 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
6246 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
6247
6248 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
6249 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
6250 rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
6251
6252 rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
6253 rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);
6254
6255 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
6256 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
6257 RTNL_FLAG_BULK_DEL_SUPPORTED);
6258 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);
6259
6260 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
6261 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
6262 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);
6263
6264 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
6265 0);
6266 rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);
6267}