// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/devlink.h>

#include "dev.h"

#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	40

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);

void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	/* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
	 * is used. In some places, e.g. in cfg80211, we have code that will do
	 * something like
	 *   rtnl_lock()
	 *   wiphy_lock()
	 *   ...
	 *   rtnl_unlock()
	 *
	 * and because netdev_run_todo() acquires the RTNL for items on the list
	 * we could cause a situation such as this:
	 * Thread 1			Thread 2
	 *				  rtnl_lock()
	 *				  unregister_netdevice()
	 *				  __rtnl_unlock()
	 * rtnl_lock()
	 * wiphy_lock()
	 * rtnl_unlock()
	 *   netdev_run_todo()
	 *     __rtnl_unlock()
	 *
	 *				// list not empty now
	 *				// because of thread 2
	 *				  rtnl_lock()
	 *   while (!list_empty(...))
	 *     rtnl_lock()
	 *     wiphy_lock()
	 * **** DEADLOCK ****
	 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
	 * it's not used in cases where something is added to the todo list.
	 */
	WARN_ON(!list_empty(&net_todo_list));

	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
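
/* Example (illustrative only, not part of this file): a typical caller
 * serializes netdev configuration under the RTNL like this. The device
 * lookup target "eth0" and the mark_dev_busy() helper are assumptions
 * made up for the sketch.
 *
 *	rtnl_lock();
 *	dev = __dev_get_by_name(net, "eth0");
 *	if (dev)
 *		mark_dev_busy(dev);	// hypothetical helper
 *	rtnl_unlock();
 *
 * rtnl_unlock() must be used rather than a bare mutex_unlock() so that
 * netdev_run_todo() can process pending unregistrations (see the
 * deadlock discussion in __rtnl_unlock() above).
 */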

static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;

	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
		protocol = PF_UNSPEC;

	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
	if (!tab)
		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

	return rcu_dereference_rtnl(tab[msgtype]);
}

static int rtnl_register_internal(struct module *owner,
				  int protocol, int msgtype,
				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
				  unsigned int flags)
{
	struct rtnl_link *link, *old;
	struct rtnl_link __rcu **tab;
	int msgindex;
	int ret = -ENOBUFS;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
		if (!tab)
			goto unlock;

		/* ensures we see the 0 stores */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	old = rtnl_dereference(tab[msgindex]);
	if (old) {
		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
		if (!link)
			goto unlock;
	} else {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			goto unlock;
	}

	WARN_ON(link->owner && link->owner != owner);
	link->owner = owner;

	WARN_ON(doit && link->doit && link->doit != doit);
	if (doit)
		link->doit = doit;
	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
	if (dumpit)
		link->dumpit = dumpit;

	WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
		(flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
	link->flags |= flags;

	/* publish protocol:msgtype */
	rcu_assign_pointer(tab[msgindex], link);
	ret = 0;
	if (old)
		kfree_rcu(old, rcu);
unlock:
	rtnl_unlock();
	return ret;
}

/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);
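
/* Example (illustrative only): a module might hook an RTM message like
 * this from its init path. foo_newlink_doit and the pairing of
 * RTM_NEWLINK with PF_BRIDGE are assumptions for the sketch, not code
 * in tree.
 *
 *	static int foo_newlink_doit(struct sk_buff *skb,
 *				    struct nlmsghdr *nlh,
 *				    struct netlink_ext_ack *extack)
 *	{
 *		return 0;	// hypothetical handler
 *	}
 *
 *	err = rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWLINK,
 *				   foo_newlink_doit, NULL, 0);
 *
 * The matching cleanup is rtnl_unregister(PF_BRIDGE, RTM_NEWLINK) from
 * the module exit path.
 */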

/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	link = rtnl_dereference(tab[msgindex]);
	RCU_INIT_POINTER(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol: Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return;
	}
	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
		link = rtnl_dereference(tab[msgindex]);
		if (!link)
			continue;

		RCU_INIT_POINTER(tab[msgindex], NULL);
		kfree_rcu(link, rcu);
	}
	rtnl_unlock();

	synchronize_net();

	kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);

static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for alloc/setup is here because if ops
	 * does not have them filled up, it is not possible
	 * to use the ops to create a device. So do not
	 * fill up dellink either; leaving it NULL keeps
	 * rtnl_dellink disabled for this kind.
	 */
	if ((ops->alloc || ops->setup) && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	/* Sanity-check max sizes to avoid stack buffer overflow. */
	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
		return -EINVAL;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
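
/* Example (illustrative only): the smallest useful rtnl_link_ops wires
 * up a kind name and a setup callback, much like the dummy driver does.
 * The "foo" kind and foo_setup() are assumptions made up for this
 * sketch.
 *
 *	static void foo_setup(struct net_device *dev)
 *	{
 *		ether_setup(dev);	// hypothetical Ethernet-like device
 *	}
 *
 *	static struct rtnl_link_ops foo_link_ops __read_mostly = {
 *		.kind	= "foo",
 *		.setup	= foo_setup,
 *	};
 *
 *	err = rtnl_link_register(&foo_link_ops);
 *
 * Because .setup is set and .dellink is not, __rtnl_link_register()
 * defaults .dellink to unregister_netdevice_queue above.
 */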

static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		/* We held write locked pernet_ops_rwsem, and parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			if (atomic_read(&net->dev_unreg_count) > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) +	/* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);	/* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	ASSERT_RTNL();

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);

/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del_rcu(&ops->list);
	rtnl_unlock();

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);
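
/* Example (illustrative only): an address family registers its ops once
 * at init time, as IPv4 does with inet_af_ops. The foo_af_ops structure
 * and its foo_fill_link_af callback are assumptions for the sketch.
 *
 *	static struct rtnl_af_ops foo_af_ops __read_mostly = {
 *		.family		= AF_INET,
 *		.fill_link_af	= foo_fill_link_af,	// hypothetical
 *	};
 *
 *	rtnl_af_register(&foo_af_ops);
 *
 * The callbacks are then invoked under rcu_read_lock() from the
 * IFLA_AF_SPEC size/fill helpers below.
 */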

static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	rcu_read_lock();
	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}
	rcu_read_unlock();

	return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}

int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 const struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;

	nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	/* nothing is dumped for dst_default_metrics, so just skip the loop */
	if (metrics == dst_default_metrics.metrics)
		return 0;

	mx = nla_nest_start_noflag(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
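
/* Example (illustrative only): a route dump callback emits its metrics
 * array via this helper while building an RTM_NEWROUTE message, much as
 * fib_dump_info() in net/ipv4/fib_semantics.c does. The fi variable is
 * an assumption standing in for a fib_info.
 *
 *	if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
 *		goto nla_put_failure;
 *
 * The helper nests the values under RTA_METRICS and cancels the nest
 * when no metric is set, so callers only need to check for failure.
 */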

int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_error = error,
		.rta_id = id,
	};

	if (dst) {
		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
		ci.rta_used = dst->__use;
		ci.rta_clntref = atomic_read(&dst->__refcnt);
	}
	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
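
/* Example (illustrative only): route dumping typically ends an
 * RTM_NEWROUTE message with cache info roughly like this (cf. IPv6's
 * rt6_fill_node); the rt and expires variables are assumptions for the
 * sketch.
 *
 *	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
 *		goto nla_put_failure;
 */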

static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_TESTING ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev) && !netif_testing(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_TESTING:
		if (netif_oper_up(dev))
			operstate = IF_OPER_TESTING;
		break;

	case IF_OPER_DORMANT:
		if (netif_oper_up(dev))
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		write_lock(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock(&dev_base_lock);
		netdev_state_change(dev);
	}
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}

static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}

/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(0) + /* nest IFLA_VF_STATS */
			 /* IFLA_VF_STATS_RX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_BROADCAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_MULTICAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		return size;
	} else
		return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
		       vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}

static size_t rtnl_xdp_size(void)
{
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4) +	/* XDP_PROG_ID (or 1st mode) */
			  nla_total_size(4);	/* XDP_<mode>_PROG_ID */

	return xdp_size;
}

static size_t rtnl_prop_list_size(const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	size_t size;

	if (list_empty(&dev->name_node->list))
		return 0;
	size = nla_total_size(0);
	list_for_each_entry(name_node, &dev->name_node->list, list)
		size += nla_total_size(ALTIFNAMSIZ);
	return size;
}

static size_t rtnl_proto_down_size(const struct net_device *dev)
{
	size_t size = nla_total_size(1);

	if (dev->proto_down_reason)
		size += nla_total_size(0) + nla_total_size(4);

	return size;
}

static size_t rtnl_devlink_port_size(const struct net_device *dev)
{
	size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */

	if (dev->devlink_port)
		size += devlink_nl_port_handle_size(dev->devlink_port);

	return size;
}

static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_ALLMULTI */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(4) /* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
				& RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4) /* IFLA_EVENT */
	       + nla_total_size(4) /* IFLA_NEW_NETNSID */
	       + nla_total_size(4) /* IFLA_NEW_IFINDEX */
	       + rtnl_proto_down_size(dev) /* proto down */
	       + nla_total_size(4) /* IFLA_TARGET_NETNSID */
	       + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
	       + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
	       + nla_total_size(4) /* IFLA_MIN_MTU */
	       + nla_total_size(4) /* IFLA_MAX_MTU */
	       + rtnl_prop_list_size(dev)
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
	       + rtnl_devlink_port_size(dev)
	       + 0;
}

static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}

static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}

static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_phys_item_id ppid = { };
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct rtnl_link_stats64 *sp;
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
	if (!attr)
		return -EMSGSIZE;

	sp = nla_data(attr);
	dev_get_stats(dev, sp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (!attr)
		return -EMSGSIZE;

	copy_rtnl_link_stats(nla_data(attr), sp);

	return 0;
}

static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       struct nlattr *vfinfo)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_broadcast vf_broadcast;
	struct ifla_vf_info ivi;
	struct ifla_vf_guid node_guid;
	struct ifla_vf_guid port_guid;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query. Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
	memset(&node_guid, 0, sizeof(node_guid));
	memset(&port_guid, 0, sizeof(port_guid));

	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf =
		node_guid.vf =
		port_guid.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
	if (!vf)
		goto nla_put_vfinfo_failure;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;

	if (dev->netdev_ops->ndo_get_vf_guid &&
	    !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
					      &port_guid)) {
		if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
			    &node_guid) ||
		    nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
			    &port_guid))
			goto nla_put_vf_failure;
	}
	vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	memset(&vf_stats, 0, sizeof(vf_stats));
	if (dev->netdev_ops->ndo_get_vf_stats)
		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
						  &vf_stats);
	vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
	if (!vfstats)
		goto nla_put_vf_failure;
	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
			      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
			      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
			      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
		nla_nest_cancel(skb, vfstats);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfstats);
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
nla_put_vfinfo_failure:
	nla_nest_cancel(skb, vfinfo);
	return -EMSGSIZE;
}

static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
					   struct net_device *dev,
					   u32 ext_filter_mask)
{
	struct nlattr *vfinfo;
	int i, num_vfs;

	if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
		return 0;

	num_vfs = dev_num_vf(dev->dev.parent);
	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
		return -EMSGSIZE;

	if (!dev->netdev_ops->ndo_get_vf_config)
		return 0;

	vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
	if (!vfinfo)
		return -EMSGSIZE;

	for (i = 0; i < num_vfs; i++) {
		if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
			return -EMSGSIZE;
	}

	nla_nest_end(skb, vfinfo);
	return 0;
}

static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start = dev->mem_start;
	map.mem_end = dev->mem_end;
	map.base_addr = dev->base_addr;
	map.irq = dev->irq;
	map.dma = dev->dma;
	map.port = dev->if_port;

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}

static u32 rtnl_xdp_prog_skb(struct net_device *dev)
{
	const struct bpf_prog *generic_xdp_prog;

	ASSERT_RTNL();

	generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
	if (!generic_xdp_prog)
		return 0;
	return generic_xdp_prog->aux->id;
}

static u32 rtnl_xdp_prog_drv(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_DRV);
}

static u32 rtnl_xdp_prog_hw(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_HW);
}

static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
			       u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
			       u32 (*get_prog_id)(struct net_device *dev))
{
	u32 curr_id;
	int err;

	curr_id = get_prog_id(dev);
	if (!curr_id)
		return 0;

	*prog_id = curr_id;
	err = nla_put_u32(skb, attr, curr_id);
	if (err)
		return err;

	if (*mode != XDP_ATTACHED_NONE)
		*mode = XDP_ATTACHED_MULTI;
	else
		*mode = tgt_mode;

	return 0;
}

static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *xdp;
	u32 prog_id;
	int err;
	u8 mode;

	xdp = nla_nest_start_noflag(skb, IFLA_XDP);
	if (!xdp)
		return -EMSGSIZE;

	prog_id = 0;
	mode = XDP_ATTACHED_NONE;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
				  IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
				  IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
				  IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
	if (err)
		goto err_cancel;

	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
	if (err)
		goto err_cancel;

	if (prog_id && mode != XDP_ATTACHED_MULTI) {
		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
		if (err)
			goto err_cancel;
	}

	nla_nest_end(skb, xdp);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xdp);
	return err;
}

static u32 rtnl_get_event(unsigned long event)
{
	u32 rtnl_event_type = IFLA_EVENT_NONE;

	switch (event) {
	case NETDEV_REBOOT:
		rtnl_event_type = IFLA_EVENT_REBOOT;
		break;
	case NETDEV_FEAT_CHANGE:
		rtnl_event_type = IFLA_EVENT_FEATURES;
		break;
	case NETDEV_BONDING_FAILOVER:
		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
		break;
	case NETDEV_NOTIFY_PEERS:
		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
		break;
	case NETDEV_RESEND_IGMP:
		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
		break;
	case NETDEV_CHANGEINFODATA:
		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
		break;
	default:
		break;
	}

	return rtnl_event_type;
}

static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
{
	const struct net_device *upper_dev;
	int ret = 0;

	rcu_read_lock();

	upper_dev = netdev_master_upper_dev_get_rcu(dev);
	if (upper_dev)
		ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);

	rcu_read_unlock();
	return ret;
}

static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
			  bool force)
{
	int ifindex = dev_get_iflink(dev);

	if (force || dev->ifindex != ifindex)
		return nla_put_u32(skb, IFLA_LINK, ifindex);

	return 0;
}

static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
					      struct net_device *dev)
{
	char buf[IFALIASZ];
	int ret;

	ret = dev_get_alias(dev, buf, sizeof(buf));
	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
}

static int rtnl_fill_link_netnsid(struct sk_buff *skb,
				  const struct net_device *dev,
				  struct net *src_net, gfp_t gfp)
{
	bool put_iflink = false;

	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(src_net, link_net, gfp);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				return -EMSGSIZE;

			put_iflink = true;
		}
	}

	return nla_put_iflink(skb, dev, put_iflink);
}

static int rtnl_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev,
			     u32 ext_filter_mask)
{
	const struct rtnl_af_ops *af_ops;
	struct nlattr *af_spec;

	af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
	if (!af_spec)
		return -EMSGSIZE;

	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		struct nlattr *af;
		int err;

		if (!af_ops->fill_link_af)
			continue;

		af = nla_nest_start_noflag(skb, af_ops->family);
		if (!af)
			return -EMSGSIZE;

		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
		/*
		 * Caller may return ENODATA to indicate that there
		 * was no data to be dumped. This is not an error, it
		 * means we should trim the attribute header and
		 * continue.
		 */
		if (err == -ENODATA)
			nla_nest_cancel(skb, af);
		else if (err < 0)
			return -EMSGSIZE;

		nla_nest_end(skb, af);
	}

	nla_nest_end(skb, af_spec);
	return 0;
}

static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
				 const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	int count = 0;

	list_for_each_entry(name_node, &dev->name_node->list, list) {
		if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
			return -EMSGSIZE;
		count++;
	}
	return count;
}

static int rtnl_fill_prop_list(struct sk_buff *skb,
			       const struct net_device *dev)
{
	struct nlattr *prop_list;
	int ret;

	prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
	if (!prop_list)
		return -EMSGSIZE;

	ret = rtnl_fill_alt_ifnames(skb, dev);
	if (ret <= 0)
		goto nest_cancel;

	nla_nest_end(skb, prop_list);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, prop_list);
	return ret;
}

static int rtnl_fill_proto_down(struct sk_buff *skb,
				const struct net_device *dev)
{
	struct nlattr *pr;
	u32 preason;

	if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
		goto nla_put_failure;

	preason = dev->proto_down_reason;
	if (!preason)
		return 0;

	pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
	if (!pr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
		nla_nest_cancel(skb, pr);
		goto nla_put_failure;
	}

	nla_nest_end(skb, pr);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int rtnl_fill_devlink_port(struct sk_buff *skb,
				  const struct net_device *dev)
{
	struct nlattr *devlink_port_nest;
	int ret;

	devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
	if (!devlink_port_nest)
		return -EMSGSIZE;

	if (dev->devlink_port) {
		ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
		if (ret < 0)
			goto nest_cancel;
	}

	nla_nest_end(skb, devlink_port_nest);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, devlink_port_nest);
	return ret;
}

static int rtnl_fill_ifinfo(struct sk_buff *skb,
			    struct net_device *dev, struct net *src_net,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
			    u32 event, int *new_nsid, int new_ifindex,
			    int tgt_netnsid, gfp_t gfp)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	struct Qdisc *qdisc;

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
		goto nla_put_failure;

	qdisc = rtnl_dereference(dev->qdisc);
	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
	    nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
	    nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
	    nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
	    put_master_ifindex(skb, dev) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    (qdisc &&
	     nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
	    nla_put_ifalias(skb, dev) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_up_count) +
			atomic_read(&dev->carrier_down_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
			atomic_read(&dev->carrier_up_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
			atomic_read(&dev->carrier_down_count)))
		goto nla_put_failure;

	if (rtnl_fill_proto_down(skb, dev))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;
	}

	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure;

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
		goto nla_put_failure;

	if (new_nsid &&
	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
		goto nla_put_failure;
	if (new_ifindex &&
	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
		goto nla_put_failure;

	if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
	    nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
		goto nla_put_failure;

	rcu_read_lock();
	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
		goto nla_put_failure_rcu;
	rcu_read_unlock();

	if (rtnl_fill_prop_list(skb, dev))
		goto nla_put_failure;

	if (dev->dev.parent &&
	    nla_put_string(skb, IFLA_PARENT_DEV_NAME,
			   dev_name(dev->dev.parent)))
		goto nla_put_failure;

	if (dev->dev.parent && dev->dev.parent->bus &&
	    nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
			   dev->dev.parent->bus->name))
		goto nla_put_failure;

	if (rtnl_fill_devlink_port(skb, dev))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_rcu:
	rcu_read_unlock();
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_MASTER]		= { .type = NLA_U32 },
	[IFLA_CARRIER]		= { .type = NLA_U8 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
	/* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
	 * allow 0-length string (needed to remove an alias).
	 */
	[IFLA_IFALIAS]		= { .type = NLA_BINARY, .len = IFALIASZ - 1 },
	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },	/* ignored */
	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
	[IFLA_XDP]		= { .type = NLA_NESTED },
	[IFLA_EVENT]		= { .type = NLA_U32 },
	[IFLA_GROUP]		= { .type = NLA_U32 },
	[IFLA_TARGET_NETNSID]	= { .type = NLA_S32 },
	[IFLA_CARRIER_UP_COUNT]	= { .type = NLA_U32 },
	[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
	[IFLA_MIN_MTU]		= { .type = NLA_U32 },
	[IFLA_MAX_MTU]		= { .type = NLA_U32 },
	[IFLA_PROP_LIST]	= { .type = NLA_NESTED },
	[IFLA_ALT_IFNAME]	= { .type = NLA_STRING,
				    .len = ALTIFNAMSIZ - 1 },
	[IFLA_PERM_ADDRESS]	= { .type = NLA_REJECT },
	[IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
	[IFLA_NEW_IFINDEX]	= NLA_POLICY_MIN(NLA_S32, 1),
	[IFLA_PARENT_DEV_NAME]	= { .type = NLA_NUL_STRING },
	[IFLA_GRO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_TSO_MAX_SIZE]	= { .type = NLA_REJECT },
	[IFLA_TSO_MAX_SEGS]	= { .type = NLA_REJECT },
	[IFLA_ALLMULTI]		= { .type = NLA_REJECT },
};

static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
};

static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
	[IFLA_VF_BROADCAST]	= { .type = NLA_REJECT },
	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
	[IFLA_VF_VLAN_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
	[IFLA_VF_TRUST]		= { .len = sizeof(struct ifla_vf_trust) },
	[IFLA_VF_IB_NODE_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
	[IFLA_VF_IB_PORT_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
};

static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
	[IFLA_PORT_VF]		= { .type = NLA_U32 },
	[IFLA_PORT_PROFILE]	= { .type = NLA_STRING,
				    .len = PORT_PROFILE_MAX },
	[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
				      .len = PORT_UUID_MAX },
	[IFLA_PORT_HOST_UUID]	= { .type = NLA_STRING,
				    .len = PORT_UUID_MAX },
	[IFLA_PORT_REQUEST]	= { .type = NLA_U8, },
	[IFLA_PORT_RESPONSE]	= { .type = NLA_U16, },

	/* Unused, but we need to keep it here since user space could
	 * fill it. It's also broken with regard to NLA_BINARY use in
	 * combination with structs.
	 */
	[IFLA_PORT_VSI_TYPE]	= { .type = NLA_BINARY,
				    .len = sizeof(struct ifla_port_vsi) },
};

static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
	[IFLA_XDP_UNSPEC]	= { .strict_start_type = IFLA_XDP_EXPECTED_FD },
	[IFLA_XDP_FD]		= { .type = NLA_S32 },
	[IFLA_XDP_EXPECTED_FD]	= { .type = NLA_S32 },
	[IFLA_XDP_ATTACHED]	= { .type = NLA_U8 },
	[IFLA_XDP_FLAGS]	= { .type = NLA_U32 },
	[IFLA_XDP_PROG_ID]	= { .type = NLA_U32 },
};
2023
2024static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
2025{
2026 const struct rtnl_link_ops *ops = NULL;
2027 struct nlattr *linfo[IFLA_INFO_MAX + 1];
2028
2029 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
2030 return NULL;
2031
2032 if (linfo[IFLA_INFO_KIND]) {
2033 char kind[MODULE_NAME_LEN];
2034
2035 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
2036 ops = rtnl_link_ops_get(kind);
2037 }
2038
2039 return ops;
2040}
2041
2042static bool link_master_filtered(struct net_device *dev, int master_idx)
2043{
2044 struct net_device *master;
2045
2046 if (!master_idx)
2047 return false;
2048
2049 master = netdev_master_upper_dev_get(dev);
2050
	/* 0 is already used to denote IFLA_MASTER wasn't passed, therefore we
	 * need another invalid ifindex value to denote "no master".
	 */
	if (master_idx == -1)
		return !!master;

	if (!master || master->ifindex != master_idx)
		return true;

	return false;
}

static bool link_kind_filtered(const struct net_device *dev,
			       const struct rtnl_link_ops *kind_ops)
{
	if (kind_ops && dev->rtnl_link_ops != kind_ops)
		return true;

	return false;
}

static bool link_dump_filtered(struct net_device *dev,
			       int master_idx,
			       const struct rtnl_link_ops *kind_ops)
{
	if (link_master_filtered(dev, master_idx) ||
	    link_kind_filtered(dev, kind_ops))
		return true;

	return false;
}

/**
 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
 * @sk: netlink socket
 * @netnsid: network namespace identifier
 *
 * Returns the network namespace identified by netnsid on success or an error
 * pointer on failure.
 */
struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
{
	struct net *net;

	net = get_net_ns_by_id(sock_net(sk), netnsid);
	if (!net)
		return ERR_PTR(-EINVAL);

	/* For now, the caller is required to have CAP_NET_ADMIN in
	 * the user namespace owning the target net ns.
	 */
	if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
		put_net(net);
		return ERR_PTR(-EACCES);
	}
	return net;
}
EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);

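/* Validate the header of an RTM_GETLINK dump request. With strict checking
 * the ifinfomsg header must be present with all fields zeroed (filtering by
 * ifi_index is not supported for dumps) and only known attributes may be
 * attached; otherwise we fall back to the legacy, permissive parse below.
 */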
static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
				      bool strict_check, struct nlattr **tb,
				      struct netlink_ext_ack *extack)
{
	int hdrlen;

	if (strict_check) {
		struct ifinfomsg *ifm;

		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
			NL_SET_ERR_MSG(extack, "Invalid header for link dump");
			return -EINVAL;
		}

		ifm = nlmsg_data(nlh);
		if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
		    ifm->ifi_change) {
			NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
			return -EINVAL;
		}
		if (ifm->ifi_index) {
			NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
			return -EINVAL;
		}

		return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
						     IFLA_MAX, ifla_policy,
						     extack);
	}

	/* A hack to preserve the kernel<->userspace interface.
	 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
	 * However, before Linux v3.9 the code here assumed rtgenmsg, and that's
	 * what iproute2 < v3.9.0 used.
	 * We can detect the old iproute2: even including the IFLA_EXT_MASK
	 * attribute, its netlink message is shorter than struct ifinfomsg.
	 */
	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
				      extack);
}

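/* RTM_GETLINK dump handler: walk the target namespace's device hash table
 * and emit one RTM_NEWLINK message per device, honouring the optional
 * master/kind/netnsid filters parsed from the request. cb->args[0]/[1]
 * record the hash bucket and index so an interrupted dump resumes where it
 * left off.
 */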
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	struct net *tgt_net = net;
	int h, s_h;
	int idx = 0, s_idx;
	struct net_device *dev;
	struct hlist_head *head;
	struct nlattr *tb[IFLA_MAX+1];
	u32 ext_filter_mask = 0;
	const struct rtnl_link_ops *kind_ops = NULL;
	unsigned int flags = NLM_F_MULTI;
	int master_idx = 0;
	int netnsid = -1;
	int err, i;

	s_h = cb->args[0];
	s_idx = cb->args[1];

	err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
	if (err < 0) {
		if (cb->strict_check)
			return err;

		goto walk_entries;
	}

	for (i = 0; i <= IFLA_MAX; ++i) {
		if (!tb[i])
			continue;

		/* new attributes should only be added with strict checking */
		switch (i) {
		case IFLA_TARGET_NETNSID:
			netnsid = nla_get_s32(tb[i]);
			tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
			if (IS_ERR(tgt_net)) {
				NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
				return PTR_ERR(tgt_net);
			}
			break;
		case IFLA_EXT_MASK:
			ext_filter_mask = nla_get_u32(tb[i]);
			break;
		case IFLA_MASTER:
			master_idx = nla_get_u32(tb[i]);
			break;
		case IFLA_LINKINFO:
			kind_ops = linkinfo_to_kind_ops(tb[i]);
			break;
		default:
			if (cb->strict_check) {
				NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
				return -EINVAL;
			}
		}
	}

	if (master_idx || kind_ops)
		flags |= NLM_F_DUMP_FILTERED;

walk_entries:
	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &tgt_net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (link_dump_filtered(dev, master_idx, kind_ops))
				goto cont;
			if (idx < s_idx)
				goto cont;
			err = rtnl_fill_ifinfo(skb, dev, net,
					       RTM_NEWLINK,
					       NETLINK_CB(cb->skb).portid,
					       nlh->nlmsg_seq, 0, flags,
					       ext_filter_mask, 0, NULL, 0,
					       netnsid, GFP_KERNEL);

			if (err < 0) {
				if (likely(skb->len))
					goto out;

				goto out_err;
			}
cont:
			idx++;
		}
	}
out:
	err = skb->len;
out_err:
	cb->args[1] = idx;
	cb->args[0] = h;
	cb->seq = tgt_net->dev_base_seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
	if (netnsid >= 0)
		put_net(tgt_net);

	return err;
}

int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
			struct netlink_ext_ack *exterr)
{
	return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
				    exterr);
}
EXPORT_SYMBOL(rtnl_nla_parse_ifla);

struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
{
	struct net *net;
	/* Examine the link attributes and figure out which
	 * network namespace we are talking about.
	 */
	if (tb[IFLA_NET_NS_PID])
		net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
	else if (tb[IFLA_NET_NS_FD])
		net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
	else
		net = get_net(src_net);
	return net;
}
EXPORT_SYMBOL(rtnl_link_get_net);

/* Figure out which network namespace we are talking about by
 * examining the link attributes in the following order:
 *
 * 1. IFLA_NET_NS_PID
 * 2. IFLA_NET_NS_FD
 * 3. IFLA_TARGET_NETNSID
 */
static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
					       struct nlattr *tb[])
{
	struct net *net;

	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
		return rtnl_link_get_net(src_net, tb);

	if (!tb[IFLA_TARGET_NETNSID])
		return get_net(src_net);

	net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
	if (!net)
		return ERR_PTR(-EINVAL);

	return net;
}

static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
					     struct net *src_net,
					     struct nlattr *tb[], int cap)
{
	struct net *net;

	net = rtnl_link_get_net_by_nlattr(src_net, tb);
	if (IS_ERR(net))
		return net;

	if (!netlink_ns_capable(skb, net->user_ns, cap)) {
		put_net(net);
		return ERR_PTR(-EPERM);
	}

	return net;
}

/* Verify that rtnetlink requests do not pass additional properties
 * potentially referring to different network namespaces.
 */
static int rtnl_ensure_unique_netns(struct nlattr *tb[],
				    struct netlink_ext_ack *extack,
				    bool netns_id_only)
{
	if (netns_id_only) {
		if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
			return 0;

		NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
		return -EOPNOTSUPP;
	}

	if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
		goto invalid_attr;

	if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
		goto invalid_attr;

	if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
		goto invalid_attr;

	return 0;

invalid_attr:
	NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
	return -EINVAL;
}

static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
			    int max_tx_rate)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_set_vf_rate)
		return -EOPNOTSUPP;
	if (max_tx_rate && max_tx_rate < min_tx_rate)
		return -EINVAL;

	return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
}

static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
			    struct netlink_ext_ack *extack)
{
	if (dev) {
		if (tb[IFLA_ADDRESS] &&
		    nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
			return -EINVAL;

		if (tb[IFLA_BROADCAST] &&
		    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
			return -EINVAL;
	}

	if (tb[IFLA_AF_SPEC]) {
		struct nlattr *af;
		int rem, err;

		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
			const struct rtnl_af_ops *af_ops;

			af_ops = rtnl_af_lookup(nla_type(af));
			if (!af_ops)
				return -EAFNOSUPPORT;

			if (!af_ops->set_link_af)
				return -EOPNOTSUPP;

			if (af_ops->validate_link_af) {
				err = af_ops->validate_link_af(dev, af, extack);
				if (err < 0)
					return err;
			}
		}
	}

	return 0;
}

static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
				  int guid_type)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
}

static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
{
	if (dev->type != ARPHRD_INFINIBAND)
		return -EOPNOTSUPP;

	return handle_infiniband_guid(dev, ivt, guid_type);
}

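/* Apply the per-VF IFLA_VF_* attributes parsed from one IFLA_VF_INFO nest.
 * Attributes are handled in order and we stop at the first failure, so a
 * partially applied request is possible; nothing here is rolled back.
 */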
static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err = -EINVAL;

	if (tb[IFLA_VF_MAC]) {
		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);

		if (ivm->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_mac)
			err = ops->ndo_set_vf_mac(dev, ivm->vf,
						  ivm->mac);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_VLAN]) {
		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);

		if (ivv->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_vlan)
			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
						   ivv->qos,
						   htons(ETH_P_8021Q));
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_VLAN_LIST]) {
		struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
		struct nlattr *attr;
		int rem, len = 0;

		err = -EOPNOTSUPP;
		if (!ops->ndo_set_vf_vlan)
			return err;

		nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
			if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
			    nla_len(attr) < NLA_HDRLEN) {
				return -EINVAL;
			}
			if (len >= MAX_VLAN_LIST_LEN)
				return -EOPNOTSUPP;
			ivvl[len] = nla_data(attr);

			len++;
		}
		if (len == 0)
			return -EINVAL;

		if (ivvl[0]->vf >= INT_MAX)
			return -EINVAL;
		err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
					   ivvl[0]->qos, ivvl[0]->vlan_proto);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_TX_RATE]) {
		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
		struct ifla_vf_info ivf;

		if (ivt->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_get_vf_config)
			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
		if (err < 0)
			return err;

		err = rtnl_set_vf_rate(dev, ivt->vf,
				       ivf.min_tx_rate, ivt->rate);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_RATE]) {
		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);

		if (ivt->vf >= INT_MAX)
			return -EINVAL;

		err = rtnl_set_vf_rate(dev, ivt->vf,
				       ivt->min_tx_rate, ivt->max_tx_rate);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_SPOOFCHK]) {
		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);

		if (ivs->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_spoofchk)
			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
						       ivs->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_LINK_STATE]) {
		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);

		if (ivl->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_link_state)
			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
							 ivl->link_state);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_RSS_QUERY_EN]) {
		struct ifla_vf_rss_query_en *ivrssq_en;

		err = -EOPNOTSUPP;
		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
		if (ivrssq_en->vf >= INT_MAX)
			return -EINVAL;
		if (ops->ndo_set_vf_rss_query_en)
			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
							   ivrssq_en->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_TRUST]) {
		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);

		if (ivt->vf >= INT_MAX)
			return -EINVAL;
		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_trust)
			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_IB_NODE_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);

		if (ivt->vf >= INT_MAX)
			return -EINVAL;
		if (!ops->ndo_set_vf_guid)
			return -EOPNOTSUPP;
		return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
	}

	if (tb[IFLA_VF_IB_PORT_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);

		if (ivt->vf >= INT_MAX)
			return -EINVAL;
		if (!ops->ndo_set_vf_guid)
			return -EOPNOTSUPP;

		return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
	}

	return err;
}

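/* Change dev's master device. If dev already has a master it is unslaved
 * first (via the current master's ndo_del_slave); an ifindex of 0 therefore
 * just detaches, while a non-zero ifindex enslaves dev to that device via
 * its ndo_add_slave.
 */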
static int do_set_master(struct net_device *dev, int ifindex,
			 struct netlink_ext_ack *extack)
{
	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
	const struct net_device_ops *ops;
	int err;

	if (upper_dev) {
		if (upper_dev->ifindex == ifindex)
			return 0;
		ops = upper_dev->netdev_ops;
		if (ops->ndo_del_slave) {
			err = ops->ndo_del_slave(upper_dev, dev);
			if (err)
				return err;
		} else {
			return -EOPNOTSUPP;
		}
	}

	if (ifindex) {
		upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
		if (!upper_dev)
			return -EINVAL;
		ops = upper_dev->netdev_ops;
		if (ops->ndo_add_slave) {
			err = ops->ndo_add_slave(upper_dev, dev, extack);
			if (err)
				return err;
		} else {
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
	[IFLA_PROTO_DOWN_REASON_MASK]	= { .type = NLA_U32 },
	[IFLA_PROTO_DOWN_REASON_VALUE]	= { .type = NLA_U32 },
};

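/* Handle IFLA_PROTO_DOWN / IFLA_PROTO_DOWN_REASON. Reason bits are updated
 * first; protodown itself cannot be cleared while any reason bit remains
 * set, which keeps independent users from undoing each other's state.
 */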
static int do_set_proto_down(struct net_device *dev,
			     struct nlattr *nl_proto_down,
			     struct nlattr *nl_proto_down_reason,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
	unsigned long mask = 0;
	u32 value;
	bool proto_down;
	int err;

	if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) {
		NL_SET_ERR_MSG(extack, "Protodown not supported by device");
		return -EOPNOTSUPP;
	}

	if (nl_proto_down_reason) {
		err = nla_parse_nested_deprecated(pdreason,
						  IFLA_PROTO_DOWN_REASON_MAX,
						  nl_proto_down_reason,
						  ifla_proto_down_reason_policy,
						  NULL);
		if (err < 0)
			return err;

		if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
			NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
			return -EINVAL;
		}

		value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);

		if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
			mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);

		dev_change_proto_down_reason(dev, mask, value);
	}

	if (nl_proto_down) {
		proto_down = nla_get_u8(nl_proto_down);

		/* Don't turn off protodown if there are active reasons */
		if (!proto_down && dev->proto_down_reason) {
			NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
			return -EBUSY;
		}
		err = dev_change_proto_down(dev, proto_down);
		if (err)
			return err;
	}

	return 0;
}

#define DO_SETLINK_MODIFIED	0x01
/* notify flag means notify + modified. */
#define DO_SETLINK_NOTIFY	0x03
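/* Apply the parsed IFLA_* attributes in tb[] to dev. status accumulates
 * DO_SETLINK_MODIFIED once anything changed and DO_SETLINK_NOTIFY (which
 * includes MODIFIED, see above) when userspace must be told via
 * netdev_state_change(). On error after a modification we can only warn
 * that the device may have been left half-configured.
 */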
static int do_setlink(const struct sk_buff *skb,
		      struct net_device *dev, struct ifinfomsg *ifm,
		      struct netlink_ext_ack *extack,
		      struct nlattr **tb, int status)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	char ifname[IFNAMSIZ];
	int err;

	err = validate_linkmsg(dev, tb, extack);
	if (err < 0)
		return err;

	if (tb[IFLA_IFNAME])
		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		ifname[0] = '\0';

	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
		const char *pat = ifname[0] ? ifname : NULL;
		struct net *net;
		int new_ifindex;

		net = rtnl_link_get_net_capable(skb, dev_net(dev),
						tb, CAP_NET_ADMIN);
		if (IS_ERR(net)) {
			err = PTR_ERR(net);
			goto errout;
		}

		if (tb[IFLA_NEW_IFINDEX])
			new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]);
		else
			new_ifindex = 0;

		err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
		put_net(net);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_MAP]) {
		struct rtnl_link_ifmap *u_map;
		struct ifmap k_map;

		if (!ops->ndo_set_config) {
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (!netif_device_present(dev)) {
			err = -ENODEV;
			goto errout;
		}

		u_map = nla_data(tb[IFLA_MAP]);
		k_map.mem_start = (unsigned long) u_map->mem_start;
		k_map.mem_end = (unsigned long) u_map->mem_end;
		k_map.base_addr = (unsigned short) u_map->base_addr;
		k_map.irq = (unsigned char) u_map->irq;
		k_map.dma = (unsigned char) u_map->dma;
		k_map.port = (unsigned char) u_map->port;

		err = ops->ndo_set_config(dev, &k_map);
		if (err < 0)
			goto errout;

		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_ADDRESS]) {
		struct sockaddr *sa;
		int len;

		len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
						  sizeof(*sa));
		sa = kmalloc(len, GFP_KERNEL);
		if (!sa) {
			err = -ENOMEM;
			goto errout;
		}
		sa->sa_family = dev->type;
		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
		       dev->addr_len);
		err = dev_set_mac_address_user(dev, sa, extack);
		kfree(sa);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_MTU]) {
		err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_GROUP]) {
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
		status |= DO_SETLINK_NOTIFY;
	}

	/*
	 * An interface selected by ifindex, with an interface name also
	 * provided, implies that a name change has been requested.
	 */
	if (ifm->ifi_index > 0 && ifname[0]) {
		err = dev_change_name(dev, ifname);
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_IFALIAS]) {
		err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
				    nla_len(tb[IFLA_IFALIAS]));
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_BROADCAST]) {
		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	}

	if (tb[IFLA_MASTER]) {
		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (ifm->ifi_flags || ifm->ifi_change) {
		err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
				       extack);
		if (err < 0)
			goto errout;
	}

	if (tb[IFLA_CARRIER]) {
		err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_TXQLEN]) {
		unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);

		err = dev_change_tx_queue_len(dev, value);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_GSO_MAX_SIZE]) {
		u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);

		if (max_size > dev->tso_max_size) {
			err = -EINVAL;
			goto errout;
		}

		if (dev->gso_max_size ^ max_size) {
			netif_set_gso_max_size(dev, max_size);
			status |= DO_SETLINK_MODIFIED;
		}
	}

	if (tb[IFLA_GSO_MAX_SEGS]) {
		u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);

		if (max_segs > GSO_MAX_SEGS || max_segs > dev->tso_max_segs) {
			err = -EINVAL;
			goto errout;
		}

		if (dev->gso_max_segs ^ max_segs) {
			netif_set_gso_max_segs(dev, max_segs);
			status |= DO_SETLINK_MODIFIED;
		}
	}

	if (tb[IFLA_GRO_MAX_SIZE]) {
		u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);

		if (dev->gro_max_size ^ gro_max_size) {
			netif_set_gro_max_size(dev, gro_max_size);
			status |= DO_SETLINK_MODIFIED;
		}
	}

	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));

	if (tb[IFLA_LINKMODE]) {
		unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);

		write_lock(&dev_base_lock);
		if (dev->link_mode ^ value)
			status |= DO_SETLINK_NOTIFY;
		dev->link_mode = value;
		write_unlock(&dev_base_lock);
	}

	if (tb[IFLA_VFINFO_LIST]) {
		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
			if (nla_type(attr) != IFLA_VF_INFO ||
			    nla_len(attr) < NLA_HDRLEN) {
				err = -EINVAL;
				goto errout;
			}
			err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
							  attr,
							  ifla_vf_policy,
							  NULL);
			if (err < 0)
				goto errout;
			err = do_setvfinfo(dev, vfinfo);
			if (err < 0)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_VF_PORTS]) {
		struct nlattr *port[IFLA_PORT_MAX+1];
		struct nlattr *attr;
		int vf;
		int rem;

		err = -EOPNOTSUPP;
		if (!ops->ndo_set_vf_port)
			goto errout;

		nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
			if (nla_type(attr) != IFLA_VF_PORT ||
			    nla_len(attr) < NLA_HDRLEN) {
				err = -EINVAL;
				goto errout;
			}
			err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
							  attr,
							  ifla_port_policy,
							  NULL);
			if (err < 0)
				goto errout;
			if (!port[IFLA_PORT_VF]) {
				err = -EOPNOTSUPP;
				goto errout;
			}
			vf = nla_get_u32(port[IFLA_PORT_VF]);
			err = ops->ndo_set_vf_port(dev, vf, port);
			if (err < 0)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_PORT_SELF]) {
		struct nlattr *port[IFLA_PORT_MAX+1];

		err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
						  tb[IFLA_PORT_SELF],
						  ifla_port_policy, NULL);
		if (err < 0)
			goto errout;

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_port)
			err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_AF_SPEC]) {
		struct nlattr *af;
		int rem;

		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
			const struct rtnl_af_ops *af_ops;

			BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));

			err = af_ops->set_link_af(dev, af, extack);
			if (err < 0)
				goto errout;

			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
		err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
					tb[IFLA_PROTO_DOWN_REASON], extack);
		if (err)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_XDP]) {
		struct nlattr *xdp[IFLA_XDP_MAX + 1];
		u32 xdp_flags = 0;

		err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
						  tb[IFLA_XDP],
						  ifla_xdp_policy, NULL);
		if (err < 0)
			goto errout;

		if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
			err = -EINVAL;
			goto errout;
		}

		if (xdp[IFLA_XDP_FLAGS]) {
			xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
			if (xdp_flags & ~XDP_FLAGS_MASK) {
				err = -EINVAL;
				goto errout;
			}
			if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
				err = -EINVAL;
				goto errout;
			}
		}

		if (xdp[IFLA_XDP_FD]) {
			int expected_fd = -1;

			if (xdp_flags & XDP_FLAGS_REPLACE) {
				if (!xdp[IFLA_XDP_EXPECTED_FD]) {
					err = -EINVAL;
					goto errout;
				}
				expected_fd =
					nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
			}

			err = dev_change_xdp_fd(dev, extack,
						nla_get_s32(xdp[IFLA_XDP_FD]),
						expected_fd,
						xdp_flags);
			if (err)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}

errout:
	if (status & DO_SETLINK_MODIFIED) {
		if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
			netdev_state_change(dev);

		if (err < 0)
			net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
					     dev->name);
	}

	return err;
}

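/* Look up a device by IFLA_IFNAME or, failing that, IFLA_ALT_IFNAME.
 * Returns NULL when neither attribute is present or no such device exists.
 */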
static struct net_device *rtnl_dev_get(struct net *net,
				       struct nlattr *tb[])
{
	char ifname[ALTIFNAMSIZ];

	if (tb[IFLA_IFNAME])
		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else if (tb[IFLA_ALT_IFNAME])
		nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
	else
		return NULL;

	return __dev_get_by_name(net, ifname);
}

static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	int err;
	struct nlattr *tb[IFLA_MAX+1];

	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
				     ifla_policy, extack);
	if (err < 0)
		goto errout;

	err = rtnl_ensure_unique_netns(tb, extack, false);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
		dev = rtnl_dev_get(net, tb);
	else
		goto errout;

	if (dev == NULL) {
		err = -ENODEV;
		goto errout;
	}

	err = do_setlink(skb, dev, ifm, extack, tb, 0);
errout:
	return err;
}

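/* Delete every device in the given group. A first pass verifies that each
 * member's rtnl_link_ops supports ->dellink, so the operation either fails
 * up front or removes the whole group in one batch.
 */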
static int rtnl_group_dellink(const struct net *net, int group)
{
	struct net_device *dev, *aux;
	LIST_HEAD(list_kill);
	bool found = false;

	if (!group)
		return -EPERM;

	for_each_netdev(net, dev) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			found = true;
			ops = dev->rtnl_link_ops;
			if (!ops || !ops->dellink)
				return -EOPNOTSUPP;
		}
	}

	if (!found)
		return -ENODEV;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			ops = dev->rtnl_link_ops;
			ops->dellink(dev, &list_kill);
		}
	}
	unregister_netdevice_many(&list_kill);

	return 0;
}

int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh)
{
	const struct rtnl_link_ops *ops;
	LIST_HEAD(list_kill);

	ops = dev->rtnl_link_ops;
	if (!ops || !ops->dellink)
		return -EOPNOTSUPP;

	ops->dellink(dev, &list_kill);
	unregister_netdevice_many_notify(&list_kill, portid, nlh);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_delete_link);

static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	u32 portid = NETLINK_CB(skb).portid;
	struct net *tgt_net = net;
	struct net_device *dev = NULL;
	struct ifinfomsg *ifm;
	struct nlattr *tb[IFLA_MAX+1];
	int err;
	int netnsid = -1;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
				     ifla_policy, extack);
	if (err < 0)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, true);
	if (err < 0)
		return err;

	if (tb[IFLA_TARGET_NETNSID]) {
		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
		if (IS_ERR(tgt_net))
			return PTR_ERR(tgt_net);
	}

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
		dev = rtnl_dev_get(net, tb);
	else if (tb[IFLA_GROUP])
		err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
	else
		goto out;

	if (!dev) {
		if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
			err = -ENODEV;

		goto out;
	}

	err = rtnl_delete_link(dev, portid, nlh);

out:
	if (netnsid >= 0)
		put_net(tgt_net);

	return err;
}

int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
			u32 portid, const struct nlmsghdr *nlh)
{
	unsigned int old_flags;
	int err;

	old_flags = dev->flags;
	if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
		err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
					 NULL);
		if (err < 0)
			return err;
	}

	if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
		__dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh);
	} else {
		dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
		__dev_notify_flags(dev, old_flags, ~0U, portid, nlh);
	}
	return 0;
}
EXPORT_SYMBOL(rtnl_configure_link);

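/* Allocate and initialise a link from netlink attributes: the net_device is
 * created via ops->alloc (or alloc_netdev_mqs), bound to the namespace and
 * ops, and pre-loaded with the generic attributes (MTU, addresses, queue
 * counts, GSO/GRO limits, ...). The device is not registered; that is the
 * caller's job, typically followed by rtnl_configure_link().
 */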
struct net_device *rtnl_create_link(struct net *net, const char *ifname,
				    unsigned char name_assign_type,
				    const struct rtnl_link_ops *ops,
				    struct nlattr *tb[],
				    struct netlink_ext_ack *extack)
{
	struct net_device *dev;
	unsigned int num_tx_queues = 1;
	unsigned int num_rx_queues = 1;

	if (tb[IFLA_NUM_TX_QUEUES])
		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
	else if (ops->get_num_tx_queues)
		num_tx_queues = ops->get_num_tx_queues();

	if (tb[IFLA_NUM_RX_QUEUES])
		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
	else if (ops->get_num_rx_queues)
		num_rx_queues = ops->get_num_rx_queues();

	if (num_tx_queues < 1 || num_tx_queues > 4096) {
		NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
		return ERR_PTR(-EINVAL);
	}

	if (num_rx_queues < 1 || num_rx_queues > 4096) {
		NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
		return ERR_PTR(-EINVAL);
	}

	if (ops->alloc) {
		dev = ops->alloc(tb, ifname, name_assign_type,
				 num_tx_queues, num_rx_queues);
		if (IS_ERR(dev))
			return dev;
	} else {
		dev = alloc_netdev_mqs(ops->priv_size, ifname,
				       name_assign_type, ops->setup,
				       num_tx_queues, num_rx_queues);
	}

	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev_net_set(dev, net);
	dev->rtnl_link_ops = ops;
	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;

	if (tb[IFLA_MTU]) {
		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
		int err;

		err = dev_validate_mtu(dev, mtu, extack);
		if (err) {
			free_netdev(dev);
			return ERR_PTR(err);
		}
		dev->mtu = mtu;
	}
	if (tb[IFLA_ADDRESS]) {
		__dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
			       nla_len(tb[IFLA_ADDRESS]));
		dev->addr_assign_type = NET_ADDR_SET;
	}
	if (tb[IFLA_BROADCAST])
		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
		       nla_len(tb[IFLA_BROADCAST]));
	if (tb[IFLA_TXQLEN])
		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
	if (tb[IFLA_LINKMODE])
		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
	if (tb[IFLA_GROUP])
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
	if (tb[IFLA_GSO_MAX_SIZE])
		netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
	if (tb[IFLA_GSO_MAX_SEGS])
		netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
	if (tb[IFLA_GRO_MAX_SIZE])
		netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));

	return dev;
}
EXPORT_SYMBOL(rtnl_create_link);

static int rtnl_group_changelink(const struct sk_buff *skb,
				 struct net *net, int group,
				 struct ifinfomsg *ifm,
				 struct netlink_ext_ack *extack,
				 struct nlattr **tb)
{
	struct net_device *dev, *aux;
	int err;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			err = do_setlink(skb, dev, ifm, extack, tb, 0);
			if (err < 0)
				return err;
		}
	}

	return 0;
}

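/* Create path of RTM_NEWLINK: resolve the destination (and optional link)
 * namespace, allocate the device, let ops->newlink() (or plain
 * register_netdevice()) register it, then finish with rtnl_configure_link()
 * and any requested namespace move or master assignment, unwinding the
 * registration on failure.
 */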
static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
			       const struct rtnl_link_ops *ops,
			       const struct nlmsghdr *nlh,
			       struct nlattr **tb, struct nlattr **data,
			       struct netlink_ext_ack *extack)
{
	unsigned char name_assign_type = NET_NAME_USER;
	struct net *net = sock_net(skb->sk);
	u32 portid = NETLINK_CB(skb).portid;
	struct net *dest_net, *link_net;
	struct net_device *dev;
	char ifname[IFNAMSIZ];
	int err;

	if (!ops->alloc && !ops->setup)
		return -EOPNOTSUPP;

	if (tb[IFLA_IFNAME]) {
		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	} else {
		snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
		name_assign_type = NET_NAME_ENUM;
	}

	dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
	if (IS_ERR(dest_net))
		return PTR_ERR(dest_net);

	if (tb[IFLA_LINK_NETNSID]) {
		int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);

		link_net = get_net_ns_by_id(dest_net, id);
		if (!link_net) {
			NL_SET_ERR_MSG(extack, "Unknown network namespace id");
			err = -EINVAL;
			goto out;
		}
		err = -EPERM;
		if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
			goto out;
	} else {
		link_net = NULL;
	}

	dev = rtnl_create_link(link_net ? : dest_net, ifname,
			       name_assign_type, ops, tb, extack);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out;
	}

	dev->ifindex = ifm->ifi_index;

	if (ops->newlink)
		err = ops->newlink(link_net ? : net, dev, tb, data, extack);
	else
		err = register_netdevice(dev);
	if (err < 0) {
		free_netdev(dev);
		goto out;
	}

	err = rtnl_configure_link(dev, ifm, portid, nlh);
	if (err < 0)
		goto out_unregister;
	if (link_net) {
		err = dev_change_net_namespace(dev, dest_net, ifname);
		if (err < 0)
			goto out_unregister;
	}
	if (tb[IFLA_MASTER]) {
		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
		if (err)
			goto out_unregister;
	}
out:
	if (link_net)
		put_net(link_net);
	put_net(dest_net);
	return err;
out_unregister:
	if (ops->newlink) {
		LIST_HEAD(list_kill);

		ops->dellink(dev, &list_kill);
		unregister_netdevice_many(&list_kill);
	} else {
		unregister_netdevice(dev);
	}
	goto out;
}

struct rtnl_newlink_tbs {
	struct nlattr *tb[IFLA_MAX + 1];
	struct nlattr *attr[RTNL_MAX_TYPE + 1];
	struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
};

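/* Core RTM_NEWLINK handler. If the message identifies an existing device we
 * route it through changelink/slave_changelink/do_setlink; otherwise, with
 * NLM_F_CREATE set, a new link is created. An unknown kind triggers a
 * request_module("rtnl-link-<kind>") and a replay of the whole parse, since
 * the module load has to drop the RTNL lock.
 */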
static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct rtnl_newlink_tbs *tbs,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
	struct nlattr ** const tb = tbs->tb;
	const struct rtnl_link_ops *m_ops;
	struct net_device *master_dev;
	struct net *net = sock_net(skb->sk);
	const struct rtnl_link_ops *ops;
	struct nlattr **slave_data;
	char kind[MODULE_NAME_LEN];
	struct net_device *dev;
	struct ifinfomsg *ifm;
	struct nlattr **data;
	bool link_specified;
	int err;

#ifdef CONFIG_MODULES
replay:
#endif
	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
				     ifla_policy, extack);
	if (err < 0)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, false);
	if (err < 0)
		return err;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0) {
		link_specified = true;
		dev = __dev_get_by_index(net, ifm->ifi_index);
	} else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
		link_specified = true;
		dev = rtnl_dev_get(net, tb);
	} else {
		link_specified = false;
		dev = NULL;
	}

	master_dev = NULL;
	m_ops = NULL;
	if (dev) {
		master_dev = netdev_master_upper_dev_get(dev);
		if (master_dev)
			m_ops = master_dev->rtnl_link_ops;
	}

	err = validate_linkmsg(dev, tb, extack);
	if (err < 0)
		return err;

	if (tb[IFLA_LINKINFO]) {
		err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
						  tb[IFLA_LINKINFO],
						  ifla_info_policy, NULL);
		if (err < 0)
			return err;
	} else
		memset(linkinfo, 0, sizeof(linkinfo));

	if (linkinfo[IFLA_INFO_KIND]) {
		nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
		ops = rtnl_link_ops_get(kind);
	} else {
		kind[0] = '\0';
		ops = NULL;
	}

	data = NULL;
	if (ops) {
		if (ops->maxtype > RTNL_MAX_TYPE)
			return -EINVAL;

		if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
			err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
							  linkinfo[IFLA_INFO_DATA],
							  ops->policy, extack);
			if (err < 0)
				return err;
			data = tbs->attr;
		}
		if (ops->validate) {
			err = ops->validate(tb, data, extack);
			if (err < 0)
				return err;
		}
	}

	slave_data = NULL;
	if (m_ops) {
		if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
			return -EINVAL;

		if (m_ops->slave_maxtype &&
		    linkinfo[IFLA_INFO_SLAVE_DATA]) {
			err = nla_parse_nested_deprecated(tbs->slave_attr,
							  m_ops->slave_maxtype,
							  linkinfo[IFLA_INFO_SLAVE_DATA],
							  m_ops->slave_policy,
							  extack);
			if (err < 0)
				return err;
			slave_data = tbs->slave_attr;
		}
	}

	if (dev) {
		int status = 0;

		if (nlh->nlmsg_flags & NLM_F_EXCL)
			return -EEXIST;
		if (nlh->nlmsg_flags & NLM_F_REPLACE)
			return -EOPNOTSUPP;

		if (linkinfo[IFLA_INFO_DATA]) {
			if (!ops || ops != dev->rtnl_link_ops ||
			    !ops->changelink)
				return -EOPNOTSUPP;

			err = ops->changelink(dev, tb, data, extack);
			if (err < 0)
				return err;
			status |= DO_SETLINK_NOTIFY;
		}

		if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
			if (!m_ops || !m_ops->slave_changelink)
				return -EOPNOTSUPP;

			err = m_ops->slave_changelink(master_dev, dev, tb,
						      slave_data, extack);
			if (err < 0)
				return err;
			status |= DO_SETLINK_NOTIFY;
		}

		return do_setlink(skb, dev, ifm, extack, tb, status);
	}

	if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
		/* No dev found and NLM_F_CREATE not set: the requested device
		 * does not exist, or the request targets a group.
		 */
		if (link_specified)
			return -ENODEV;
		if (tb[IFLA_GROUP])
			return rtnl_group_changelink(skb, net,
						     nla_get_u32(tb[IFLA_GROUP]),
						     ifm, extack, tb);
		return -ENODEV;
	}

	if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
		return -EOPNOTSUPP;

	if (!ops) {
#ifdef CONFIG_MODULES
		if (kind[0]) {
			__rtnl_unlock();
			request_module("rtnl-link-%s", kind);
			rtnl_lock();
			ops = rtnl_link_ops_get(kind);
			if (ops)
				goto replay;
		}
#endif
		NL_SET_ERR_MSG(extack, "Unknown device type");
		return -EOPNOTSUPP;
	}

	return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack);
}

static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct rtnl_newlink_tbs *tbs;
	int ret;

	tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
	if (!tbs)
		return -ENOMEM;

	ret = __rtnl_newlink(skb, nlh, tbs, extack);
	kfree(tbs);
	return ret;
}

static int rtnl_valid_getlink_req(struct sk_buff *skb,
				  const struct nlmsghdr *nlh,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct ifinfomsg *ifm;
	int i, err;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for get link");
		return -EINVAL;
	}

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
					      ifla_policy, extack);

	ifm = nlmsg_data(nlh);
	if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
	    ifm->ifi_change) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
					    ifla_policy, extack);
	if (err)
		return err;

	for (i = 0; i <= IFLA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case IFLA_IFNAME:
		case IFLA_ALT_IFNAME:
		case IFLA_EXT_MASK:
		case IFLA_TARGET_NETNSID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
			return -EINVAL;
		}
	}

	return 0;
}

static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net *tgt_net = net;
	struct ifinfomsg *ifm;
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *dev = NULL;
	struct sk_buff *nskb;
	int netnsid = -1;
	int err;
	u32 ext_filter_mask = 0;

	err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
	if (err < 0)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, true);
	if (err < 0)
		return err;

	if (tb[IFLA_TARGET_NETNSID]) {
		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
		if (IS_ERR(tgt_net))
			return PTR_ERR(tgt_net);
	}

	if (tb[IFLA_EXT_MASK])
		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
		dev = rtnl_dev_get(tgt_net, tb);
	else
		goto out;

	err = -ENODEV;
	if (dev == NULL)
		goto out;

	err = -ENOBUFS;
	nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
	if (nskb == NULL)
		goto out;

	err = rtnl_fill_ifinfo(nskb, dev, net,
			       RTM_NEWLINK, NETLINK_CB(skb).portid,
			       nlh->nlmsg_seq, 0, 0, ext_filter_mask,
			       0, NULL, 0, netnsid, GFP_KERNEL);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
out:
	if (netnsid >= 0)
		put_net(tgt_net);

	return err;
}

static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
			   bool *changed, struct netlink_ext_ack *extack)
{
	char *alt_ifname;
	size_t size;
	int err;

	err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
	if (err)
		return err;

	if (cmd == RTM_NEWLINKPROP) {
		size = rtnl_prop_list_size(dev);
		size += nla_total_size(ALTIFNAMSIZ);
		if (size >= U16_MAX) {
			NL_SET_ERR_MSG(extack,
				       "effective property list too long");
			return -EINVAL;
		}
	}

	alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
	if (!alt_ifname)
		return -ENOMEM;

	if (cmd == RTM_NEWLINKPROP) {
		err = netdev_name_node_alt_create(dev, alt_ifname);
		if (!err)
			alt_ifname = NULL;
	} else if (cmd == RTM_DELLINKPROP) {
		err = netdev_name_node_alt_destroy(dev, alt_ifname);
	} else {
		WARN_ON_ONCE(1);
		err = -EINVAL;
	}

	kfree(alt_ifname);
	if (!err)
		*changed = true;
	return err;
}

static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	struct ifinfomsg *ifm;
	bool changed = false;
	struct nlattr *attr;
	int err, rem;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
	if (err)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, true);
	if (err)
		return err;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
		dev = rtnl_dev_get(net, tb);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	if (!tb[IFLA_PROP_LIST])
		return 0;

	nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
		switch (nla_type(attr)) {
		case IFLA_ALT_IFNAME:
			err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
			if (err)
				return err;
			break;
		}
	}

	if (changed)
		netdev_state_change(dev);
	return 0;
}

static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
}

static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
}

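/* Compute the skb allocation size for one link dump message. Without
 * IFLA_EXT_MASK the default NLMSG_GOODSIZE is enough; with it, take the
 * worst-case if_nlmsg_size() over all devices in the namespace.
 */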
static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	size_t min_ifinfo_dump_size = 0;
	struct nlattr *tb[IFLA_MAX+1];
	u32 ext_filter_mask = 0;
	struct net_device *dev;
	int hdrlen;

	/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
		if (tb[IFLA_EXT_MASK])
			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
	}

	if (!ext_filter_mask)
		return NLMSG_GOODSIZE;
	/*
	 * Traverse the list of net devices and compute the minimum
	 * buffer size based upon the filter mask.
	 */
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		min_ifinfo_dump_size = max(min_ifinfo_dump_size,
					   if_nlmsg_size(dev, ext_filter_mask));
	}
	rcu_read_unlock();

	return nlmsg_total_size(min_ifinfo_dump_size);
}

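/* PF_UNSPEC dump fallback: iterate over the address families and chain
 * their registered dumpit handlers for this message type, using cb->family
 * to resume across successive netlink recv calls.
 */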
static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;
	int s_idx = cb->family;
	int type = cb->nlh->nlmsg_type - RTM_BASE;
	int ret = 0;

	if (s_idx == 0)
		s_idx = 1;

	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
		struct rtnl_link __rcu **tab;
		struct rtnl_link *link;
		rtnl_dumpit_func dumpit;

		if (idx < s_idx || idx == PF_PACKET)
			continue;

		if (type < 0 || type >= RTM_NR_MSGTYPES)
			continue;

		tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
		if (!tab)
			continue;

		link = rcu_dereference_rtnl(tab[type]);
		if (!link)
			continue;

		dumpit = link->dumpit;
		if (!dumpit)
			continue;

		if (idx > s_idx) {
			memset(&cb->args[0], 0, sizeof(cb->args));
			cb->prev_seq = 0;
			cb->seq = 0;
		}
		ret = dumpit(skb, cb);
		if (ret)
			break;
	}
	cb->family = idx;

	return skb->len ? : ret;
}

struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
				       unsigned int change,
				       u32 event, gfp_t flags, int *new_nsid,
				       int new_ifindex, u32 portid, u32 seq)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
	if (skb == NULL)
		goto errout;

	err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
			       type, portid, seq, change, 0, 0, event,
			       new_nsid, new_ifindex, -1, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	return skb;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
	return NULL;
}

void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags,
		       u32 portid, const struct nlmsghdr *nlh)
{
	struct net *net = dev_net(dev);

	rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags);
}

static void rtmsg_ifinfo_event(int type, struct net_device *dev,
			       unsigned int change, u32 event,
			       gfp_t flags, int *new_nsid, int new_ifindex,
			       u32 portid, const struct nlmsghdr *nlh)
{
	struct sk_buff *skb;

	if (dev->reg_state != NETREG_REGISTERED)
		return;

	skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
				     new_ifindex, portid, nlmsg_seq(nlh));
	if (skb)
		rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
}

void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
		  gfp_t flags, u32 portid, const struct nlmsghdr *nlh)
{
	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
			   NULL, 0, portid, nlh);
}

void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
			 gfp_t flags, int *new_nsid, int new_ifindex)
{
	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
			   new_nsid, new_ifindex, 0, NULL);
}

static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   u8 *addr, u16 vid, u32 pid, u32 seq,
				   int type, unsigned int flags,
				   int nlflags, u16 ndm_state)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = flags;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dev->ifindex;
	ndm->ndm_state = ndm_state;

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;
	if (vid)
		if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
			goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_fdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg)) +
	       nla_total_size(ETH_ALEN) +	/* NDA_LLADDR */
	       nla_total_size(sizeof(u16)) +	/* NDA_VLAN */
	       0;
}

static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
			    u16 ndm_state)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
				      0, 0, type, NTF_SELF, 0, ndm_state);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

/*
 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
 */
int ndo_dflt_fdb_add(struct ndmsg *ndm,
		     struct nlattr *tb[],
		     struct net_device *dev,
		     const unsigned char *addr, u16 vid,
		     u16 flags)
{
	int err = -EINVAL;

	/* If aging addresses are supported, the device will need to
	 * implement its own handler for this.
	 */
4088 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
4089 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4090 return err;
4091 }
4092
4093 if (tb[NDA_FLAGS_EXT]) {
4094 netdev_info(dev, "invalid flags given to default FDB implementation\n");
4095 return err;
4096 }
4097
4098 if (vid) {
4099 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
4100 return err;
4101 }
4102
4103 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4104 err = dev_uc_add_excl(dev, addr);
4105 else if (is_multicast_ether_addr(addr))
4106 err = dev_mc_add_excl(dev, addr);
4107
4108 /* Only return duplicate errors if NLM_F_EXCL is set */
4109 if (err == -EEXIST && !(flags & NLM_F_EXCL))
4110 err = 0;
4111
4112 return err;
4113}
4114EXPORT_SYMBOL(ndo_dflt_fdb_add);
4115
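/* Note: the default FDB add/del/dump handlers in this file back devices
 * that do not provide their own .ndo_fdb_* operations; they only manage
 * the device's unicast/multicast filter lists. For example, a command
 * along the lines of (assuming an iproute2-style client)
 *
 *	bridge fdb add 00:11:22:33:44:55 dev eth0 self permanent
 *
 * on such a device ends up in dev_uc_add_excl() via ndo_dflt_fdb_add().
 */
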
static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
			 struct netlink_ext_ack *extack)
{
	u16 vid = 0;

	if (vlan_attr) {
		if (nla_len(vlan_attr) != sizeof(u16)) {
			NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
			return -EINVAL;
		}

		vid = nla_get_u16(vlan_attr);

		if (!vid || vid >= VLAN_VID_MASK) {
			NL_SET_ERR_MSG(extack, "invalid vlan id");
			return -EINVAL;
		}
	}
	*p_vid = vid;
	return 0;
}

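/* RTM_NEWNEIGH with family AF_BRIDGE. NTF_MASTER (or no flag) routes the
 * request to the bridge master's .ndo_fdb_add, NTF_SELF to the port
 * device itself. In iproute2 terms this corresponds roughly to
 *
 *	bridge fdb add <mac> dev <port> master	(NTF_MASTER)
 *	bridge fdb add <mac> dev <port> self	(NTF_SELF)
 *
 * and a request may carry both flags, in which case both paths run.
 */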
static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct net_device *dev;
	u8 *addr;
	u16 vid;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
				     extack);
	if (err < 0)
		return err;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex == 0) {
		NL_SET_ERR_MSG(extack, "invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG(extack, "unknown ifindex");
		return -ENODEV;
	}

	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
		NL_SET_ERR_MSG(extack, "invalid address");
		return -EINVAL;
	}

	if (dev->type != ARPHRD_ETHER) {
		NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
		return -EINVAL;
	}

	addr = nla_data(tb[NDA_LLADDR]);

	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
	if (err)
		return err;

	err = -EOPNOTSUPP;

	/* Support fdb on master device, the net/bridge default case */
	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
	    netif_is_bridge_port(dev)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
		const struct net_device_ops *ops = br_dev->netdev_ops;

		err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
				       nlh->nlmsg_flags, extack);
		if (err)
			goto out;
		else
			ndm->ndm_flags &= ~NTF_MASTER;
	}

	/* Embedded bridge, macvlan, and any other device support */
	if ((ndm->ndm_flags & NTF_SELF)) {
		if (dev->netdev_ops->ndo_fdb_add)
			err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
							   vid,
							   nlh->nlmsg_flags,
							   extack);
		else
			err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
					       nlh->nlmsg_flags);

		if (!err) {
			rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
					ndm->ndm_state);
			ndm->ndm_flags &= ~NTF_SELF;
		}
	}
out:
	return err;
}

/*
 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
 */
int ndo_dflt_fdb_del(struct ndmsg *ndm,
		     struct nlattr *tb[],
		     struct net_device *dev,
		     const unsigned char *addr, u16 vid)
{
	int err = -EINVAL;

	/* If aging addresses are supported, the device will need to
	 * implement its own handler for this.
	 */
	if (!(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_info(dev, "default FDB implementation only supports local addresses\n");
		return err;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_del(dev, addr);

	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_del);

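/* Strict policy for RTM_DELNEIGH with NLM_F_BULK set. Bulk deletes carry
 * no NDA_LLADDR; instead they may filter by VLAN, ifindex, and masked
 * ndm_state/ndm_flags. Userspace-wise this is what an FDB flush such as
 * "bridge fdb flush dev <port>" is expected to use (assuming an
 * iproute2-style client).
 */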
static const struct nla_policy fdb_del_bulk_policy[NDA_MAX + 1] = {
	[NDA_VLAN] = { .type = NLA_U16 },
	[NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
	[NDA_NDM_STATE_MASK] = { .type = NLA_U16 },
	[NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 },
};

static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
	struct net *net = sock_net(skb->sk);
	const struct net_device_ops *ops;
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct net_device *dev;
	__u8 *addr = NULL;
	int err;
	u16 vid;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!del_bulk) {
		err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
					     NULL, extack);
	} else {
		err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX,
				  fdb_del_bulk_policy, extack);
	}
	if (err < 0)
		return err;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex == 0) {
		NL_SET_ERR_MSG(extack, "invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG(extack, "unknown ifindex");
		return -ENODEV;
	}

	if (!del_bulk) {
		if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
			NL_SET_ERR_MSG(extack, "invalid address");
			return -EINVAL;
		}
		addr = nla_data(tb[NDA_LLADDR]);
	}

	if (dev->type != ARPHRD_ETHER) {
		NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
		return -EINVAL;
	}

	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
	if (err)
		return err;

	err = -EOPNOTSUPP;

	/* Support fdb on master device, the net/bridge default case */
	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
	    netif_is_bridge_port(dev)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		ops = br_dev->netdev_ops;
		if (!del_bulk) {
			if (ops->ndo_fdb_del)
				err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
		} else {
			if (ops->ndo_fdb_del_bulk)
				err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
							    extack);
		}

		if (err)
			goto out;
		else
			ndm->ndm_flags &= ~NTF_MASTER;
	}

	/* Embedded bridge, macvlan, and any other device support */
	if (ndm->ndm_flags & NTF_SELF) {
		ops = dev->netdev_ops;
		if (!del_bulk) {
			if (ops->ndo_fdb_del)
				err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
			else
				err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
		} else {
			/* in case err was cleared by NTF_MASTER call */
			err = -EOPNOTSUPP;
			if (ops->ndo_fdb_del_bulk)
				err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
							    extack);
		}

		if (!err) {
			if (!del_bulk)
				rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
						ndm->ndm_state);
			ndm->ndm_flags &= ~NTF_SELF;
		}
	}
out:
	return err;
}

static int nlmsg_populate_fdb(struct sk_buff *skb,
			      struct netlink_callback *cb,
			      struct net_device *dev,
			      int *idx,
			      struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha;
	int err;
	u32 portid, seq;

	portid = NETLINK_CB(cb->skb).portid;
	seq = cb->nlh->nlmsg_seq;

	list_for_each_entry(ha, &list->list, list) {
		if (*idx < cb->args[2])
			goto skip;

		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
					      portid, seq,
					      RTM_NEWNEIGH, NTF_SELF,
					      NLM_F_MULTI, NUD_PERMANENT);
		if (err < 0)
			return err;
skip:
		*idx += 1;
	}
	return 0;
}

/**
 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
 * @skb: socket buffer to store message in
 * @cb: netlink callback
 * @dev: netdevice
 * @filter_dev: ignored
 * @idx: the number of FDB table entries dumped is added to *@idx
 *
 * Default netdevice operation to dump the existing unicast address list.
 * Returns 0 on success or a negative error; the number of entries dumped
 * is added to *@idx.
 */
int ndo_dflt_fdb_dump(struct sk_buff *skb,
		      struct netlink_callback *cb,
		      struct net_device *dev,
		      struct net_device *filter_dev,
		      int *idx)
{
	int err;

	if (dev->type != ARPHRD_ETHER)
		return -EINVAL;

	netif_addr_lock_bh(dev);
	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
	if (err)
		goto out;
	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
out:
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_dump);

static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
				 int *br_idx, int *brport_idx,
				 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	struct ndmsg *ndm;
	int err, i;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
		return -EINVAL;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
	    ndm->ndm_flags || ndm->ndm_type) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
					    NDA_MAX, NULL, extack);
	if (err < 0)
		return err;

	*brport_idx = ndm->ndm_ifindex;
	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NDA_IFINDEX:
			if (nla_len(tb[i]) != sizeof(u32)) {
				NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
				return -EINVAL;
			}
			*brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
			break;
		case NDA_MASTER:
			if (nla_len(tb[i]) != sizeof(u32)) {
				NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
				return -EINVAL;
			}
			*br_idx = nla_get_u32(tb[NDA_MASTER]);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
			return -EINVAL;
		}
	}

	return 0;
}

static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
				 int *br_idx, int *brport_idx,
				 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_MAX+1];
	int err;

	/* A hack to preserve kernel<->userspace interface.
	 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
	 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
	 * So, check for ndmsg with an optional u32 attribute (not used here).
	 * Fortunately these sizes don't conflict with the size of ifinfomsg
	 * with an optional attribute.
	 */
	if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
	    (nlmsg_len(nlh) != sizeof(struct ndmsg) +
	     nla_attr_size(sizeof(u32)))) {
		struct ifinfomsg *ifm;

		err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
					     tb, IFLA_MAX, ifla_policy,
					     extack);
		if (err < 0) {
			return -EINVAL;
		} else if (err == 0) {
			if (tb[IFLA_MASTER])
				*br_idx = nla_get_u32(tb[IFLA_MASTER]);
		}

		ifm = nlmsg_data(nlh);
		*brport_idx = ifm->ifi_index;
	}
	return 0;
}

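/* Dump all FDB entries, walking every netdevice. Resume state across
 * dump continuations lives in cb->args[]: args[0] is the dev_index_head
 * hash chain, args[1] the device position within that chain, and
 * args[2] the entry offset within the current device's FDB (consumed by
 * nlmsg_populate_fdb() and the drivers' .ndo_fdb_dump).
 */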
static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net_device *br_dev = NULL;
	const struct net_device_ops *ops = NULL;
	const struct net_device_ops *cops = NULL;
	struct net *net = sock_net(skb->sk);
	struct hlist_head *head;
	int brport_idx = 0;
	int br_idx = 0;
	int h, s_h;
	int idx = 0, s_idx;
	int err = 0;
	int fidx = 0;

	if (cb->strict_check)
		err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
					    cb->extack);
	else
		err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
					    cb->extack);
	if (err < 0)
		return err;

	if (br_idx) {
		br_dev = __dev_get_by_index(net, br_idx);
		if (!br_dev)
			return -ENODEV;

		ops = br_dev->netdev_ops;
	}

	s_h = cb->args[0];
	s_idx = cb->args[1];

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {

			if (brport_idx && (dev->ifindex != brport_idx))
				continue;

			if (!br_idx) { /* user did not specify a specific bridge */
				if (netif_is_bridge_port(dev)) {
					br_dev = netdev_master_upper_dev_get(dev);
					cops = br_dev->netdev_ops;
				}
			} else {
				if (dev != br_dev &&
				    !netif_is_bridge_port(dev))
					continue;

				if (br_dev != netdev_master_upper_dev_get(dev) &&
				    !netif_is_bridge_master(dev))
					continue;
				cops = ops;
			}

			if (idx < s_idx)
				goto cont;

			if (netif_is_bridge_port(dev)) {
				if (cops && cops->ndo_fdb_dump) {
					err = cops->ndo_fdb_dump(skb, cb,
								 br_dev, dev,
								 &fidx);
					if (err == -EMSGSIZE)
						goto out;
				}
			}

			if (dev->netdev_ops->ndo_fdb_dump)
				err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
								    dev, NULL,
								    &fidx);
			else
				err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
							&fidx);
			if (err == -EMSGSIZE)
				goto out;

			cops = NULL;

			/* reset fdb offset to 0 for rest of the interfaces */
			cb->args[2] = 0;
			fidx = 0;
cont:
			idx++;
		}
	}

out:
	cb->args[0] = h;
	cb->args[1] = idx;
	cb->args[2] = fidx;

	return skb->len;
}

static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
				struct nlattr **tb, u8 *ndm_flags,
				int *br_idx, int *brport_idx, u8 **addr,
				u16 *vid, struct netlink_ext_ack *extack)
{
	struct ndmsg *ndm;
	int err, i;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
		return -EINVAL;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
	    ndm->ndm_type) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
		return -EINVAL;
	}

	if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
		NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
					    NDA_MAX, nda_policy, extack);
	if (err < 0)
		return err;

	*ndm_flags = ndm->ndm_flags;
	*brport_idx = ndm->ndm_ifindex;
	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NDA_MASTER:
			*br_idx = nla_get_u32(tb[i]);
			break;
		case NDA_LLADDR:
			if (nla_len(tb[i]) != ETH_ALEN) {
				NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
				return -EINVAL;
			}
			*addr = nla_data(tb[i]);
			break;
		case NDA_VLAN:
			err = fdb_vid_parse(tb[i], vid, extack);
			if (err)
				return err;
			break;
		case NDA_VNI:
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
			return -EINVAL;
		}
	}

	return 0;
}

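/* RTM_GETNEIGH (non-dump) lookup of a single FDB entry by MAC address
 * and optional VLAN, answered via the device's (or its master's)
 * .ndo_fdb_get. Roughly what "bridge fdb get <mac> dev <port>" issues,
 * assuming an iproute2-style client.
 */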
static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net_device *dev = NULL, *br_dev = NULL;
	const struct net_device_ops *ops = NULL;
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[NDA_MAX + 1];
	struct sk_buff *skb;
	int brport_idx = 0;
	u8 ndm_flags = 0;
	int br_idx = 0;
	u8 *addr = NULL;
	u16 vid = 0;
	int err;

	err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
				   &brport_idx, &addr, &vid, extack);
	if (err < 0)
		return err;

	if (!addr) {
		NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
		return -EINVAL;
	}

	if (brport_idx) {
		dev = __dev_get_by_index(net, brport_idx);
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
			return -ENODEV;
		}
	}

	if (br_idx) {
		if (dev) {
			NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
			return -EINVAL;
		}

		br_dev = __dev_get_by_index(net, br_idx);
		if (!br_dev) {
			NL_SET_ERR_MSG(extack, "Invalid master ifindex");
			return -EINVAL;
		}
		ops = br_dev->netdev_ops;
	}

	if (dev) {
		if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
			if (!netif_is_bridge_port(dev)) {
				NL_SET_ERR_MSG(extack, "Device is not a bridge port");
				return -EINVAL;
			}
			br_dev = netdev_master_upper_dev_get(dev);
			if (!br_dev) {
				NL_SET_ERR_MSG(extack, "Master of device not found");
				return -EINVAL;
			}
			ops = br_dev->netdev_ops;
		} else {
			if (!(ndm_flags & NTF_SELF)) {
				NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
				return -EINVAL;
			}
			ops = dev->netdev_ops;
		}
	}

	if (!br_dev && !dev) {
		NL_SET_ERR_MSG(extack, "No device specified");
		return -ENODEV;
	}

	if (!ops || !ops->ndo_fdb_get) {
		NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
		return -EOPNOTSUPP;
	}

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (br_dev)
		dev = br_dev;
	err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
			       NETLINK_CB(in_skb).portid,
			       nlh->nlmsg_seq, extack);
	if (err)
		goto out;

	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
out:
	kfree_skb(skb);
	return err;
}

static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
			       unsigned int attrnum, unsigned int flag)
{
	if (mask & flag)
		return nla_put_u8(skb, attrnum, !!(flags & flag));
	return 0;
}

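/* Default RTM_GETLINK (AF_BRIDGE) reply builder used by bridge-capable
 * drivers. The message is an ifinfomsg plus two nests: IFLA_AF_SPEC
 * carrying IFLA_BRIDGE_FLAGS/IFLA_BRIDGE_MODE (and optional VLAN info
 * via @vlan_fill), and IFLA_PROTINFO carrying the per-port flags dumped
 * with brport_nla_put_flag() above.
 */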
int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			    struct net_device *dev, u16 mode,
			    u32 flags, u32 mask, int nlflags,
			    u32 filter_mask,
			    int (*vlan_fill)(struct sk_buff *skb,
					     struct net_device *dev,
					     u32 filter_mask))
{
	struct nlmsghdr *nlh;
	struct ifinfomsg *ifm;
	struct nlattr *br_afspec;
	struct nlattr *protinfo;
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
	int err = 0;

	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_BRIDGE;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (br_dev &&
	     nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
	if (!br_afspec)
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
		nla_nest_cancel(skb, br_afspec);
		goto nla_put_failure;
	}

	if (mode != BRIDGE_MODE_UNDEF) {
		if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
			nla_nest_cancel(skb, br_afspec);
			goto nla_put_failure;
		}
	}
	if (vlan_fill) {
		err = vlan_fill(skb, dev, filter_mask);
		if (err) {
			nla_nest_cancel(skb, br_afspec);
			goto nla_put_failure;
		}
	}
	nla_nest_end(skb, br_afspec);

	protinfo = nla_nest_start(skb, IFLA_PROTINFO);
	if (!protinfo)
		goto nla_put_failure;

	if (brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_FAST_LEAVE,
				BR_MULTICAST_FAST_LEAVE) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_LEARNING, BR_LEARNING) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
		nla_nest_cancel(skb, protinfo);
		goto nla_put_failure;
	}

	nla_nest_end(skb, protinfo);

	nlmsg_end(skb, nlh);
	return 0;
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return err ? err : -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);

static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
				    bool strict_check, u32 *filter_mask,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_MAX+1];
	int err, i;

	if (strict_check) {
		struct ifinfomsg *ifm;

		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
			NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
			return -EINVAL;
		}

		ifm = nlmsg_data(nlh);
		if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
		    ifm->ifi_change || ifm->ifi_index) {
			NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
			return -EINVAL;
		}

		err = nlmsg_parse_deprecated_strict(nlh,
						    sizeof(struct ifinfomsg),
						    tb, IFLA_MAX, ifla_policy,
						    extack);
	} else {
		err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
					     tb, IFLA_MAX, ifla_policy,
					     extack);
	}
	if (err < 0)
		return err;

	/* new attributes should only be added with strict checking */
	for (i = 0; i <= IFLA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case IFLA_EXT_MASK:
			*filter_mask = nla_get_u32(tb[i]);
			break;
		default:
			if (strict_check) {
				NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int idx = 0;
	u32 portid = NETLINK_CB(cb->skb).portid;
	u32 seq = nlh->nlmsg_seq;
	u32 filter_mask = 0;
	int err;

	err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
				       cb->extack);
	if (err < 0 && cb->strict_check)
		return err;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
			if (idx >= cb->args[0]) {
				err = br_dev->netdev_ops->ndo_bridge_getlink(
						skb, portid, seq, dev,
						filter_mask, NLM_F_MULTI);
				if (err < 0 && err != -EOPNOTSUPP) {
					if (likely(skb->len))
						break;

					goto out_err;
				}
			}
			idx++;
		}

		if (ops->ndo_bridge_getlink) {
			if (idx >= cb->args[0]) {
				err = ops->ndo_bridge_getlink(skb, portid,
							      seq, dev,
							      filter_mask,
							      NLM_F_MULTI);
				if (err < 0 && err != -EOPNOTSUPP) {
					if (likely(skb->len))
						break;

					goto out_err;
				}
			}
			idx++;
		}
	}
	err = skb->len;
out_err:
	rcu_read_unlock();
	cb->args[0] = idx;

	return err;
}

static inline size_t bridge_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ)	/* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN)	/* IFLA_ADDRESS */
		+ nla_total_size(sizeof(u32))	/* IFLA_MASTER */
		+ nla_total_size(sizeof(u32))	/* IFLA_MTU */
		+ nla_total_size(sizeof(u32))	/* IFLA_LINK */
		+ nla_total_size(sizeof(u32))	/* IFLA_OPERSTATE */
		+ nla_total_size(sizeof(u8))	/* IFLA_PROTINFO */
		+ nla_total_size(sizeof(struct nlattr))	/* IFLA_AF_SPEC */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRIDGE_FLAGS */
		+ nla_total_size(sizeof(u16));	/* IFLA_BRIDGE_MODE */
}

static int rtnl_bridge_notify(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -EOPNOTSUPP;

	if (!dev->netdev_ops->ndo_bridge_getlink)
		return 0;

	skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
	if (!skb) {
		err = -ENOMEM;
		goto errout;
	}

	err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
	if (err < 0)
		goto errout;

	/* Notification info is only filled for bridge ports, not the bridge
	 * device itself. Therefore, a zero notification length is valid and
	 * should not result in an error.
	 */
	if (!skb->len)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return 0;
errout:
	WARN_ON(err == -EMSGSIZE);
	kfree_skb(skb);
	if (err)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
	return err;
}

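/* RTM_SETLINK with family AF_BRIDGE. BRIDGE_FLAGS_MASTER (or no flag)
 * applies the change through the bridge master, BRIDGE_FLAGS_SELF
 * through the port device itself; e.g. something like
 *
 *	bridge link set dev <port> learning on self
 *
 * in iproute2 terms. On success the handled flag bits are cleared and
 * written back into the message, so which path consumed them is visible.
 */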
static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	struct nlattr *br_spec, *attr = NULL;
	int rem, err = -EOPNOTSUPP;
	u16 flags = 0;
	bool have_flags = false;

	if (nlmsg_len(nlh) < sizeof(*ifm))
		return -EINVAL;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_family != AF_BRIDGE)
		return -EPFNOSUPPORT;

	dev = __dev_get_by_index(net, ifm->ifi_index);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "unknown ifindex");
		return -ENODEV;
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (br_spec) {
		nla_for_each_nested(attr, br_spec, rem) {
			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
				if (nla_len(attr) < sizeof(flags))
					return -EINVAL;

				have_flags = true;
				flags = nla_get_u16(attr);
				break;
			}
		}
	}

	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
			err = -EOPNOTSUPP;
			goto out;
		}

		err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
							     extack);
		if (err)
			goto out;

		flags &= ~BRIDGE_FLAGS_MASTER;
	}

	if ((flags & BRIDGE_FLAGS_SELF)) {
		if (!dev->netdev_ops->ndo_bridge_setlink)
			err = -EOPNOTSUPP;
		else
			err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
								  flags,
								  extack);
		if (!err) {
			flags &= ~BRIDGE_FLAGS_SELF;

			/* Generate event to notify upper layer of bridge
			 * change
			 */
			err = rtnl_bridge_notify(dev);
		}
	}

	if (have_flags)
		memcpy(nla_data(attr), &flags, sizeof(flags));
out:
	return err;
}

static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	struct nlattr *br_spec, *attr = NULL;
	int rem, err = -EOPNOTSUPP;
	u16 flags = 0;
	bool have_flags = false;

	if (nlmsg_len(nlh) < sizeof(*ifm))
		return -EINVAL;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_family != AF_BRIDGE)
		return -EPFNOSUPPORT;

	dev = __dev_get_by_index(net, ifm->ifi_index);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "unknown ifindex");
		return -ENODEV;
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (br_spec) {
		nla_for_each_nested(attr, br_spec, rem) {
			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
				if (nla_len(attr) < sizeof(flags))
					return -EINVAL;

				have_flags = true;
				flags = nla_get_u16(attr);
				break;
			}
		}
	}

	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
			err = -EOPNOTSUPP;
			goto out;
		}

		err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
		if (err)
			goto out;

		flags &= ~BRIDGE_FLAGS_MASTER;
	}

	if ((flags & BRIDGE_FLAGS_SELF)) {
		if (!dev->netdev_ops->ndo_bridge_dellink)
			err = -EOPNOTSUPP;
		else
			err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
								  flags);

		if (!err) {
			flags &= ~BRIDGE_FLAGS_SELF;

			/* Generate event to notify upper layer of bridge
			 * change
			 */
			err = rtnl_bridge_notify(dev);
		}
	}

	if (have_flags)
		memcpy(nla_data(attr), &flags, sizeof(flags));
out:
	return err;
}

static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
{
	return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
	       (!idxattr || idxattr == attrid);
}

static bool
rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id)
{
	return dev->netdev_ops &&
	       dev->netdev_ops->ndo_has_offload_stats &&
	       dev->netdev_ops->ndo_get_offload_stats &&
	       dev->netdev_ops->ndo_has_offload_stats(dev, attr_id);
}

static unsigned int
rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id)
{
	return rtnl_offload_xstats_have_ndo(dev, attr_id) ?
	       sizeof(struct rtnl_link_stats64) : 0;
}

static int
rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id,
			     struct sk_buff *skb)
{
	unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id);
	struct nlattr *attr = NULL;
	void *attr_data;
	int err;

	if (!size)
		return -ENODATA;

	attr = nla_reserve_64bit(skb, attr_id, size,
				 IFLA_OFFLOAD_XSTATS_UNSPEC);
	if (!attr)
		return -EMSGSIZE;

	attr_data = nla_data(attr);
	memset(attr_data, 0, size);

	err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data);
	if (err)
		return err;

	return 0;
}

static unsigned int
rtnl_offload_xstats_get_size_stats(const struct net_device *dev,
				   enum netdev_offload_xstats_type type)
{
	bool enabled = netdev_offload_xstats_enabled(dev, type);

	return enabled ? sizeof(struct rtnl_hw_stats64) : 0;
}

struct rtnl_offload_xstats_request_used {
	bool request;
	bool used;
};

static int
rtnl_offload_xstats_get_stats(struct net_device *dev,
			      enum netdev_offload_xstats_type type,
			      struct rtnl_offload_xstats_request_used *ru,
			      struct rtnl_hw_stats64 *stats,
			      struct netlink_ext_ack *extack)
{
	bool request;
	bool used;
	int err;

	request = netdev_offload_xstats_enabled(dev, type);
	if (!request) {
		used = false;
		goto out;
	}

	err = netdev_offload_xstats_get(dev, type, stats, &used, extack);
	if (err)
		return err;

out:
	if (ru) {
		ru->request = request;
		ru->used = used;
	}
	return 0;
}

static int
rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id,
				       struct rtnl_offload_xstats_request_used *ru)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, attr_id);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev,
				   struct netlink_ext_ack *extack)
{
	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
	struct rtnl_offload_xstats_request_used ru_l3;
	struct nlattr *nest;
	int err;

	err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack);
	if (err)
		return err;

	nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO);
	if (!nest)
		return -EMSGSIZE;

	if (rtnl_offload_xstats_fill_hw_s_info_one(skb,
						   IFLA_OFFLOAD_XSTATS_L3_STATS,
						   &ru_l3))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

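/* Fill IFLA_STATS_LINK_OFFLOAD_XSTATS sub-attributes. *prividx carries
 * resume state: it is set to the attribute currently being filled so
 * that, after a partial dump (-EMSGSIZE), the next pass can skip the
 * sub-attributes that already made it into the previous message.
 */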
static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
				    int *prividx, u32 off_filter_mask,
				    struct netlink_ext_ack *extack)
{
	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
	int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO;
	int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS;
	int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
	bool have_data = false;
	int err;

	if (*prividx <= attr_id_cpu_hit &&
	    (off_filter_mask &
	     IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) {
		err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb);
		if (!err) {
			have_data = true;
		} else if (err != -ENODATA) {
			*prividx = attr_id_cpu_hit;
			return err;
		}
	}

	if (*prividx <= attr_id_hw_s_info &&
	    (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) {
		*prividx = attr_id_hw_s_info;

		err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack);
		if (err)
			return err;

		have_data = true;
		*prividx = 0;
	}

	if (*prividx <= attr_id_l3_stats &&
	    (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) {
		unsigned int size_l3;
		struct nlattr *attr;

		*prividx = attr_id_l3_stats;

		size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
		if (!size_l3)
			goto skip_l3_stats;
		attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
					 IFLA_OFFLOAD_XSTATS_UNSPEC);
		if (!attr)
			return -EMSGSIZE;

		err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL,
						    nla_data(attr), extack);
		if (err)
			return err;

		have_data = true;
skip_l3_stats:
		*prividx = 0;
	}

	if (!have_data)
		return -ENODATA;

	*prividx = 0;
	return 0;
}

static unsigned int
rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
					   enum netdev_offload_xstats_type type)
{
	bool enabled = netdev_offload_xstats_enabled(dev, type);

	return nla_total_size(0) +
		/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
		nla_total_size(sizeof(u8)) +
		/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
		(enabled ? nla_total_size(sizeof(u8)) : 0) +
		0;
}

static unsigned int
rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev)
{
	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;

	return nla_total_size(0) +
		/* IFLA_OFFLOAD_XSTATS_L3_STATS */
		rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) +
		0;
}

static int rtnl_offload_xstats_get_size(const struct net_device *dev,
					u32 off_filter_mask)
{
	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
	int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
	int nla_size = 0;
	int size;

	if (off_filter_mask &
	    IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) {
		size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit);
		nla_size += nla_total_size_64bit(size);
	}

	if (off_filter_mask &
	    IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO))
		nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev);

	if (off_filter_mask &
	    IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) {
		size = rtnl_offload_xstats_get_size_stats(dev, t_l3);
		nla_size += nla_total_size_64bit(size);
	}

	if (nla_size != 0)
		nla_size += nla_total_size(0);

	return nla_size;
}

struct rtnl_stats_dump_filters {
	/* mask[0] filters outer attributes. Then individual nests have their
	 * filtering mask at the index of the nested attribute.
	 */
	u32 mask[IFLA_STATS_MAX + 1];
};

static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
			       int type, u32 pid, u32 seq, u32 change,
			       unsigned int flags,
			       const struct rtnl_stats_dump_filters *filters,
			       int *idxattr, int *prividx,
			       struct netlink_ext_ack *extack)
{
	unsigned int filter_mask = filters->mask[0];
	struct if_stats_msg *ifsm;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	int s_prividx = *prividx;
	int err;

	ASSERT_RTNL();

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
	if (!nlh)
		return -EMSGSIZE;

	ifsm = nlmsg_data(nlh);
	ifsm->family = PF_UNSPEC;
	ifsm->pad1 = 0;
	ifsm->pad2 = 0;
	ifsm->ifindex = dev->ifindex;
	ifsm->filter_mask = filter_mask;

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
		struct rtnl_link_stats64 *sp;

		attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
					 sizeof(struct rtnl_link_stats64),
					 IFLA_STATS_UNSPEC);
		if (!attr) {
			err = -EMSGSIZE;
			goto nla_put_failure;
		}

		sp = nla_data(attr);
		dev_get_stats(dev, sp);
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;

		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS;
			attr = nla_nest_start_noflag(skb,
						     IFLA_STATS_LINK_XSTATS);
			if (!attr) {
				err = -EMSGSIZE;
				goto nla_put_failure;
			}

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;
			*idxattr = 0;
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
			     *idxattr)) {
		const struct rtnl_link_ops *ops = NULL;
		const struct net_device *master;

		master = netdev_master_upper_dev_get(dev);
		if (master)
			ops = master->rtnl_link_ops;
		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
			attr = nla_nest_start_noflag(skb,
						     IFLA_STATS_LINK_XSTATS_SLAVE);
			if (!attr) {
				err = -EMSGSIZE;
				goto nla_put_failure;
			}

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;
			*idxattr = 0;
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
			     *idxattr)) {
		u32 off_filter_mask;

		off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
		*idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
		attr = nla_nest_start_noflag(skb,
					     IFLA_STATS_LINK_OFFLOAD_XSTATS);
		if (!attr) {
			err = -EMSGSIZE;
			goto nla_put_failure;
		}

		err = rtnl_offload_xstats_fill(skb, dev, prividx,
					       off_filter_mask, extack);
		if (err == -ENODATA)
			nla_nest_cancel(skb, attr);
		else
			nla_nest_end(skb, attr);

		if (err && err != -ENODATA)
			goto nla_put_failure;
		*idxattr = 0;
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
		struct rtnl_af_ops *af_ops;

		*idxattr = IFLA_STATS_AF_SPEC;
		attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
		if (!attr) {
			err = -EMSGSIZE;
			goto nla_put_failure;
		}

		rcu_read_lock();
		list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
			if (af_ops->fill_stats_af) {
				struct nlattr *af;

				af = nla_nest_start_noflag(skb,
							   af_ops->family);
				if (!af) {
					rcu_read_unlock();
					err = -EMSGSIZE;
					goto nla_put_failure;
				}
				err = af_ops->fill_stats_af(skb, dev);

				if (err == -ENODATA) {
					nla_nest_cancel(skb, af);
				} else if (err < 0) {
					rcu_read_unlock();
					goto nla_put_failure;
				}

				nla_nest_end(skb, af);
			}
		}
		rcu_read_unlock();

		nla_nest_end(skb, attr);

		*idxattr = 0;
	}

	nlmsg_end(skb, nlh);

	return 0;

nla_put_failure:
	/* not a multi message or no progress means a real error */
	if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
		nlmsg_cancel(skb, nlh);
	else
		nlmsg_end(skb, nlh);

	return err;
}

static size_t if_nlmsg_stats_size(const struct net_device *dev,
				  const struct rtnl_stats_dump_filters *filters)
{
	size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
	unsigned int filter_mask = filters->mask[0];

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
		size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
		int attr = IFLA_STATS_LINK_XSTATS;

		if (ops && ops->get_linkxstats_size) {
			size += nla_total_size(ops->get_linkxstats_size(dev,
									attr));
			/* for IFLA_STATS_LINK_XSTATS */
			size += nla_total_size(0);
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
		struct net_device *_dev = (struct net_device *)dev;
		const struct rtnl_link_ops *ops = NULL;
		const struct net_device *master;

		/* netdev_master_upper_dev_get can't take const */
		master = netdev_master_upper_dev_get(_dev);
		if (master)
			ops = master->rtnl_link_ops;
		if (ops && ops->get_linkxstats_size) {
			int attr = IFLA_STATS_LINK_XSTATS_SLAVE;

			size += nla_total_size(ops->get_linkxstats_size(dev,
									attr));
			/* for IFLA_STATS_LINK_XSTATS_SLAVE */
			size += nla_total_size(0);
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) {
		u32 off_filter_mask;

		off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
		size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
		struct rtnl_af_ops *af_ops;

		/* for IFLA_STATS_AF_SPEC */
		size += nla_total_size(0);

		rcu_read_lock();
		list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
			if (af_ops->get_stats_af_size) {
				size += nla_total_size(
					af_ops->get_stats_af_size(dev));

				/* for AF_* */
				size += nla_total_size(0);
			}
		}
		rcu_read_unlock();
	}

	return size;
}

#define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1)

static const struct nla_policy
rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
	[IFLA_STATS_LINK_OFFLOAD_XSTATS] =
		    NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID),
};

static const struct nla_policy
rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = {
	[IFLA_STATS_GET_FILTERS] =
		    NLA_POLICY_NESTED(rtnl_stats_get_policy_filters),
};

static const struct nla_policy
ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
	[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1),
};

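/* Parse the IFLA_STATS_GET_FILTERS nest. Each member is itself a u32
 * mask that refines one outer filter_mask bit; e.g. a request could set
 * IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS) in mask[0] and
 * then narrow the nest to only IFLA_OFFLOAD_XSTATS_L3_STATS here.
 */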
static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters,
					struct rtnl_stats_dump_filters *filters,
					struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_STATS_MAX + 1];
	int err;
	int at;

	err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters,
			       rtnl_stats_get_policy_filters, extack);
	if (err < 0)
		return err;

	for (at = 1; at <= IFLA_STATS_MAX; at++) {
		if (tb[at]) {
			if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) {
				NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask");
				return -EINVAL;
			}
			filters->mask[at] = nla_get_u32(tb[at]);
		}
	}

	return 0;
}

static int rtnl_stats_get_parse(const struct nlmsghdr *nlh,
				u32 filter_mask,
				struct rtnl_stats_dump_filters *filters,
				struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
	int err;
	int i;

	filters->mask[0] = filter_mask;
	for (i = 1; i < ARRAY_SIZE(filters->mask); i++)
		filters->mask[i] = -1U;

	err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb,
			  IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack);
	if (err < 0)
		return err;

	if (tb[IFLA_STATS_GET_FILTERS]) {
		err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS],
						   filters, extack);
		if (err)
			return err;
	}

	return 0;
}

static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
				bool is_dump, struct netlink_ext_ack *extack)
{
	struct if_stats_msg *ifsm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
		return -EINVAL;
	}

	if (!strict_check)
		return 0;

	ifsm = nlmsg_data(nlh);

	/* only requests using strict checks can pass data to influence
	 * the dump. The legacy exception is filter_mask.
	 */
	if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
		return -EINVAL;
	}
	if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
		NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
		return -EINVAL;
	}

	return 0;
}

static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct rtnl_stats_dump_filters filters;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev = NULL;
	int idxattr = 0, prividx = 0;
	struct if_stats_msg *ifsm;
	struct sk_buff *nskb;
	int err;

	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
				   false, extack);
	if (err)
		return err;

	ifsm = nlmsg_data(nlh);
	if (ifsm->ifindex > 0)
		dev = __dev_get_by_index(net, ifsm->ifindex);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	if (!ifsm->filter_mask) {
		NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get");
		return -EINVAL;
	}

	err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack);
	if (err)
		return err;

	nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL);
	if (!nskb)
		return -ENOBUFS;

	err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
				  NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
				  0, &filters, &idxattr, &prividx, extack);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else {
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
	}

	return err;
}

static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	int h, s_h, err, s_idx, s_idxattr, s_prividx;
	struct rtnl_stats_dump_filters filters;
	struct net *net = sock_net(skb->sk);
	unsigned int flags = NLM_F_MULTI;
	struct if_stats_msg *ifsm;
	struct hlist_head *head;
	struct net_device *dev;
	int idx = 0;

	s_h = cb->args[0];
	s_idx = cb->args[1];
	s_idxattr = cb->args[2];
	s_prividx = cb->args[3];

	cb->seq = net->dev_base_seq;

	err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
	if (err)
		return err;

	ifsm = nlmsg_data(cb->nlh);
	if (!ifsm->filter_mask) {
		NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
		return -EINVAL;
	}

	err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters,
				   extack);
	if (err)
		return err;

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
			err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq, 0,
						  flags, &filters,
						  &s_idxattr, &s_prividx,
						  extack);
			/* If we ran out of room on the first message,
			 * we're in trouble
			 */
			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));

			if (err < 0)
				goto out;
			s_prividx = 0;
			s_idxattr = 0;
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
	}
out:
	cb->args[3] = s_prividx;
	cb->args[2] = s_idxattr;
	cb->args[1] = idx;
	cb->args[0] = h;

	return skb->len;
}

void rtnl_offload_xstats_notify(struct net_device *dev)
{
	struct rtnl_stats_dump_filters response_filters = {};
	struct net *net = dev_net(dev);
	int idxattr = 0, prividx = 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	ASSERT_RTNL();

	response_filters.mask[0] |=
		IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
	response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
		IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);

	skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
			GFP_KERNEL);
	if (!skb)
		goto errout;

	err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
				  &response_filters, &idxattr, &prividx, NULL);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_STATS, err);
}
EXPORT_SYMBOL(rtnl_offload_xstats_notify);

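/* RTM_SETSTATS: currently only toggles hardware-offloaded L3 stats
 * collection via IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS. In iproute2
 * terms this is roughly what "ip stats set dev <dev> l3_stats on" is
 * expected to send (assuming a recent iproute2).
 */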
static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
	struct rtnl_stats_dump_filters response_filters = {};
	struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_device *dev = NULL;
	struct if_stats_msg *ifsm;
	bool notify = false;
	int err;

	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
				   false, extack);
	if (err)
		return err;

	ifsm = nlmsg_data(nlh);
	if (ifsm->family != AF_UNSPEC) {
		NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC");
		return -EINVAL;
	}

	if (ifsm->ifindex > 0)
		dev = __dev_get_by_index(net, ifsm->ifindex);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	if (ifsm->filter_mask) {
		NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set");
		return -EINVAL;
	}

	err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX,
			  ifla_stats_set_policy, extack);
	if (err < 0)
		return err;

	if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) {
		u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]);

		if (req)
			err = netdev_offload_xstats_enable(dev, t_l3, extack);
		else
			err = netdev_offload_xstats_disable(dev, t_l3);

		if (!err)
			notify = true;
		else if (err != -EALREADY)
			return err;

		response_filters.mask[0] |=
			IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
		response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
			IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
	}

	if (notify)
		rtnl_offload_xstats_notify(dev);

	return 0;
}

/* Process one rtnetlink message. */

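/* Dispatch: GET requests with NLM_F_DUMP go through netlink_dump_start()
 * with the registered dumpit; everything else runs the registered doit,
 * either directly under RCU when the handler is flagged
 * RTNL_FLAG_DOIT_UNLOCKED, or otherwise under rtnl_lock().
 */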
6035static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
6036 struct netlink_ext_ack *extack)
6037{
6038 struct net *net = sock_net(skb->sk);
6039 struct rtnl_link *link;
6040 enum rtnl_kinds kind;
6041 struct module *owner;
6042 int err = -EOPNOTSUPP;
6043 rtnl_doit_func doit;
6044 unsigned int flags;
6045 int family;
6046 int type;
6047
6048 type = nlh->nlmsg_type;
6049 if (type > RTM_MAX)
6050 return -EOPNOTSUPP;
6051
6052 type -= RTM_BASE;
6053
6054 /* All the messages must have at least 1 byte length */
6055 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
6056 return 0;
6057
6058 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
6059 kind = rtnl_msgtype_kind(type);
6060
6061 if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
6062 return -EPERM;
6063
6064 rcu_read_lock();
6065 if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
6066 struct sock *rtnl;
6067 rtnl_dumpit_func dumpit;
6068 u32 min_dump_alloc = 0;
6069
6070 link = rtnl_get_link(family, type);
6071 if (!link || !link->dumpit) {
6072 family = PF_UNSPEC;
6073 link = rtnl_get_link(family, type);
6074 if (!link || !link->dumpit)
6075 goto err_unlock;
6076 }
6077 owner = link->owner;
6078 dumpit = link->dumpit;
6079
6080 if (type == RTM_GETLINK - RTM_BASE)
6081 min_dump_alloc = rtnl_calcit(skb, nlh);
6082
6083 err = 0;
6084 /* need to do this before rcu_read_unlock() */
6085 if (!try_module_get(owner))
6086 err = -EPROTONOSUPPORT;
6087
6088 rcu_read_unlock();
6089
6090 rtnl = net->rtnl;
6091 if (err == 0) {
6092 struct netlink_dump_control c = {
6093 .dump = dumpit,
6094 .min_dump_alloc = min_dump_alloc,
6095 .module = owner,
6096 };
6097 err = netlink_dump_start(rtnl, skb, nlh, &c);
6098 /* netlink_dump_start() will keep a reference on
6099 * module if dump is still in progress.
6100 */
6101 module_put(owner);
6102 }
6103 return err;
6104 }
6105
6106 link = rtnl_get_link(family, type);
6107 if (!link || !link->doit) {
6108 family = PF_UNSPEC;
6109 link = rtnl_get_link(PF_UNSPEC, type);
6110 if (!link || !link->doit)
6111 goto out_unlock;
6112 }
6113
6114 owner = link->owner;
6115 if (!try_module_get(owner)) {
6116 err = -EPROTONOSUPPORT;
6117 goto out_unlock;
6118 }
6119
6120 flags = link->flags;
6121 if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
6122 !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
6123 NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
6124 module_put(owner);
6125 goto err_unlock;
6126 }
6127
6128 if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
6129 doit = link->doit;
6130 rcu_read_unlock();
6131 if (doit)
6132 err = doit(skb, nlh, extack);
6133 module_put(owner);
6134 return err;
6135 }
6136 rcu_read_unlock();
6137
6138 rtnl_lock();
6139 link = rtnl_get_link(family, type);
6140 if (link && link->doit)
6141 err = link->doit(skb, nlh, extack);
6142 rtnl_unlock();
6143
6144 module_put(owner);
6145
6146 return err;
6147
6148out_unlock:
6149 rcu_read_unlock();
6150 return err;
6151
6152err_unlock:
6153 rcu_read_unlock();
6154 return -EOPNOTSUPP;
6155}
6156
6157static void rtnetlink_rcv(struct sk_buff *skb)
6158{
6159 netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
6160}
6161
6162static int rtnetlink_bind(struct net *net, int group)
6163{
6164 switch (group) {
6165 case RTNLGRP_IPV4_MROUTE_R:
6166 case RTNLGRP_IPV6_MROUTE_R:
6167 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
6168 return -EPERM;
6169 break;
6170 }
6171 return 0;
6172}
6173
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_POST_TYPE_CHANGE:
	case NETDEV_NOTIFY_PEERS:
	case NETDEV_CHANGEUPPER:
	case NETDEV_RESEND_IGMP:
	case NETDEV_CHANGEINFODATA:
	case NETDEV_CHANGELOWERSTATE:
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
				   GFP_KERNEL, NULL, 0, 0, NULL);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

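/*
 * Example (illustrative): a driver that wants peers re-notified after
 * e.g. a VM migration can raise one of the events handled above from
 * process context, much as netdev_notify_peers() does:
 *
 *	rtnl_lock();
 *	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
 *	rtnl_unlock();
 *
 * rtnetlink_event() then broadcasts an RTM_NEWLINK message whose
 * IFLA_EVENT attribute carries IFLA_EVENT_NOTIFY_PEERS (cf. the
 * mapping in rtnl_get_event()).
 */
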
static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call = rtnetlink_event,
};

static int __net_init rtnetlink_net_init(struct net *net)
{
	struct sock *sk;
	struct netlink_kernel_cfg cfg = {
		.groups = RTNLGRP_MAX,
		.input = rtnetlink_rcv,
		.cb_mutex = &rtnl_mutex,
		.flags = NL_CFG_F_NONROOT_RECV,
		.bind = rtnetlink_bind,
	};

	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
	if (!sk)
		return -ENOMEM;
	net->rtnl = sk;
	return 0;
}

static void __net_exit rtnetlink_net_exit(struct net *net)
{
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
}

static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};

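/*
 * Example (illustrative): every net namespace gets its own
 * NETLINK_ROUTE kernel socket via the pernet hooks above, so the
 * notification helpers elsewhere in this file resolve net->rtnl and
 * events stay confined to their namespace:
 *
 *	rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, GFP_KERNEL);
 */
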
void __init rtnetlink_init(void)
{
	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, 0);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);

	rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);

	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);
}
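
/*
 * Example (userspace view, illustrative): the PF_BRIDGE RTM_DELNEIGH
 * handler is registered above with RTNL_FLAG_BULK_DEL_SUPPORTED, so a
 * request carrying NLM_F_BULK passes the check in rtnetlink_rcv_msg()
 * and can flush many FDB entries at once.  Sketched with libmnl
 * (attribute details elided):
 *
 *	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
 *	nlh->nlmsg_type = RTM_DELNEIGH;
 *	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_BULK | NLM_F_ACK;
 *	// ... struct ndmsg + attributes selecting the entries ...
 *
 * A DEL handler registered without that flag sees such requests
 * rejected with "Bulk delete is not supported" and -EOPNOTSUPP.
 */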
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Routing netlink socket interface: protocol independent part.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Fixes:
12 * Vitaly E. Lavrov RTA_OK arithmetic was wrong.
13 */
14
15#include <linux/bitops.h>
16#include <linux/errno.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/socket.h>
20#include <linux/kernel.h>
21#include <linux/timer.h>
22#include <linux/string.h>
23#include <linux/sockios.h>
24#include <linux/net.h>
25#include <linux/fcntl.h>
26#include <linux/mm.h>
27#include <linux/slab.h>
28#include <linux/interrupt.h>
29#include <linux/capability.h>
30#include <linux/skbuff.h>
31#include <linux/init.h>
32#include <linux/security.h>
33#include <linux/mutex.h>
34#include <linux/if_addr.h>
35#include <linux/if_bridge.h>
36#include <linux/if_vlan.h>
37#include <linux/pci.h>
38#include <linux/etherdevice.h>
39#include <linux/bpf.h>
40
41#include <linux/uaccess.h>
42
43#include <linux/inet.h>
44#include <linux/netdevice.h>
45#include <net/ip.h>
46#include <net/protocol.h>
47#include <net/arp.h>
48#include <net/route.h>
49#include <net/udp.h>
50#include <net/tcp.h>
51#include <net/sock.h>
52#include <net/pkt_sched.h>
53#include <net/fib_rules.h>
54#include <net/rtnetlink.h>
55#include <net/net_namespace.h>
56#include <net/devlink.h>
57#if IS_ENABLED(CONFIG_IPV6)
58#include <net/addrconf.h>
59#endif
60#include <linux/dpll.h>
61
62#include "dev.h"
63
64#define RTNL_MAX_TYPE 50
65#define RTNL_SLAVE_MAX_TYPE 44
66
67struct rtnl_link {
68 rtnl_doit_func doit;
69 rtnl_dumpit_func dumpit;
70 struct module *owner;
71 unsigned int flags;
72 struct rcu_head rcu;
73};
74
75static DEFINE_MUTEX(rtnl_mutex);
76
77void rtnl_lock(void)
78{
79 mutex_lock(&rtnl_mutex);
80}
81EXPORT_SYMBOL(rtnl_lock);
82
83int rtnl_lock_killable(void)
84{
85 return mutex_lock_killable(&rtnl_mutex);
86}
87EXPORT_SYMBOL(rtnl_lock_killable);
88
89static struct sk_buff *defer_kfree_skb_list;
90void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
91{
92 if (head && tail) {
93 tail->next = defer_kfree_skb_list;
94 defer_kfree_skb_list = head;
95 }
96}
97EXPORT_SYMBOL(rtnl_kfree_skbs);
98
99void __rtnl_unlock(void)
100{
101 struct sk_buff *head = defer_kfree_skb_list;
102
103 defer_kfree_skb_list = NULL;
104
105 /* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
106 * is used. In some places, e.g. in cfg80211, we have code that will do
107 * something like
108 * rtnl_lock()
109 * wiphy_lock()
110 * ...
111 * rtnl_unlock()
112 *
113 * and because netdev_run_todo() acquires the RTNL for items on the list
114 * we could cause a situation such as this:
115 * Thread 1 Thread 2
116 * rtnl_lock()
117 * unregister_netdevice()
118 * __rtnl_unlock()
119 * rtnl_lock()
120 * wiphy_lock()
121 * rtnl_unlock()
122 * netdev_run_todo()
123 * __rtnl_unlock()
124 *
125 * // list not empty now
126 * // because of thread 2
127 * rtnl_lock()
128 * while (!list_empty(...))
129 * rtnl_lock()
130 * wiphy_lock()
131 * **** DEADLOCK ****
132 *
133 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
134 * it's not used in cases where something is added to do the list.
135 */
136 WARN_ON(!list_empty(&net_todo_list));
137
138 mutex_unlock(&rtnl_mutex);
139
140 while (head) {
141 struct sk_buff *next = head->next;
142
143 kfree_skb(head);
144 cond_resched();
145 head = next;
146 }
147}
148
149void rtnl_unlock(void)
150{
151 /* This fellow will unlock it for us. */
152 netdev_run_todo();
153}
154EXPORT_SYMBOL(rtnl_unlock);
155
156int rtnl_trylock(void)
157{
158 return mutex_trylock(&rtnl_mutex);
159}
160EXPORT_SYMBOL(rtnl_trylock);
161
162int rtnl_is_locked(void)
163{
164 return mutex_is_locked(&rtnl_mutex);
165}
166EXPORT_SYMBOL(rtnl_is_locked);
167
168bool refcount_dec_and_rtnl_lock(refcount_t *r)
169{
170 return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
171}
172EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
173
174#ifdef CONFIG_PROVE_LOCKING
175bool lockdep_rtnl_is_held(void)
176{
177 return lockdep_is_held(&rtnl_mutex);
178}
179EXPORT_SYMBOL(lockdep_rtnl_is_held);
180#endif /* #ifdef CONFIG_PROVE_LOCKING */
181
182static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
183
184static inline int rtm_msgindex(int msgtype)
185{
186 int msgindex = msgtype - RTM_BASE;
187
188 /*
189 * msgindex < 0 implies someone tried to register a netlink
190 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
191 * the message type has not been added to linux/rtnetlink.h
192 */
193 BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
194
195 return msgindex;
196}
197
198static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
199{
200 struct rtnl_link __rcu **tab;
201
202 if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
203 protocol = PF_UNSPEC;
204
205 tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
206 if (!tab)
207 tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);
208
209 return rcu_dereference_rtnl(tab[msgtype]);
210}
211
212static int rtnl_register_internal(struct module *owner,
213 int protocol, int msgtype,
214 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
215 unsigned int flags)
216{
217 struct rtnl_link *link, *old;
218 struct rtnl_link __rcu **tab;
219 int msgindex;
220 int ret = -ENOBUFS;
221
222 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
223 msgindex = rtm_msgindex(msgtype);
224
225 rtnl_lock();
226 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
227 if (tab == NULL) {
228 tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
229 if (!tab)
230 goto unlock;
231
232 /* ensures we see the 0 stores */
233 rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
234 }
235
236 old = rtnl_dereference(tab[msgindex]);
237 if (old) {
238 link = kmemdup(old, sizeof(*old), GFP_KERNEL);
239 if (!link)
240 goto unlock;
241 } else {
242 link = kzalloc(sizeof(*link), GFP_KERNEL);
243 if (!link)
244 goto unlock;
245 }
246
247 WARN_ON(link->owner && link->owner != owner);
248 link->owner = owner;
249
250 WARN_ON(doit && link->doit && link->doit != doit);
251 if (doit)
252 link->doit = doit;
253 WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
254 if (dumpit)
255 link->dumpit = dumpit;
256
257 WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
258 (flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
259 link->flags |= flags;
260
261 /* publish protocol:msgtype */
262 rcu_assign_pointer(tab[msgindex], link);
263 ret = 0;
264 if (old)
265 kfree_rcu(old, rcu);
266unlock:
267 rtnl_unlock();
268 return ret;
269}
270
271/**
272 * rtnl_register_module - Register a rtnetlink message type
273 *
274 * @owner: module registering the hook (THIS_MODULE)
275 * @protocol: Protocol family or PF_UNSPEC
276 * @msgtype: rtnetlink message type
277 * @doit: Function pointer called for each request message
278 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
279 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
280 *
281 * Like rtnl_register, but for use by removable modules.
282 */
283int rtnl_register_module(struct module *owner,
284 int protocol, int msgtype,
285 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
286 unsigned int flags)
287{
288 return rtnl_register_internal(owner, protocol, msgtype,
289 doit, dumpit, flags);
290}
291EXPORT_SYMBOL_GPL(rtnl_register_module);
292
293/**
294 * rtnl_register - Register a rtnetlink message type
295 * @protocol: Protocol family or PF_UNSPEC
296 * @msgtype: rtnetlink message type
297 * @doit: Function pointer called for each request message
298 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
299 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
300 *
301 * Registers the specified function pointers (at least one of them has
302 * to be non-NULL) to be called whenever a request message for the
303 * specified protocol family and message type is received.
304 *
305 * The special protocol family PF_UNSPEC may be used to define fallback
306 * function pointers for the case when no entry for the specific protocol
307 * family exists.
308 */
309void rtnl_register(int protocol, int msgtype,
310 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
311 unsigned int flags)
312{
313 int err;
314
315 err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
316 flags);
317 if (err)
318 pr_err("Unable to register rtnetlink message handler, "
319 "protocol = %d, message type = %d\n", protocol, msgtype);
320}
321
322/**
323 * rtnl_unregister - Unregister a rtnetlink message type
324 * @protocol: Protocol family or PF_UNSPEC
325 * @msgtype: rtnetlink message type
326 *
327 * Returns 0 on success or a negative error code.
328 */
329int rtnl_unregister(int protocol, int msgtype)
330{
331 struct rtnl_link __rcu **tab;
332 struct rtnl_link *link;
333 int msgindex;
334
335 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
336 msgindex = rtm_msgindex(msgtype);
337
338 rtnl_lock();
339 tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
340 if (!tab) {
341 rtnl_unlock();
342 return -ENOENT;
343 }
344
345 link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
346 rtnl_unlock();
347
348 kfree_rcu(link, rcu);
349
350 return 0;
351}
352EXPORT_SYMBOL_GPL(rtnl_unregister);
353
354/**
355 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
356 * @protocol : Protocol family or PF_UNSPEC
357 *
358 * Identical to calling rtnl_unregster() for all registered message types
359 * of a certain protocol family.
360 */
361void rtnl_unregister_all(int protocol)
362{
363 struct rtnl_link __rcu **tab;
364 struct rtnl_link *link;
365 int msgindex;
366
367 BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
368
369 rtnl_lock();
370 tab = rcu_replace_pointer_rtnl(rtnl_msg_handlers[protocol], NULL);
371 if (!tab) {
372 rtnl_unlock();
373 return;
374 }
375 for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
376 link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
377 kfree_rcu(link, rcu);
378 }
379 rtnl_unlock();
380
381 synchronize_net();
382
383 kfree(tab);
384}
385EXPORT_SYMBOL_GPL(rtnl_unregister_all);
386
387static LIST_HEAD(link_ops);
388
389static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
390{
391 const struct rtnl_link_ops *ops;
392
393 list_for_each_entry(ops, &link_ops, list) {
394 if (!strcmp(ops->kind, kind))
395 return ops;
396 }
397 return NULL;
398}
399
400/**
401 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
402 * @ops: struct rtnl_link_ops * to register
403 *
404 * The caller must hold the rtnl_mutex. This function should be used
405 * by drivers that create devices during module initialization. It
406 * must be called before registering the devices.
407 *
408 * Returns 0 on success or a negative error code.
409 */
410int __rtnl_link_register(struct rtnl_link_ops *ops)
411{
412 if (rtnl_link_ops_get(ops->kind))
413 return -EEXIST;
414
415 /* The check for alloc/setup is here because if ops
416 * does not have that filled up, it is not possible
417 * to use the ops for creating device. So do not
418 * fill up dellink as well. That disables rtnl_dellink.
419 */
420 if ((ops->alloc || ops->setup) && !ops->dellink)
421 ops->dellink = unregister_netdevice_queue;
422
423 list_add_tail(&ops->list, &link_ops);
424 return 0;
425}
426EXPORT_SYMBOL_GPL(__rtnl_link_register);
427
428/**
429 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
430 * @ops: struct rtnl_link_ops * to register
431 *
432 * Returns 0 on success or a negative error code.
433 */
434int rtnl_link_register(struct rtnl_link_ops *ops)
435{
436 int err;
437
438 /* Sanity-check max sizes to avoid stack buffer overflow. */
439 if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
440 ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
441 return -EINVAL;
442
443 rtnl_lock();
444 err = __rtnl_link_register(ops);
445 rtnl_unlock();
446 return err;
447}
448EXPORT_SYMBOL_GPL(rtnl_link_register);
449
450static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
451{
452 struct net_device *dev;
453 LIST_HEAD(list_kill);
454
455 for_each_netdev(net, dev) {
456 if (dev->rtnl_link_ops == ops)
457 ops->dellink(dev, &list_kill);
458 }
459 unregister_netdevice_many(&list_kill);
460}
461
462/**
463 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
464 * @ops: struct rtnl_link_ops * to unregister
465 *
466 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
467 * integrity (hold pernet_ops_rwsem for writing to close the race
468 * with setup_net() and cleanup_net()).
469 */
470void __rtnl_link_unregister(struct rtnl_link_ops *ops)
471{
472 struct net *net;
473
474 for_each_net(net) {
475 __rtnl_kill_links(net, ops);
476 }
477 list_del(&ops->list);
478}
479EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
480
481/* Return with the rtnl_lock held when there are no network
482 * devices unregistering in any network namespace.
483 */
484static void rtnl_lock_unregistering_all(void)
485{
486 DEFINE_WAIT_FUNC(wait, woken_wake_function);
487
488 add_wait_queue(&netdev_unregistering_wq, &wait);
489 for (;;) {
490 rtnl_lock();
491 /* We held write locked pernet_ops_rwsem, and parallel
492 * setup_net() and cleanup_net() are not possible.
493 */
494 if (!atomic_read(&dev_unreg_count))
495 break;
496 __rtnl_unlock();
497
498 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
499 }
500 remove_wait_queue(&netdev_unregistering_wq, &wait);
501}
502
503/**
504 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
505 * @ops: struct rtnl_link_ops * to unregister
506 */
507void rtnl_link_unregister(struct rtnl_link_ops *ops)
508{
509 /* Close the race with setup_net() and cleanup_net() */
510 down_write(&pernet_ops_rwsem);
511 rtnl_lock_unregistering_all();
512 __rtnl_link_unregister(ops);
513 rtnl_unlock();
514 up_write(&pernet_ops_rwsem);
515}
516EXPORT_SYMBOL_GPL(rtnl_link_unregister);
517
518static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
519{
520 struct net_device *master_dev;
521 const struct rtnl_link_ops *ops;
522 size_t size = 0;
523
524 rcu_read_lock();
525
526 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
527 if (!master_dev)
528 goto out;
529
530 ops = master_dev->rtnl_link_ops;
531 if (!ops || !ops->get_slave_size)
532 goto out;
533 /* IFLA_INFO_SLAVE_DATA + nested data */
534 size = nla_total_size(sizeof(struct nlattr)) +
535 ops->get_slave_size(master_dev, dev);
536
537out:
538 rcu_read_unlock();
539 return size;
540}
541
542static size_t rtnl_link_get_size(const struct net_device *dev)
543{
544 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
545 size_t size;
546
547 if (!ops)
548 return 0;
549
550 size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
551 nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
552
553 if (ops->get_size)
554 /* IFLA_INFO_DATA + nested data */
555 size += nla_total_size(sizeof(struct nlattr)) +
556 ops->get_size(dev);
557
558 if (ops->get_xstats_size)
559 /* IFLA_INFO_XSTATS */
560 size += nla_total_size(ops->get_xstats_size(dev));
561
562 size += rtnl_link_get_slave_info_data_size(dev);
563
564 return size;
565}
566
567static LIST_HEAD(rtnl_af_ops);
568
569static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
570{
571 const struct rtnl_af_ops *ops;
572
573 ASSERT_RTNL();
574
575 list_for_each_entry(ops, &rtnl_af_ops, list) {
576 if (ops->family == family)
577 return ops;
578 }
579
580 return NULL;
581}
582
583/**
584 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
585 * @ops: struct rtnl_af_ops * to register
586 *
587 * Returns 0 on success or a negative error code.
588 */
589void rtnl_af_register(struct rtnl_af_ops *ops)
590{
591 rtnl_lock();
592 list_add_tail_rcu(&ops->list, &rtnl_af_ops);
593 rtnl_unlock();
594}
595EXPORT_SYMBOL_GPL(rtnl_af_register);
596
597/**
598 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
599 * @ops: struct rtnl_af_ops * to unregister
600 */
601void rtnl_af_unregister(struct rtnl_af_ops *ops)
602{
603 rtnl_lock();
604 list_del_rcu(&ops->list);
605 rtnl_unlock();
606
607 synchronize_rcu();
608}
609EXPORT_SYMBOL_GPL(rtnl_af_unregister);
610
611static size_t rtnl_link_get_af_size(const struct net_device *dev,
612 u32 ext_filter_mask)
613{
614 struct rtnl_af_ops *af_ops;
615 size_t size;
616
617 /* IFLA_AF_SPEC */
618 size = nla_total_size(sizeof(struct nlattr));
619
620 rcu_read_lock();
621 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
622 if (af_ops->get_link_af_size) {
623 /* AF_* + nested data */
624 size += nla_total_size(sizeof(struct nlattr)) +
625 af_ops->get_link_af_size(dev, ext_filter_mask);
626 }
627 }
628 rcu_read_unlock();
629
630 return size;
631}
632
633static bool rtnl_have_link_slave_info(const struct net_device *dev)
634{
635 struct net_device *master_dev;
636 bool ret = false;
637
638 rcu_read_lock();
639
640 master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
641 if (master_dev && master_dev->rtnl_link_ops)
642 ret = true;
643 rcu_read_unlock();
644 return ret;
645}
646
647static int rtnl_link_slave_info_fill(struct sk_buff *skb,
648 const struct net_device *dev)
649{
650 struct net_device *master_dev;
651 const struct rtnl_link_ops *ops;
652 struct nlattr *slave_data;
653 int err;
654
655 master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
656 if (!master_dev)
657 return 0;
658 ops = master_dev->rtnl_link_ops;
659 if (!ops)
660 return 0;
661 if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
662 return -EMSGSIZE;
663 if (ops->fill_slave_info) {
664 slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
665 if (!slave_data)
666 return -EMSGSIZE;
667 err = ops->fill_slave_info(skb, master_dev, dev);
668 if (err < 0)
669 goto err_cancel_slave_data;
670 nla_nest_end(skb, slave_data);
671 }
672 return 0;
673
674err_cancel_slave_data:
675 nla_nest_cancel(skb, slave_data);
676 return err;
677}
678
679static int rtnl_link_info_fill(struct sk_buff *skb,
680 const struct net_device *dev)
681{
682 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
683 struct nlattr *data;
684 int err;
685
686 if (!ops)
687 return 0;
688 if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
689 return -EMSGSIZE;
690 if (ops->fill_xstats) {
691 err = ops->fill_xstats(skb, dev);
692 if (err < 0)
693 return err;
694 }
695 if (ops->fill_info) {
696 data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
697 if (data == NULL)
698 return -EMSGSIZE;
699 err = ops->fill_info(skb, dev);
700 if (err < 0)
701 goto err_cancel_data;
702 nla_nest_end(skb, data);
703 }
704 return 0;
705
706err_cancel_data:
707 nla_nest_cancel(skb, data);
708 return err;
709}
710
711static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
712{
713 struct nlattr *linkinfo;
714 int err = -EMSGSIZE;
715
716 linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
717 if (linkinfo == NULL)
718 goto out;
719
720 err = rtnl_link_info_fill(skb, dev);
721 if (err < 0)
722 goto err_cancel_link;
723
724 err = rtnl_link_slave_info_fill(skb, dev);
725 if (err < 0)
726 goto err_cancel_link;
727
728 nla_nest_end(skb, linkinfo);
729 return 0;
730
731err_cancel_link:
732 nla_nest_cancel(skb, linkinfo);
733out:
734 return err;
735}
736
737int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
738{
739 struct sock *rtnl = net->rtnl;
740
741 return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
742}
743
744int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
745{
746 struct sock *rtnl = net->rtnl;
747
748 return nlmsg_unicast(rtnl, skb, pid);
749}
750EXPORT_SYMBOL(rtnl_unicast);
751
752void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
753 const struct nlmsghdr *nlh, gfp_t flags)
754{
755 struct sock *rtnl = net->rtnl;
756
757 nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
758}
759EXPORT_SYMBOL(rtnl_notify);
760
761void rtnl_set_sk_err(struct net *net, u32 group, int error)
762{
763 struct sock *rtnl = net->rtnl;
764
765 netlink_set_err(rtnl, 0, group, error);
766}
767EXPORT_SYMBOL(rtnl_set_sk_err);
768
769int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
770{
771 struct nlattr *mx;
772 int i, valid = 0;
773
774 /* nothing is dumped for dst_default_metrics, so just skip the loop */
775 if (metrics == dst_default_metrics.metrics)
776 return 0;
777
778 mx = nla_nest_start_noflag(skb, RTA_METRICS);
779 if (mx == NULL)
780 return -ENOBUFS;
781
782 for (i = 0; i < RTAX_MAX; i++) {
783 if (metrics[i]) {
784 if (i == RTAX_CC_ALGO - 1) {
785 char tmp[TCP_CA_NAME_MAX], *name;
786
787 name = tcp_ca_get_name_by_key(metrics[i], tmp);
788 if (!name)
789 continue;
790 if (nla_put_string(skb, i + 1, name))
791 goto nla_put_failure;
792 } else if (i == RTAX_FEATURES - 1) {
793 u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
794
795 if (!user_features)
796 continue;
797 BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
798 if (nla_put_u32(skb, i + 1, user_features))
799 goto nla_put_failure;
800 } else {
801 if (nla_put_u32(skb, i + 1, metrics[i]))
802 goto nla_put_failure;
803 }
804 valid++;
805 }
806 }
807
808 if (!valid) {
809 nla_nest_cancel(skb, mx);
810 return 0;
811 }
812
813 return nla_nest_end(skb, mx);
814
815nla_put_failure:
816 nla_nest_cancel(skb, mx);
817 return -EMSGSIZE;
818}
819EXPORT_SYMBOL(rtnetlink_put_metrics);
820
821int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
822 long expires, u32 error)
823{
824 struct rta_cacheinfo ci = {
825 .rta_error = error,
826 .rta_id = id,
827 };
828
829 if (dst) {
830 ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
831 ci.rta_used = dst->__use;
832 ci.rta_clntref = rcuref_read(&dst->__rcuref);
833 }
834 if (expires) {
835 unsigned long clock;
836
837 clock = jiffies_to_clock_t(abs(expires));
838 clock = min_t(unsigned long, clock, INT_MAX);
839 ci.rta_expires = (expires > 0) ? clock : -clock;
840 }
841 return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
842}
843EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
844
845void netdev_set_operstate(struct net_device *dev, int newstate)
846{
847 unsigned int old = READ_ONCE(dev->operstate);
848
849 do {
850 if (old == newstate)
851 return;
852 } while (!try_cmpxchg(&dev->operstate, &old, newstate));
853
854 netdev_state_change(dev);
855}
856EXPORT_SYMBOL(netdev_set_operstate);
857
858static void set_operstate(struct net_device *dev, unsigned char transition)
859{
860 unsigned char operstate = READ_ONCE(dev->operstate);
861
862 switch (transition) {
863 case IF_OPER_UP:
864 if ((operstate == IF_OPER_DORMANT ||
865 operstate == IF_OPER_TESTING ||
866 operstate == IF_OPER_UNKNOWN) &&
867 !netif_dormant(dev) && !netif_testing(dev))
868 operstate = IF_OPER_UP;
869 break;
870
871 case IF_OPER_TESTING:
872 if (netif_oper_up(dev))
873 operstate = IF_OPER_TESTING;
874 break;
875
876 case IF_OPER_DORMANT:
877 if (netif_oper_up(dev))
878 operstate = IF_OPER_DORMANT;
879 break;
880 }
881
882 netdev_set_operstate(dev, operstate);
883}
884
885static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
886{
887 return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
888 (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
889}
890
891static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
892 const struct ifinfomsg *ifm)
893{
894 unsigned int flags = ifm->ifi_flags;
895
896 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
897 if (ifm->ifi_change)
898 flags = (flags & ifm->ifi_change) |
899 (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
900
901 return flags;
902}
903
904static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
905 const struct rtnl_link_stats64 *b)
906{
907 a->rx_packets = b->rx_packets;
908 a->tx_packets = b->tx_packets;
909 a->rx_bytes = b->rx_bytes;
910 a->tx_bytes = b->tx_bytes;
911 a->rx_errors = b->rx_errors;
912 a->tx_errors = b->tx_errors;
913 a->rx_dropped = b->rx_dropped;
914 a->tx_dropped = b->tx_dropped;
915
916 a->multicast = b->multicast;
917 a->collisions = b->collisions;
918
919 a->rx_length_errors = b->rx_length_errors;
920 a->rx_over_errors = b->rx_over_errors;
921 a->rx_crc_errors = b->rx_crc_errors;
922 a->rx_frame_errors = b->rx_frame_errors;
923 a->rx_fifo_errors = b->rx_fifo_errors;
924 a->rx_missed_errors = b->rx_missed_errors;
925
926 a->tx_aborted_errors = b->tx_aborted_errors;
927 a->tx_carrier_errors = b->tx_carrier_errors;
928 a->tx_fifo_errors = b->tx_fifo_errors;
929 a->tx_heartbeat_errors = b->tx_heartbeat_errors;
930 a->tx_window_errors = b->tx_window_errors;
931
932 a->rx_compressed = b->rx_compressed;
933 a->tx_compressed = b->tx_compressed;
934
935 a->rx_nohandler = b->rx_nohandler;
936}
937
938/* All VF info */
939static inline int rtnl_vfinfo_size(const struct net_device *dev,
940 u32 ext_filter_mask)
941{
942 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
943 int num_vfs = dev_num_vf(dev->dev.parent);
944 size_t size = nla_total_size(0);
945 size += num_vfs *
946 (nla_total_size(0) +
947 nla_total_size(sizeof(struct ifla_vf_mac)) +
948 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
949 nla_total_size(sizeof(struct ifla_vf_vlan)) +
950 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
951 nla_total_size(MAX_VLAN_LIST_LEN *
952 sizeof(struct ifla_vf_vlan_info)) +
953 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
954 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
955 nla_total_size(sizeof(struct ifla_vf_rate)) +
956 nla_total_size(sizeof(struct ifla_vf_link_state)) +
957 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
958 nla_total_size(sizeof(struct ifla_vf_trust)));
959 if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
960 size += num_vfs *
961 (nla_total_size(0) + /* nest IFLA_VF_STATS */
962 /* IFLA_VF_STATS_RX_PACKETS */
963 nla_total_size_64bit(sizeof(__u64)) +
964 /* IFLA_VF_STATS_TX_PACKETS */
965 nla_total_size_64bit(sizeof(__u64)) +
966 /* IFLA_VF_STATS_RX_BYTES */
967 nla_total_size_64bit(sizeof(__u64)) +
968 /* IFLA_VF_STATS_TX_BYTES */
969 nla_total_size_64bit(sizeof(__u64)) +
970 /* IFLA_VF_STATS_BROADCAST */
971 nla_total_size_64bit(sizeof(__u64)) +
972 /* IFLA_VF_STATS_MULTICAST */
973 nla_total_size_64bit(sizeof(__u64)) +
974 /* IFLA_VF_STATS_RX_DROPPED */
975 nla_total_size_64bit(sizeof(__u64)) +
976 /* IFLA_VF_STATS_TX_DROPPED */
977 nla_total_size_64bit(sizeof(__u64)));
978 }
979 return size;
980 } else
981 return 0;
982}
983
984static size_t rtnl_port_size(const struct net_device *dev,
985 u32 ext_filter_mask)
986{
987 size_t port_size = nla_total_size(4) /* PORT_VF */
988 + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
989 + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
990 + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
991 + nla_total_size(1) /* PROT_VDP_REQUEST */
992 + nla_total_size(2); /* PORT_VDP_RESPONSE */
993 size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
994 size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
995 + port_size;
996 size_t port_self_size = nla_total_size(sizeof(struct nlattr))
997 + port_size;
998
999 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1000 !(ext_filter_mask & RTEXT_FILTER_VF))
1001 return 0;
1002 if (dev_num_vf(dev->dev.parent))
1003 return port_self_size + vf_ports_size +
1004 vf_port_size * dev_num_vf(dev->dev.parent);
1005 else
1006 return port_self_size;
1007}
1008
1009static size_t rtnl_xdp_size(void)
1010{
1011 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
1012 nla_total_size(1) + /* XDP_ATTACHED */
1013 nla_total_size(4) + /* XDP_PROG_ID (or 1st mode) */
1014 nla_total_size(4); /* XDP_<mode>_PROG_ID */
1015
1016 return xdp_size;
1017}
1018
1019static size_t rtnl_prop_list_size(const struct net_device *dev)
1020{
1021 struct netdev_name_node *name_node;
1022 unsigned int cnt = 0;
1023
1024 rcu_read_lock();
1025 list_for_each_entry_rcu(name_node, &dev->name_node->list, list)
1026 cnt++;
1027 rcu_read_unlock();
1028
1029 if (!cnt)
1030 return 0;
1031
1032 return nla_total_size(0) + cnt * nla_total_size(ALTIFNAMSIZ);
1033}
1034
1035static size_t rtnl_proto_down_size(const struct net_device *dev)
1036{
1037 size_t size = nla_total_size(1);
1038
1039 if (dev->proto_down_reason)
1040 size += nla_total_size(0) + nla_total_size(4);
1041
1042 return size;
1043}
1044
1045static size_t rtnl_devlink_port_size(const struct net_device *dev)
1046{
1047 size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */
1048
1049 if (dev->devlink_port)
1050 size += devlink_nl_port_handle_size(dev->devlink_port);
1051
1052 return size;
1053}
1054
1055static size_t rtnl_dpll_pin_size(const struct net_device *dev)
1056{
1057 size_t size = nla_total_size(0); /* nest IFLA_DPLL_PIN */
1058
1059 size += dpll_netdev_pin_handle_size(dev);
1060
1061 return size;
1062}
1063
1064static noinline size_t if_nlmsg_size(const struct net_device *dev,
1065 u32 ext_filter_mask)
1066{
1067 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
1068 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
1069 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
1070 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
1071 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
1072 + nla_total_size(sizeof(struct rtnl_link_stats))
1073 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
1074 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
1075 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
1076 + nla_total_size(4) /* IFLA_TXQLEN */
1077 + nla_total_size(4) /* IFLA_WEIGHT */
1078 + nla_total_size(4) /* IFLA_MTU */
1079 + nla_total_size(4) /* IFLA_LINK */
1080 + nla_total_size(4) /* IFLA_MASTER */
1081 + nla_total_size(1) /* IFLA_CARRIER */
1082 + nla_total_size(4) /* IFLA_PROMISCUITY */
1083 + nla_total_size(4) /* IFLA_ALLMULTI */
1084 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
1085 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
1086 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
1087 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
1088 + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
1089 + nla_total_size(4) /* IFLA_GSO_IPV4_MAX_SIZE */
1090 + nla_total_size(4) /* IFLA_GRO_IPV4_MAX_SIZE */
1091 + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
1092 + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
1093 + nla_total_size(1) /* IFLA_OPERSTATE */
1094 + nla_total_size(1) /* IFLA_LINKMODE */
1095 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
1096 + nla_total_size(4) /* IFLA_LINK_NETNSID */
1097 + nla_total_size(4) /* IFLA_GROUP */
1098 + nla_total_size(ext_filter_mask
1099 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
1100 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
1101 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
1102 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
1103 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
1104 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
1105 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
1106 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
1107 + rtnl_xdp_size() /* IFLA_XDP */
1108 + nla_total_size(4) /* IFLA_EVENT */
1109 + nla_total_size(4) /* IFLA_NEW_NETNSID */
1110 + nla_total_size(4) /* IFLA_NEW_IFINDEX */
1111 + rtnl_proto_down_size(dev) /* proto down */
1112 + nla_total_size(4) /* IFLA_TARGET_NETNSID */
1113 + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
1114 + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
1115 + nla_total_size(4) /* IFLA_MIN_MTU */
1116 + nla_total_size(4) /* IFLA_MAX_MTU */
1117 + rtnl_prop_list_size(dev)
1118 + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
1119 + rtnl_devlink_port_size(dev)
1120 + rtnl_dpll_pin_size(dev)
1121 + 0;
1122}
1123
1124static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
1125{
1126 struct nlattr *vf_ports;
1127 struct nlattr *vf_port;
1128 int vf;
1129 int err;
1130
1131 vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
1132 if (!vf_ports)
1133 return -EMSGSIZE;
1134
1135 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
1136 vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
1137 if (!vf_port)
1138 goto nla_put_failure;
1139 if (nla_put_u32(skb, IFLA_PORT_VF, vf))
1140 goto nla_put_failure;
1141 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
1142 if (err == -EMSGSIZE)
1143 goto nla_put_failure;
1144 if (err) {
1145 nla_nest_cancel(skb, vf_port);
1146 continue;
1147 }
1148 nla_nest_end(skb, vf_port);
1149 }
1150
1151 nla_nest_end(skb, vf_ports);
1152
1153 return 0;
1154
1155nla_put_failure:
1156 nla_nest_cancel(skb, vf_ports);
1157 return -EMSGSIZE;
1158}
1159
1160static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
1161{
1162 struct nlattr *port_self;
1163 int err;
1164
1165 port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
1166 if (!port_self)
1167 return -EMSGSIZE;
1168
1169 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
1170 if (err) {
1171 nla_nest_cancel(skb, port_self);
1172 return (err == -EMSGSIZE) ? err : 0;
1173 }
1174
1175 nla_nest_end(skb, port_self);
1176
1177 return 0;
1178}
1179
1180static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
1181 u32 ext_filter_mask)
1182{
1183 int err;
1184
1185 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1186 !(ext_filter_mask & RTEXT_FILTER_VF))
1187 return 0;
1188
1189 err = rtnl_port_self_fill(skb, dev);
1190 if (err)
1191 return err;
1192
1193 if (dev_num_vf(dev->dev.parent)) {
1194 err = rtnl_vf_ports_fill(skb, dev);
1195 if (err)
1196 return err;
1197 }
1198
1199 return 0;
1200}
1201
1202static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1203{
1204 int err;
1205 struct netdev_phys_item_id ppid;
1206
1207 err = dev_get_phys_port_id(dev, &ppid);
1208 if (err) {
1209 if (err == -EOPNOTSUPP)
1210 return 0;
1211 return err;
1212 }
1213
1214 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1215 return -EMSGSIZE;
1216
1217 return 0;
1218}
1219
1220static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1221{
1222 char name[IFNAMSIZ];
1223 int err;
1224
1225 err = dev_get_phys_port_name(dev, name, sizeof(name));
1226 if (err) {
1227 if (err == -EOPNOTSUPP)
1228 return 0;
1229 return err;
1230 }
1231
1232 if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
1233 return -EMSGSIZE;
1234
1235 return 0;
1236}
1237
1238static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1239{
1240 struct netdev_phys_item_id ppid = { };
1241 int err;
1242
1243 err = dev_get_port_parent_id(dev, &ppid, false);
1244 if (err) {
1245 if (err == -EOPNOTSUPP)
1246 return 0;
1247 return err;
1248 }
1249
1250 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
1251 return -EMSGSIZE;
1252
1253 return 0;
1254}
1255
1256static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1257 struct net_device *dev)
1258{
1259 struct rtnl_link_stats64 *sp;
1260 struct nlattr *attr;
1261
1262 attr = nla_reserve_64bit(skb, IFLA_STATS64,
1263 sizeof(struct rtnl_link_stats64), IFLA_PAD);
1264 if (!attr)
1265 return -EMSGSIZE;
1266
1267 sp = nla_data(attr);
1268 dev_get_stats(dev, sp);
1269
1270 attr = nla_reserve(skb, IFLA_STATS,
1271 sizeof(struct rtnl_link_stats));
1272 if (!attr)
1273 return -EMSGSIZE;
1274
1275 copy_rtnl_link_stats(nla_data(attr), sp);
1276
1277 return 0;
1278}
1279
1280static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1281 struct net_device *dev,
1282 int vfs_num,
1283 u32 ext_filter_mask)
1284{
1285 struct ifla_vf_rss_query_en vf_rss_query_en;
1286 struct nlattr *vf, *vfstats, *vfvlanlist;
1287 struct ifla_vf_link_state vf_linkstate;
1288 struct ifla_vf_vlan_info vf_vlan_info;
1289 struct ifla_vf_spoofchk vf_spoofchk;
1290 struct ifla_vf_tx_rate vf_tx_rate;
1291 struct ifla_vf_stats vf_stats;
1292 struct ifla_vf_trust vf_trust;
1293 struct ifla_vf_vlan vf_vlan;
1294 struct ifla_vf_rate vf_rate;
1295 struct ifla_vf_mac vf_mac;
1296 struct ifla_vf_broadcast vf_broadcast;
1297 struct ifla_vf_info ivi;
1298 struct ifla_vf_guid node_guid;
1299 struct ifla_vf_guid port_guid;
1300
1301 memset(&ivi, 0, sizeof(ivi));
1302
1303 /* Not all SR-IOV capable drivers support the
1304 * spoofcheck and "RSS query enable" query. Preset to
1305 * -1 so the user space tool can detect that the driver
1306 * didn't report anything.
1307 */
1308 ivi.spoofchk = -1;
1309 ivi.rss_query_en = -1;
1310 ivi.trusted = -1;
1311 /* The default value for VF link state is "auto"
1312 * IFLA_VF_LINK_STATE_AUTO which equals zero
1313 */
1314 ivi.linkstate = 0;
1315 /* VLAN Protocol by default is 802.1Q */
1316 ivi.vlan_proto = htons(ETH_P_8021Q);
1317 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1318 return 0;
1319
1320 memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
1321 memset(&node_guid, 0, sizeof(node_guid));
1322 memset(&port_guid, 0, sizeof(port_guid));
1323
1324 vf_mac.vf =
1325 vf_vlan.vf =
1326 vf_vlan_info.vf =
1327 vf_rate.vf =
1328 vf_tx_rate.vf =
1329 vf_spoofchk.vf =
1330 vf_linkstate.vf =
1331 vf_rss_query_en.vf =
1332 vf_trust.vf =
1333 node_guid.vf =
1334 port_guid.vf = ivi.vf;
1335
1336 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1337 memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
1338 vf_vlan.vlan = ivi.vlan;
1339 vf_vlan.qos = ivi.qos;
1340 vf_vlan_info.vlan = ivi.vlan;
1341 vf_vlan_info.qos = ivi.qos;
1342 vf_vlan_info.vlan_proto = ivi.vlan_proto;
1343 vf_tx_rate.rate = ivi.max_tx_rate;
1344 vf_rate.min_tx_rate = ivi.min_tx_rate;
1345 vf_rate.max_tx_rate = ivi.max_tx_rate;
1346 vf_spoofchk.setting = ivi.spoofchk;
1347 vf_linkstate.link_state = ivi.linkstate;
1348 vf_rss_query_en.setting = ivi.rss_query_en;
1349 vf_trust.setting = ivi.trusted;
1350 vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
1351 if (!vf)
1352 return -EMSGSIZE;
1353 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1354 nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
1355 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1356 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1357 &vf_rate) ||
1358 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1359 &vf_tx_rate) ||
1360 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1361 &vf_spoofchk) ||
1362 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1363 &vf_linkstate) ||
1364 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1365 sizeof(vf_rss_query_en),
1366 &vf_rss_query_en) ||
1367 nla_put(skb, IFLA_VF_TRUST,
1368 sizeof(vf_trust), &vf_trust))
1369 goto nla_put_vf_failure;
1370
1371 if (dev->netdev_ops->ndo_get_vf_guid &&
1372 !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
1373 &port_guid)) {
1374 if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
1375 &node_guid) ||
1376 nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
1377 &port_guid))
1378 goto nla_put_vf_failure;
1379 }
1380 vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
1381 if (!vfvlanlist)
1382 goto nla_put_vf_failure;
1383 if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
1384 &vf_vlan_info)) {
1385 nla_nest_cancel(skb, vfvlanlist);
1386 goto nla_put_vf_failure;
1387 }
1388 nla_nest_end(skb, vfvlanlist);
1389 if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
1390 memset(&vf_stats, 0, sizeof(vf_stats));
1391 if (dev->netdev_ops->ndo_get_vf_stats)
1392 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1393 &vf_stats);
1394 vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
1395 if (!vfstats)
1396 goto nla_put_vf_failure;
1397 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1398 vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1399 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1400 vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1401 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1402 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1403 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1404 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1405 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1406 vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1407 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1408 vf_stats.multicast, IFLA_VF_STATS_PAD) ||
1409 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
1410 vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
1411 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
1412 vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
1413 nla_nest_cancel(skb, vfstats);
1414 goto nla_put_vf_failure;
1415 }
1416 nla_nest_end(skb, vfstats);
1417 }
1418 nla_nest_end(skb, vf);
1419 return 0;
1420
1421nla_put_vf_failure:
1422 nla_nest_cancel(skb, vf);
1423 return -EMSGSIZE;
1424}
1425
1426static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
1427 struct net_device *dev,
1428 u32 ext_filter_mask)
1429{
1430 struct nlattr *vfinfo;
1431 int i, num_vfs;
1432
1433 if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
1434 return 0;
1435
1436 num_vfs = dev_num_vf(dev->dev.parent);
1437 if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
1438 return -EMSGSIZE;
1439
1440 if (!dev->netdev_ops->ndo_get_vf_config)
1441 return 0;
1442
1443 vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
1444 if (!vfinfo)
1445 return -EMSGSIZE;
1446
1447 for (i = 0; i < num_vfs; i++) {
1448 if (rtnl_fill_vfinfo(skb, dev, i, ext_filter_mask)) {
1449 nla_nest_cancel(skb, vfinfo);
1450 return -EMSGSIZE;
1451 }
1452 }
1453
1454 nla_nest_end(skb, vfinfo);
1455 return 0;
1456}
1457
1458static int rtnl_fill_link_ifmap(struct sk_buff *skb,
1459 const struct net_device *dev)
1460{
1461 struct rtnl_link_ifmap map;
1462
1463 memset(&map, 0, sizeof(map));
1464 map.mem_start = READ_ONCE(dev->mem_start);
1465 map.mem_end = READ_ONCE(dev->mem_end);
1466 map.base_addr = READ_ONCE(dev->base_addr);
1467 map.irq = READ_ONCE(dev->irq);
1468 map.dma = READ_ONCE(dev->dma);
1469 map.port = READ_ONCE(dev->if_port);
1470
1471 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
1472 return -EMSGSIZE;
1473
1474 return 0;
1475}
1476
1477static u32 rtnl_xdp_prog_skb(struct net_device *dev)
1478{
1479 const struct bpf_prog *generic_xdp_prog;
1480
1481 ASSERT_RTNL();
1482
1483 generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
1484 if (!generic_xdp_prog)
1485 return 0;
1486 return generic_xdp_prog->aux->id;
1487}
1488
1489static u32 rtnl_xdp_prog_drv(struct net_device *dev)
1490{
1491 return dev_xdp_prog_id(dev, XDP_MODE_DRV);
1492}
1493
1494static u32 rtnl_xdp_prog_hw(struct net_device *dev)
1495{
1496 return dev_xdp_prog_id(dev, XDP_MODE_HW);
1497}
1498
1499static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
1500 u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
1501 u32 (*get_prog_id)(struct net_device *dev))
1502{
1503 u32 curr_id;
1504 int err;
1505
1506 curr_id = get_prog_id(dev);
1507 if (!curr_id)
1508 return 0;
1509
1510 *prog_id = curr_id;
1511 err = nla_put_u32(skb, attr, curr_id);
1512 if (err)
1513 return err;
1514
1515 if (*mode != XDP_ATTACHED_NONE)
1516 *mode = XDP_ATTACHED_MULTI;
1517 else
1518 *mode = tgt_mode;
1519
1520 return 0;
1521}
1522
1523static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1524{
1525 struct nlattr *xdp;
1526 u32 prog_id;
1527 int err;
1528 u8 mode;
1529
1530 xdp = nla_nest_start_noflag(skb, IFLA_XDP);
1531 if (!xdp)
1532 return -EMSGSIZE;
1533
1534 prog_id = 0;
1535 mode = XDP_ATTACHED_NONE;
1536 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
1537 IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
1538 if (err)
1539 goto err_cancel;
1540 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
1541 IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
1542 if (err)
1543 goto err_cancel;
1544 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
1545 IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
1546 if (err)
1547 goto err_cancel;
1548
1549 err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
1550 if (err)
1551 goto err_cancel;
1552
1553 if (prog_id && mode != XDP_ATTACHED_MULTI) {
1554 err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
1555 if (err)
1556 goto err_cancel;
1557 }
1558
1559 nla_nest_end(skb, xdp);
1560 return 0;
1561
1562err_cancel:
1563 nla_nest_cancel(skb, xdp);
1564 return err;
1565}
1566
1567static u32 rtnl_get_event(unsigned long event)
1568{
1569 u32 rtnl_event_type = IFLA_EVENT_NONE;
1570
1571 switch (event) {
1572 case NETDEV_REBOOT:
1573 rtnl_event_type = IFLA_EVENT_REBOOT;
1574 break;
1575 case NETDEV_FEAT_CHANGE:
1576 rtnl_event_type = IFLA_EVENT_FEATURES;
1577 break;
1578 case NETDEV_BONDING_FAILOVER:
1579 rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
1580 break;
1581 case NETDEV_NOTIFY_PEERS:
1582 rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
1583 break;
1584 case NETDEV_RESEND_IGMP:
1585 rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
1586 break;
1587 case NETDEV_CHANGEINFODATA:
1588 rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
1589 break;
1590 default:
1591 break;
1592 }
1593
1594 return rtnl_event_type;
1595}
1596
1597static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
1598{
1599 const struct net_device *upper_dev;
1600 int ret = 0;
1601
1602 rcu_read_lock();
1603
1604 upper_dev = netdev_master_upper_dev_get_rcu(dev);
1605 if (upper_dev)
1606 ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);
1607
1608 rcu_read_unlock();
1609 return ret;
1610}
1611
1612static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
1613 bool force)
1614{
1615 int iflink = dev_get_iflink(dev);
1616
1617 if (force || READ_ONCE(dev->ifindex) != iflink)
1618 return nla_put_u32(skb, IFLA_LINK, iflink);
1619
1620 return 0;
1621}
1622
1623static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
1624 struct net_device *dev)
1625{
1626 char buf[IFALIASZ];
1627 int ret;
1628
1629 ret = dev_get_alias(dev, buf, sizeof(buf));
1630 return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
1631}
1632
1633static int rtnl_fill_link_netnsid(struct sk_buff *skb,
1634 const struct net_device *dev,
1635 struct net *src_net, gfp_t gfp)
1636{
1637 bool put_iflink = false;
1638
1639 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
1640 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1641
1642 if (!net_eq(dev_net(dev), link_net)) {
1643 int id = peernet2id_alloc(src_net, link_net, gfp);
1644
1645 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1646 return -EMSGSIZE;
1647
1648 put_iflink = true;
1649 }
1650 }
1651
1652 return nla_put_iflink(skb, dev, put_iflink);
1653}
1654
1655static int rtnl_fill_link_af(struct sk_buff *skb,
1656 const struct net_device *dev,
1657 u32 ext_filter_mask)
1658{
1659 const struct rtnl_af_ops *af_ops;
1660 struct nlattr *af_spec;
1661
1662 af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
1663 if (!af_spec)
1664 return -EMSGSIZE;
1665
1666 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
1667 struct nlattr *af;
1668 int err;
1669
1670 if (!af_ops->fill_link_af)
1671 continue;
1672
1673 af = nla_nest_start_noflag(skb, af_ops->family);
1674 if (!af)
1675 return -EMSGSIZE;
1676
1677 err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1678 /*
1679 * Caller may return ENODATA to indicate that there
1680 * was no data to be dumped. This is not an error, it
1681 * means we should trim the attribute header and
1682 * continue.
1683 */
1684 if (err == -ENODATA)
1685 nla_nest_cancel(skb, af);
1686 else if (err < 0)
1687 return -EMSGSIZE;
1688
1689 nla_nest_end(skb, af);
1690 }
1691
1692 nla_nest_end(skb, af_spec);
1693 return 0;
1694}
1695
1696static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
1697 const struct net_device *dev)
1698{
1699 struct netdev_name_node *name_node;
1700 int count = 0;
1701
1702 list_for_each_entry_rcu(name_node, &dev->name_node->list, list) {
1703 if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
1704 return -EMSGSIZE;
1705 count++;
1706 }
1707 return count;
1708}
1709
1710/* RCU protected. */
1711static int rtnl_fill_prop_list(struct sk_buff *skb,
1712 const struct net_device *dev)
1713{
1714 struct nlattr *prop_list;
1715 int ret;
1716
1717 prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
1718 if (!prop_list)
1719 return -EMSGSIZE;
1720
1721 ret = rtnl_fill_alt_ifnames(skb, dev);
1722 if (ret <= 0)
1723 goto nest_cancel;
1724
1725 nla_nest_end(skb, prop_list);
1726 return 0;
1727
1728nest_cancel:
1729 nla_nest_cancel(skb, prop_list);
1730 return ret;
1731}
1732
1733static int rtnl_fill_proto_down(struct sk_buff *skb,
1734 const struct net_device *dev)
1735{
1736 struct nlattr *pr;
1737 u32 preason;
1738
1739 if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1740 goto nla_put_failure;
1741
1742 preason = dev->proto_down_reason;
1743 if (!preason)
1744 return 0;
1745
1746 pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
1747 if (!pr)
1748 return -EMSGSIZE;
1749
1750 if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
1751 nla_nest_cancel(skb, pr);
1752 goto nla_put_failure;
1753 }
1754
1755 nla_nest_end(skb, pr);
1756 return 0;
1757
1758nla_put_failure:
1759 return -EMSGSIZE;
1760}
1761
1762static int rtnl_fill_devlink_port(struct sk_buff *skb,
1763 const struct net_device *dev)
1764{
1765 struct nlattr *devlink_port_nest;
1766 int ret;
1767
1768 devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
1769 if (!devlink_port_nest)
1770 return -EMSGSIZE;
1771
1772 if (dev->devlink_port) {
1773 ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
1774 if (ret < 0)
1775 goto nest_cancel;
1776 }
1777
1778 nla_nest_end(skb, devlink_port_nest);
1779 return 0;
1780
1781nest_cancel:
1782 nla_nest_cancel(skb, devlink_port_nest);
1783 return ret;
1784}
1785
1786static int rtnl_fill_dpll_pin(struct sk_buff *skb,
1787 const struct net_device *dev)
1788{
1789 struct nlattr *dpll_pin_nest;
1790 int ret;
1791
1792 dpll_pin_nest = nla_nest_start(skb, IFLA_DPLL_PIN);
1793 if (!dpll_pin_nest)
1794 return -EMSGSIZE;
1795
1796 ret = dpll_netdev_add_pin_handle(skb, dev);
1797 if (ret < 0)
1798 goto nest_cancel;
1799
1800 nla_nest_end(skb, dpll_pin_nest);
1801 return 0;
1802
1803nest_cancel:
1804 nla_nest_cancel(skb, dpll_pin_nest);
1805 return ret;
1806}
1807
1808static int rtnl_fill_ifinfo(struct sk_buff *skb,
1809 struct net_device *dev, struct net *src_net,
1810 int type, u32 pid, u32 seq, u32 change,
1811 unsigned int flags, u32 ext_filter_mask,
1812 u32 event, int *new_nsid, int new_ifindex,
1813 int tgt_netnsid, gfp_t gfp)
1814{
1815 struct ifinfomsg *ifm;
1816 struct nlmsghdr *nlh;
1817 struct Qdisc *qdisc;
1818
1819 ASSERT_RTNL();
1820 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
1821 if (nlh == NULL)
1822 return -EMSGSIZE;
1823
1824 ifm = nlmsg_data(nlh);
1825 ifm->ifi_family = AF_UNSPEC;
1826 ifm->__ifi_pad = 0;
1827 ifm->ifi_type = dev->type;
1828 ifm->ifi_index = dev->ifindex;
1829 ifm->ifi_flags = dev_get_flags(dev);
1830 ifm->ifi_change = change;
1831
1832 if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
1833 goto nla_put_failure;
1834
1835 qdisc = rtnl_dereference(dev->qdisc);
1836 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
1837 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
1838 nla_put_u8(skb, IFLA_OPERSTATE,
1839 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
1840 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
1841 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
1842 nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
1843 nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
1844 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
1845 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
1846 nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) ||
1847 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
1848 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
1849 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
1850 nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
1851 nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE, dev->gso_ipv4_max_size) ||
1852 nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE, dev->gro_ipv4_max_size) ||
1853 nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
1854 nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
1855#ifdef CONFIG_RPS
1856 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1857#endif
1858 put_master_ifindex(skb, dev) ||
1859 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
1860 (qdisc &&
1861 nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
1862 nla_put_ifalias(skb, dev) ||
1863 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
1864 atomic_read(&dev->carrier_up_count) +
1865 atomic_read(&dev->carrier_down_count)) ||
1866 nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
1867 atomic_read(&dev->carrier_up_count)) ||
1868 nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
1869 atomic_read(&dev->carrier_down_count)))
1870 goto nla_put_failure;
1871
1872 if (rtnl_fill_proto_down(skb, dev))
1873 goto nla_put_failure;
1874
1875 if (event != IFLA_EVENT_NONE) {
1876 if (nla_put_u32(skb, IFLA_EVENT, event))
1877 goto nla_put_failure;
1878 }
1879
1880 if (dev->addr_len) {
1881 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1882 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1883 goto nla_put_failure;
1884 }
1885
1886 if (rtnl_phys_port_id_fill(skb, dev))
1887 goto nla_put_failure;
1888
1889 if (rtnl_phys_port_name_fill(skb, dev))
1890 goto nla_put_failure;
1891
1892 if (rtnl_phys_switch_id_fill(skb, dev))
1893 goto nla_put_failure;
1894
1895 if (rtnl_fill_stats(skb, dev))
1896 goto nla_put_failure;
1897
1898 if (rtnl_fill_vf(skb, dev, ext_filter_mask))
1899 goto nla_put_failure;
1900
1901 if (rtnl_port_fill(skb, dev, ext_filter_mask))
1902 goto nla_put_failure;
1903
1904 if (rtnl_xdp_fill(skb, dev))
1905 goto nla_put_failure;
1906
1907 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1908 if (rtnl_link_fill(skb, dev) < 0)
1909 goto nla_put_failure;
1910 }
1911
1912 if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
1913 goto nla_put_failure;
1914
1915 if (new_nsid &&
1916 nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
1917 goto nla_put_failure;
1918 if (new_ifindex &&
1919 nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
1920 goto nla_put_failure;
1921
1922 if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
1923 nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
1924 goto nla_put_failure;
1925
1926 rcu_read_lock();
1927 if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
1928 goto nla_put_failure_rcu;
1929 if (rtnl_fill_link_ifmap(skb, dev))
1930 goto nla_put_failure_rcu;
1931 if (rtnl_fill_prop_list(skb, dev))
1932 goto nla_put_failure_rcu;
1933 rcu_read_unlock();
1934
1935 if (dev->dev.parent &&
1936 nla_put_string(skb, IFLA_PARENT_DEV_NAME,
1937 dev_name(dev->dev.parent)))
1938 goto nla_put_failure;
1939
1940 if (dev->dev.parent && dev->dev.parent->bus &&
1941 nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
1942 dev->dev.parent->bus->name))
1943 goto nla_put_failure;
1944
1945 if (rtnl_fill_devlink_port(skb, dev))
1946 goto nla_put_failure;
1947
1948 if (rtnl_fill_dpll_pin(skb, dev))
1949 goto nla_put_failure;
1950
1951 nlmsg_end(skb, nlh);
1952 return 0;
1953
1954nla_put_failure_rcu:
1955 rcu_read_unlock();
1956nla_put_failure:
1957 nlmsg_cancel(skb, nlh);
1958 return -EMSGSIZE;
1959}
1960
1961static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1962 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
1963 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1964 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1965 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
1966 [IFLA_MTU] = { .type = NLA_U32 },
1967 [IFLA_LINK] = { .type = NLA_U32 },
1968 [IFLA_MASTER] = { .type = NLA_U32 },
1969 [IFLA_CARRIER] = { .type = NLA_U8 },
1970 [IFLA_TXQLEN] = { .type = NLA_U32 },
1971 [IFLA_WEIGHT] = { .type = NLA_U32 },
1972 [IFLA_OPERSTATE] = { .type = NLA_U8 },
1973 [IFLA_LINKMODE] = { .type = NLA_U8 },
1974 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1975 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1976 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
1977 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
1978 * allow 0-length string (needed to remove an alias).
1979 */
1980 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
1981 [IFLA_VFINFO_LIST] = { .type = NLA_NESTED },
1982 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
1983 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
1984 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
1985 [IFLA_EXT_MASK] = { .type = NLA_U32 },
1986 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
1987 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
1988 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
1989 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 },
1990 [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 },
1991 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1992 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
1993 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1994 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
1995 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
1996 [IFLA_XDP] = { .type = NLA_NESTED },
1997 [IFLA_EVENT] = { .type = NLA_U32 },
1998 [IFLA_GROUP] = { .type = NLA_U32 },
1999 [IFLA_TARGET_NETNSID] = { .type = NLA_S32 },
2000 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 },
2001 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
2002 [IFLA_MIN_MTU] = { .type = NLA_U32 },
2003 [IFLA_MAX_MTU] = { .type = NLA_U32 },
2004 [IFLA_PROP_LIST] = { .type = NLA_NESTED },
2005 [IFLA_ALT_IFNAME] = { .type = NLA_STRING,
2006 .len = ALTIFNAMSIZ - 1 },
2007 [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT },
2008 [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
2009 [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
2010 [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING },
2011 [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 },
2012 [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT },
2013 [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT },
2014 [IFLA_ALLMULTI] = { .type = NLA_REJECT },
2015 [IFLA_GSO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
2016 [IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
2017};
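
/* A note on the policy above: NLA_REJECT entries (e.g. IFLA_PERM_ADDRESS,
 * IFLA_TSO_MAX_SIZE, IFLA_ALLMULTI) mark attributes the kernel emits in dumps
 * but refuses to accept from user space. A rough sketch of how the callers in
 * this file drive a parse against this policy (simplified):
 *
 *	struct nlattr *tb[IFLA_MAX + 1];
 *	u32 mtu;
 *	int err;
 *
 *	err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg), tb,
 *				     IFLA_MAX, ifla_policy, extack);
 *	if (err < 0)
 *		return err;	(policy violation, extack already set)
 *	if (tb[IFLA_MTU])
 *		mtu = nla_get_u32(tb[IFLA_MTU]);
 */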
2018
2019static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
2020 [IFLA_INFO_KIND] = { .type = NLA_STRING },
2021 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
2022 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
2023 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
2024};
2025
2026static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
2027 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
2028 [IFLA_VF_BROADCAST] = { .type = NLA_REJECT },
2029 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
2030 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
2031 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
2032 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
2033 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
2034 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
2035 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
2036 [IFLA_VF_STATS] = { .type = NLA_NESTED },
2037 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
2038 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) },
2039 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) },
2040};
2041
2042static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
2043 [IFLA_PORT_VF] = { .type = NLA_U32 },
2044 [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
2045 .len = PORT_PROFILE_MAX },
2046 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
2047 .len = PORT_UUID_MAX },
2048 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
2049 .len = PORT_UUID_MAX },
2050 [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
2051 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
2052
2053 /* Unused, but we need to keep it here since user space could
2054 * fill it. It's also broken with regard to NLA_BINARY use in
2055 * combination with structs.
2056 */
2057 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
2058 .len = sizeof(struct ifla_port_vsi) },
2059};
2060
2061static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
2062 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD },
2063 [IFLA_XDP_FD] = { .type = NLA_S32 },
2064 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 },
2065 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
2066 [IFLA_XDP_FLAGS] = { .type = NLA_U32 },
2067 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
2068};
2069
2070static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
2071{
2072 const struct rtnl_link_ops *ops = NULL;
2073 struct nlattr *linfo[IFLA_INFO_MAX + 1];
2074
2075 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
2076 return NULL;
2077
2078 if (linfo[IFLA_INFO_KIND]) {
2079 char kind[MODULE_NAME_LEN];
2080
2081 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
2082 ops = rtnl_link_ops_get(kind);
2083 }
2084
2085 return ops;
2086}
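
/* For reference, the nesting parsed above looks roughly like this on the
 * wire (not exhaustive):
 *
 *	IFLA_LINKINFO
 *	  IFLA_INFO_KIND	"vlan", "bridge", ...	(selects rtnl_link_ops)
 *	  IFLA_INFO_DATA	kind-specific attributes
 *	  IFLA_INFO_SLAVE_KIND	the master's kind
 *	  IFLA_INFO_SLAVE_DATA	master-specific slave attributes
 */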
2087
2088static bool link_master_filtered(struct net_device *dev, int master_idx)
2089{
2090 struct net_device *master;
2091
2092 if (!master_idx)
2093 return false;
2094
2095 master = netdev_master_upper_dev_get(dev);
2096
2097 /* 0 already denotes that IFLA_MASTER wasn't passed, so we need another
2098 * invalid ifindex value, -1, to denote "no master".
2099 */
2100 if (master_idx == -1)
2101 return !!master;
2102
2103 if (!master || master->ifindex != master_idx)
2104 return true;
2105
2106 return false;
2107}
2108
2109static bool link_kind_filtered(const struct net_device *dev,
2110 const struct rtnl_link_ops *kind_ops)
2111{
2112 if (kind_ops && dev->rtnl_link_ops != kind_ops)
2113 return true;
2114
2115 return false;
2116}
2117
2118static bool link_dump_filtered(struct net_device *dev,
2119 int master_idx,
2120 const struct rtnl_link_ops *kind_ops)
2121{
2122 if (link_master_filtered(dev, master_idx) ||
2123 link_kind_filtered(dev, kind_ops))
2124 return true;
2125
2126 return false;
2127}
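
/* These predicates back the iproute2 dump filters, e.g. (usage sketch):
 *
 *	ip link show master br0	  -> IFLA_MASTER = br0's ifindex
 *	ip link show type vlan	  -> IFLA_LINKINFO / IFLA_INFO_KIND "vlan"
 *
 * An IFLA_MASTER value of -1 selects devices that have no master at all.
 */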
2128
2129/**
2130 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
2131 * @sk: netlink socket
2132 * @netnsid: network namespace identifier
2133 *
2134 * Returns the network namespace identified by netnsid on success or an error
2135 * pointer on failure.
2136 */
2137struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
2138{
2139 struct net *net;
2140
2141 net = get_net_ns_by_id(sock_net(sk), netnsid);
2142 if (!net)
2143 return ERR_PTR(-EINVAL);
2144
2145 /* For now, the caller is required to have CAP_NET_ADMIN in
2146 * the user namespace owning the target net ns.
2147 */
2148 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
2149 put_net(net);
2150 return ERR_PTR(-EACCES);
2151 }
2152 return net;
2153}
2154EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
2155
2156static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
2157 bool strict_check, struct nlattr **tb,
2158 struct netlink_ext_ack *extack)
2159{
2160 int hdrlen;
2161
2162 if (strict_check) {
2163 struct ifinfomsg *ifm;
2164
2165 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
2166 NL_SET_ERR_MSG(extack, "Invalid header for link dump");
2167 return -EINVAL;
2168 }
2169
2170 ifm = nlmsg_data(nlh);
2171 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
2172 ifm->ifi_change) {
2173 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
2174 return -EINVAL;
2175 }
2176 if (ifm->ifi_index) {
2177 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
2178 return -EINVAL;
2179 }
2180
2181 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
2182 IFLA_MAX, ifla_policy,
2183 extack);
2184 }
2185
2186 /* A hack to preserve the kernel<->userspace interface.
2187 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
2188 * However, before Linux v3.9 the code here assumed rtgenmsg, and that is
2189 * what iproute2 < v3.9.0 used.
2190 * We can detect the old iproute2 because, even including the IFLA_EXT_MASK
2191 * attribute, its netlink message is shorter than struct ifinfomsg.
2192 */
2193 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2194 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2195
2196 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
2197 extack);
2198}
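
/* A sketch of the legacy dump request tolerated above (userspace; assumes
 * "fd" is a bound NETLINK_ROUTE socket, error handling omitted):
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct rtgenmsg g;
 *	} req = {
 *		.nlh.nlmsg_len	 = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
 *		.nlh.nlmsg_type	 = RTM_GETLINK,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		.g.rtgen_family	 = AF_UNSPEC,
 *	};
 *	send(fd, &req, req.nlh.nlmsg_len, 0);
 *
 * Strict checkers must instead send a full struct ifinfomsg with all
 * filtering fields zeroed.
 */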
2199
2200static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
2201{
2202 const struct rtnl_link_ops *kind_ops = NULL;
2203 struct netlink_ext_ack *extack = cb->extack;
2204 const struct nlmsghdr *nlh = cb->nlh;
2205 struct net *net = sock_net(skb->sk);
2206 unsigned int flags = NLM_F_MULTI;
2207 struct nlattr *tb[IFLA_MAX+1];
2208 struct {
2209 unsigned long ifindex;
2210 } *ctx = (void *)cb->ctx;
2211 struct net *tgt_net = net;
2212 u32 ext_filter_mask = 0;
2213 struct net_device *dev;
2214 int master_idx = 0;
2215 int netnsid = -1;
2216 int err, i;
2217
2218 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
2219 if (err < 0) {
2220 if (cb->strict_check)
2221 return err;
2222
2223 goto walk_entries;
2224 }
2225
2226 for (i = 0; i <= IFLA_MAX; ++i) {
2227 if (!tb[i])
2228 continue;
2229
2230 /* new attributes should only be added with strict checking */
2231 switch (i) {
2232 case IFLA_TARGET_NETNSID:
2233 netnsid = nla_get_s32(tb[i]);
2234 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
2235 if (IS_ERR(tgt_net)) {
2236 NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
2237 return PTR_ERR(tgt_net);
2238 }
2239 break;
2240 case IFLA_EXT_MASK:
2241 ext_filter_mask = nla_get_u32(tb[i]);
2242 break;
2243 case IFLA_MASTER:
2244 master_idx = nla_get_u32(tb[i]);
2245 break;
2246 case IFLA_LINKINFO:
2247 kind_ops = linkinfo_to_kind_ops(tb[i]);
2248 break;
2249 default:
2250 if (cb->strict_check) {
2251 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
2252 return -EINVAL;
2253 }
2254 }
2255 }
2256
2257 if (master_idx || kind_ops)
2258 flags |= NLM_F_DUMP_FILTERED;
2259
2260walk_entries:
2261 err = 0;
2262 for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
2263 if (link_dump_filtered(dev, master_idx, kind_ops))
2264 continue;
2265 err = rtnl_fill_ifinfo(skb, dev, net, RTM_NEWLINK,
2266 NETLINK_CB(cb->skb).portid,
2267 nlh->nlmsg_seq, 0, flags,
2268 ext_filter_mask, 0, NULL, 0,
2269 netnsid, GFP_KERNEL);
2270 if (err < 0)
2271 break;
2272 }
2273 cb->seq = tgt_net->dev_base_seq;
2274 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2275 if (netnsid >= 0)
2276 put_net(tgt_net);
2277
2278 return err;
2279}
2280
2281int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
2282 struct netlink_ext_ack *exterr)
2283{
2284 const struct ifinfomsg *ifmp;
2285 const struct nlattr *attrs;
2286 size_t len;
2287
2288 ifmp = nla_data(nla_peer);
2289 attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
2290 len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
2291
2292 if (ifmp->ifi_index < 0) {
2293 NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
2294 "ifindex can't be negative");
2295 return -EINVAL;
2296 }
2297
2298 return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
2299 exterr);
2300}
2301EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
2302
2303struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
2304{
2305 struct net *net;
2306 /* Examine the link attributes and figure out which
2307 * network namespace we are talking about.
2308 */
2309 if (tb[IFLA_NET_NS_PID])
2310 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
2311 else if (tb[IFLA_NET_NS_FD])
2312 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
2313 else
2314 net = get_net(src_net);
2315 return net;
2316}
2317EXPORT_SYMBOL(rtnl_link_get_net);
2318
2319/* Figure out which network namespace we are talking about by
2320 * examining the link attributes in the following order:
2321 *
2322 * 1. IFLA_NET_NS_PID
2323 * 2. IFLA_NET_NS_FD
2324 * 3. IFLA_TARGET_NETNSID
2325 */
2326static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
2327 struct nlattr *tb[])
2328{
2329 struct net *net;
2330
2331 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
2332 return rtnl_link_get_net(src_net, tb);
2333
2334 if (!tb[IFLA_TARGET_NETNSID])
2335 return get_net(src_net);
2336
2337 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
2338 if (!net)
2339 return ERR_PTR(-EINVAL);
2340
2341 return net;
2342}
2343
2344static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
2345 struct net *src_net,
2346 struct nlattr *tb[], int cap)
2347{
2348 struct net *net;
2349
2350 net = rtnl_link_get_net_by_nlattr(src_net, tb);
2351 if (IS_ERR(net))
2352 return net;
2353
2354 if (!netlink_ns_capable(skb, net->user_ns, cap)) {
2355 put_net(net);
2356 return ERR_PTR(-EPERM);
2357 }
2358
2359 return net;
2360}
2361
2362/* Verify that rtnetlink requests do not pass additional properties
2363 * potentially referring to different network namespaces.
2364 */
2365static int rtnl_ensure_unique_netns(struct nlattr *tb[],
2366 struct netlink_ext_ack *extack,
2367 bool netns_id_only)
2368{
2369
2370 if (netns_id_only) {
2371 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
2372 return 0;
2373
2374 NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
2375 return -EOPNOTSUPP;
2376 }
2377
2378 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
2379 goto invalid_attr;
2380
2381 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
2382 goto invalid_attr;
2383
2384 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
2385 goto invalid_attr;
2386
2387 return 0;
2388
2389invalid_attr:
2390 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
2391 return -EINVAL;
2392}
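
/* In other words: at most one of IFLA_NET_NS_PID, IFLA_NET_NS_FD and
 * IFLA_TARGET_NETNSID may appear in a single request, and the callers that
 * pass netns_id_only == true (RTM_DELLINK, RTM_GETLINK, link properties)
 * accept only IFLA_TARGET_NETNSID.
 */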
2393
2394static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2395 int max_tx_rate)
2396{
2397 const struct net_device_ops *ops = dev->netdev_ops;
2398
2399 if (!ops->ndo_set_vf_rate)
2400 return -EOPNOTSUPP;
2401 if (max_tx_rate && max_tx_rate < min_tx_rate)
2402 return -EINVAL;
2403
2404 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
2405}
2406
2407static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
2408 struct netlink_ext_ack *extack)
2409{
2410 if (tb[IFLA_ADDRESS] &&
2411 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2412 return -EINVAL;
2413
2414 if (tb[IFLA_BROADCAST] &&
2415 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2416 return -EINVAL;
2417
2418 if (tb[IFLA_GSO_MAX_SIZE] &&
2419 nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) {
2420 NL_SET_ERR_MSG(extack, "too big gso_max_size");
2421 return -EINVAL;
2422 }
2423
2424 if (tb[IFLA_GSO_MAX_SEGS] &&
2425 (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS ||
2426 nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) {
2427 NL_SET_ERR_MSG(extack, "too big gso_max_segs");
2428 return -EINVAL;
2429 }
2430
2431 if (tb[IFLA_GRO_MAX_SIZE] &&
2432 nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) {
2433 NL_SET_ERR_MSG(extack, "too big gro_max_size");
2434 return -EINVAL;
2435 }
2436
2437 if (tb[IFLA_GSO_IPV4_MAX_SIZE] &&
2438 nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) {
2439 NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size");
2440 return -EINVAL;
2441 }
2442
2443 if (tb[IFLA_GRO_IPV4_MAX_SIZE] &&
2444 nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) {
2445 NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size");
2446 return -EINVAL;
2447 }
2448
2449 if (tb[IFLA_AF_SPEC]) {
2450 struct nlattr *af;
2451 int rem, err;
2452
2453 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2454 const struct rtnl_af_ops *af_ops;
2455
2456 af_ops = rtnl_af_lookup(nla_type(af));
2457 if (!af_ops)
2458 return -EAFNOSUPPORT;
2459
2460 if (!af_ops->set_link_af)
2461 return -EOPNOTSUPP;
2462
2463 if (af_ops->validate_link_af) {
2464 err = af_ops->validate_link_af(dev, af, extack);
2465 if (err < 0)
2466 return err;
2467 }
2468 }
2469 }
2470
2471 return 0;
2472}
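
/* The GSO/GRO bounds checked above correspond to the limits user space can
 * tune at run time, e.g. (iproute2 sketch):
 *
 *	ip link set dev eth0 gso_max_size 65536
 *	ip link set dev eth0 gso_max_segs 64
 *
 * tso_max_size/tso_max_segs act as device ceilings and are read-only via
 * rtnetlink (NLA_REJECT in ifla_policy).
 */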
2473
2474static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2475 int guid_type)
2476{
2477 const struct net_device_ops *ops = dev->netdev_ops;
2478
2479 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2480}
2481
2482static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2483{
2484 if (dev->type != ARPHRD_INFINIBAND)
2485 return -EOPNOTSUPP;
2486
2487 return handle_infiniband_guid(dev, ivt, guid_type);
2488}
2489
2490static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
2491{
2492 const struct net_device_ops *ops = dev->netdev_ops;
2493 int err = -EINVAL;
2494
2495 if (tb[IFLA_VF_MAC]) {
2496 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
2497
2498 if (ivm->vf >= INT_MAX)
2499 return -EINVAL;
2500 err = -EOPNOTSUPP;
2501 if (ops->ndo_set_vf_mac)
2502 err = ops->ndo_set_vf_mac(dev, ivm->vf,
2503 ivm->mac);
2504 if (err < 0)
2505 return err;
2506 }
2507
2508 if (tb[IFLA_VF_VLAN]) {
2509 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
2510
2511 if (ivv->vf >= INT_MAX)
2512 return -EINVAL;
2513 err = -EOPNOTSUPP;
2514 if (ops->ndo_set_vf_vlan)
2515 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
2516 ivv->qos,
2517 htons(ETH_P_8021Q));
2518 if (err < 0)
2519 return err;
2520 }
2521
2522 if (tb[IFLA_VF_VLAN_LIST]) {
2523 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
2524 struct nlattr *attr;
2525 int rem, len = 0;
2526
2527 err = -EOPNOTSUPP;
2528 if (!ops->ndo_set_vf_vlan)
2529 return err;
2530
2531 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
2532 if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
2533 nla_len(attr) < sizeof(struct ifla_vf_vlan_info)) {
2534 return -EINVAL;
2535 }
2536 if (len >= MAX_VLAN_LIST_LEN)
2537 return -EOPNOTSUPP;
2538 ivvl[len] = nla_data(attr);
2539
2540 len++;
2541 }
2542 if (len == 0)
2543 return -EINVAL;
2544
2545 if (ivvl[0]->vf >= INT_MAX)
2546 return -EINVAL;
2547 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2548 ivvl[0]->qos, ivvl[0]->vlan_proto);
2549 if (err < 0)
2550 return err;
2551 }
2552
2553 if (tb[IFLA_VF_TX_RATE]) {
2554 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
2555 struct ifla_vf_info ivf;
2556
2557 if (ivt->vf >= INT_MAX)
2558 return -EINVAL;
2559 err = -EOPNOTSUPP;
2560 if (ops->ndo_get_vf_config)
2561 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2562 if (err < 0)
2563 return err;
2564
2565 err = rtnl_set_vf_rate(dev, ivt->vf,
2566 ivf.min_tx_rate, ivt->rate);
2567 if (err < 0)
2568 return err;
2569 }
2570
2571 if (tb[IFLA_VF_RATE]) {
2572 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
2573
2574 if (ivt->vf >= INT_MAX)
2575 return -EINVAL;
2576
2577 err = rtnl_set_vf_rate(dev, ivt->vf,
2578 ivt->min_tx_rate, ivt->max_tx_rate);
2579 if (err < 0)
2580 return err;
2581 }
2582
2583 if (tb[IFLA_VF_SPOOFCHK]) {
2584 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
2585
2586 if (ivs->vf >= INT_MAX)
2587 return -EINVAL;
2588 err = -EOPNOTSUPP;
2589 if (ops->ndo_set_vf_spoofchk)
2590 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2591 ivs->setting);
2592 if (err < 0)
2593 return err;
2594 }
2595
2596 if (tb[IFLA_VF_LINK_STATE]) {
2597 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
2598
2599 if (ivl->vf >= INT_MAX)
2600 return -EINVAL;
2601 err = -EOPNOTSUPP;
2602 if (ops->ndo_set_vf_link_state)
2603 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2604 ivl->link_state);
2605 if (err < 0)
2606 return err;
2607 }
2608
2609 if (tb[IFLA_VF_RSS_QUERY_EN]) {
2610 struct ifla_vf_rss_query_en *ivrssq_en;
2611
2612 err = -EOPNOTSUPP;
2613 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
2614 if (ivrssq_en->vf >= INT_MAX)
2615 return -EINVAL;
2616 if (ops->ndo_set_vf_rss_query_en)
2617 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2618 ivrssq_en->setting);
2619 if (err < 0)
2620 return err;
2621 }
2622
2623 if (tb[IFLA_VF_TRUST]) {
2624 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
2625
2626 if (ivt->vf >= INT_MAX)
2627 return -EINVAL;
2628 err = -EOPNOTSUPP;
2629 if (ops->ndo_set_vf_trust)
2630 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2631 if (err < 0)
2632 return err;
2633 }
2634
2635 if (tb[IFLA_VF_IB_NODE_GUID]) {
2636 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
2637
2638 if (ivt->vf >= INT_MAX)
2639 return -EINVAL;
2640 if (!ops->ndo_set_vf_guid)
2641 return -EOPNOTSUPP;
2642 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2643 }
2644
2645 if (tb[IFLA_VF_IB_PORT_GUID]) {
2646 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
2647
2648 if (ivt->vf >= INT_MAX)
2649 return -EINVAL;
2650 if (!ops->ndo_set_vf_guid)
2651 return -EOPNOTSUPP;
2652
2653 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
2654 }
2655
2656 return err;
2657}
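
/* Typical trigger for the handlers above (iproute2 sketch; an SR-IOV PF with
 * configured VFs is assumed):
 *
 *	ip link set dev eth0 vf 0 mac 02:11:22:33:44:55
 *	ip link set dev eth0 vf 0 vlan 100 qos 2
 *	ip link set dev eth0 vf 0 max_tx_rate 1000 min_tx_rate 100
 *
 * Each option arrives as a nested IFLA_VF_* attribute inside IFLA_VFINFO_LIST
 * and is forwarded to the matching ndo_set_vf_* driver op.
 */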
2658
2659static int do_set_master(struct net_device *dev, int ifindex,
2660 struct netlink_ext_ack *extack)
2661{
2662 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2663 const struct net_device_ops *ops;
2664 int err;
2665
2666 if (upper_dev) {
2667 if (upper_dev->ifindex == ifindex)
2668 return 0;
2669 ops = upper_dev->netdev_ops;
2670 if (ops->ndo_del_slave) {
2671 err = ops->ndo_del_slave(upper_dev, dev);
2672 if (err)
2673 return err;
2674 } else {
2675 return -EOPNOTSUPP;
2676 }
2677 }
2678
2679 if (ifindex) {
2680 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2681 if (!upper_dev)
2682 return -EINVAL;
2683 ops = upper_dev->netdev_ops;
2684 if (ops->ndo_add_slave) {
2685 err = ops->ndo_add_slave(upper_dev, dev, extack);
2686 if (err)
2687 return err;
2688 } else {
2689 return -EOPNOTSUPP;
2690 }
2691 }
2692 return 0;
2693}
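
/* Both enslaving and releasing are expressed through IFLA_MASTER, e.g.
 * (iproute2 sketch):
 *
 *	ip link set dev eth0 master br0	  -> ndo_add_slave(br0, eth0)
 *	ip link set dev eth0 nomaster	  -> IFLA_MASTER = 0, del_slave only
 */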
2694
2695static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
2696 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 },
2697 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 },
2698};
2699
2700static int do_set_proto_down(struct net_device *dev,
2701 struct nlattr *nl_proto_down,
2702 struct nlattr *nl_proto_down_reason,
2703 struct netlink_ext_ack *extack)
2704{
2705 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
2706 unsigned long mask = 0;
2707 u32 value;
2708 bool proto_down;
2709 int err;
2710
2711 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) {
2712 NL_SET_ERR_MSG(extack, "Protodown not supported by device");
2713 return -EOPNOTSUPP;
2714 }
2715
2716 if (nl_proto_down_reason) {
2717 err = nla_parse_nested_deprecated(pdreason,
2718 IFLA_PROTO_DOWN_REASON_MAX,
2719 nl_proto_down_reason,
2720 ifla_proto_down_reason_policy,
2721 NULL);
2722 if (err < 0)
2723 return err;
2724
2725 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
2726 NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
2727 return -EINVAL;
2728 }
2729
2730 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);
2731
2732 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
2733 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);
2734
2735 dev_change_proto_down_reason(dev, mask, value);
2736 }
2737
2738 if (nl_proto_down) {
2739 proto_down = nla_get_u8(nl_proto_down);
2740
2741 /* Don't turn off protodown if there are active reasons */
2742 if (!proto_down && dev->proto_down_reason) {
2743 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
2744 return -EBUSY;
2745 }
2746 err = dev_change_proto_down(dev,
2747 proto_down);
2748 if (err)
2749 return err;
2750 }
2751
2752 return 0;
2753}
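
/* Usage sketch (iproute2 syntax, assuming the tool supports protodown_reason):
 *
 *	ip link set dev swp1 protodown_reason 0 on   (set reason bit 0)
 *	ip link set dev swp1 protodown on	     (hold carrier down)
 *	ip link set dev swp1 protodown off	     (fails with -EBUSY while
 *						      any reason bit is set)
 */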
2754
2755#define DO_SETLINK_MODIFIED 0x01
2756/* DO_SETLINK_NOTIFY implies DO_SETLINK_MODIFIED: 0x03 = notify | modified. */
2757#define DO_SETLINK_NOTIFY 0x03
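/* do_setlink() applies every recognized IFLA_* attribute to @dev one group at
 * a time, accumulating DO_SETLINK_* bits in @status. Because each change is
 * committed as it is parsed, a mid-way failure can leave the device partially
 * reconfigured; the errout path below warns about exactly that case.
 */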
2758static int do_setlink(const struct sk_buff *skb,
2759 struct net_device *dev, struct ifinfomsg *ifm,
2760 struct netlink_ext_ack *extack,
2761 struct nlattr **tb, int status)
2762{
2763 const struct net_device_ops *ops = dev->netdev_ops;
2764 char ifname[IFNAMSIZ];
2765 int err;
2766
2767 if (tb[IFLA_IFNAME])
2768 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2769 else
2770 ifname[0] = '\0';
2771
2772 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
2773 const char *pat = ifname[0] ? ifname : NULL;
2774 struct net *net;
2775 int new_ifindex;
2776
2777 net = rtnl_link_get_net_capable(skb, dev_net(dev),
2778 tb, CAP_NET_ADMIN);
2779 if (IS_ERR(net)) {
2780 err = PTR_ERR(net);
2781 goto errout;
2782 }
2783
2784 if (tb[IFLA_NEW_IFINDEX])
2785 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]);
2786 else
2787 new_ifindex = 0;
2788
2789 err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
2790 put_net(net);
2791 if (err)
2792 goto errout;
2793 status |= DO_SETLINK_MODIFIED;
2794 }
2795
2796 if (tb[IFLA_MAP]) {
2797 struct rtnl_link_ifmap *u_map;
2798 struct ifmap k_map;
2799
2800 if (!ops->ndo_set_config) {
2801 err = -EOPNOTSUPP;
2802 goto errout;
2803 }
2804
2805 if (!netif_device_present(dev)) {
2806 err = -ENODEV;
2807 goto errout;
2808 }
2809
2810 u_map = nla_data(tb[IFLA_MAP]);
2811 k_map.mem_start = (unsigned long) u_map->mem_start;
2812 k_map.mem_end = (unsigned long) u_map->mem_end;
2813 k_map.base_addr = (unsigned short) u_map->base_addr;
2814 k_map.irq = (unsigned char) u_map->irq;
2815 k_map.dma = (unsigned char) u_map->dma;
2816 k_map.port = (unsigned char) u_map->port;
2817
2818 err = ops->ndo_set_config(dev, &k_map);
2819 if (err < 0)
2820 goto errout;
2821
2822 status |= DO_SETLINK_NOTIFY;
2823 }
2824
2825 if (tb[IFLA_ADDRESS]) {
2826 struct sockaddr *sa;
2827 int len;
2828
2829 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2830 sizeof(*sa));
2831 sa = kmalloc(len, GFP_KERNEL);
2832 if (!sa) {
2833 err = -ENOMEM;
2834 goto errout;
2835 }
2836 sa->sa_family = dev->type;
2837 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2838 dev->addr_len);
2839 err = dev_set_mac_address_user(dev, sa, extack);
2840 kfree(sa);
2841 if (err)
2842 goto errout;
2843 status |= DO_SETLINK_MODIFIED;
2844 }
2845
2846 if (tb[IFLA_MTU]) {
2847 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
2848 if (err < 0)
2849 goto errout;
2850 status |= DO_SETLINK_MODIFIED;
2851 }
2852
2853 if (tb[IFLA_GROUP]) {
2854 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2855 status |= DO_SETLINK_NOTIFY;
2856 }
2857
2858 /*
2859 * An interface selected by interface index that also
2860 * carries an interface name implies that a rename has
2861 * been requested.
2862 */
2863 if (ifm->ifi_index > 0 && ifname[0]) {
2864 err = dev_change_name(dev, ifname);
2865 if (err < 0)
2866 goto errout;
2867 status |= DO_SETLINK_MODIFIED;
2868 }
2869
2870 if (tb[IFLA_IFALIAS]) {
2871 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2872 nla_len(tb[IFLA_IFALIAS]));
2873 if (err < 0)
2874 goto errout;
2875 status |= DO_SETLINK_NOTIFY;
2876 }
2877
2878 if (tb[IFLA_BROADCAST]) {
2879 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2880 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2881 }
2882
2883 if (ifm->ifi_flags || ifm->ifi_change) {
2884 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2885 extack);
2886 if (err < 0)
2887 goto errout;
2888 }
2889
2890 if (tb[IFLA_MASTER]) {
2891 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2892 if (err)
2893 goto errout;
2894 status |= DO_SETLINK_MODIFIED;
2895 }
2896
2897 if (tb[IFLA_CARRIER]) {
2898 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2899 if (err)
2900 goto errout;
2901 status |= DO_SETLINK_MODIFIED;
2902 }
2903
2904 if (tb[IFLA_TXQLEN]) {
2905 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
2906
2907 err = dev_change_tx_queue_len(dev, value);
2908 if (err)
2909 goto errout;
2910 status |= DO_SETLINK_MODIFIED;
2911 }
2912
2913 if (tb[IFLA_GSO_MAX_SIZE]) {
2914 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
2915
2916 if (dev->gso_max_size ^ max_size) {
2917 netif_set_gso_max_size(dev, max_size);
2918 status |= DO_SETLINK_MODIFIED;
2919 }
2920 }
2921
2922 if (tb[IFLA_GSO_MAX_SEGS]) {
2923 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2924
2925 if (dev->gso_max_segs ^ max_segs) {
2926 netif_set_gso_max_segs(dev, max_segs);
2927 status |= DO_SETLINK_MODIFIED;
2928 }
2929 }
2930
2931 if (tb[IFLA_GRO_MAX_SIZE]) {
2932 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
2933
2934 if (dev->gro_max_size ^ gro_max_size) {
2935 netif_set_gro_max_size(dev, gro_max_size);
2936 status |= DO_SETLINK_MODIFIED;
2937 }
2938 }
2939
2940 if (tb[IFLA_GSO_IPV4_MAX_SIZE]) {
2941 u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]);
2942
2943 if (dev->gso_ipv4_max_size ^ max_size) {
2944 netif_set_gso_ipv4_max_size(dev, max_size);
2945 status |= DO_SETLINK_MODIFIED;
2946 }
2947 }
2948
2949 if (tb[IFLA_GRO_IPV4_MAX_SIZE]) {
2950 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]);
2951
2952 if (dev->gro_ipv4_max_size ^ gro_max_size) {
2953 netif_set_gro_ipv4_max_size(dev, gro_max_size);
2954 status |= DO_SETLINK_MODIFIED;
2955 }
2956 }
2957
2958 if (tb[IFLA_OPERSTATE])
2959 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2960
2961 if (tb[IFLA_LINKMODE]) {
2962 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2963
2964 if (dev->link_mode ^ value)
2965 status |= DO_SETLINK_NOTIFY;
2966 WRITE_ONCE(dev->link_mode, value);
2967 }
2968
2969 if (tb[IFLA_VFINFO_LIST]) {
2970 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2971 struct nlattr *attr;
2972 int rem;
2973
2974 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2975 if (nla_type(attr) != IFLA_VF_INFO ||
2976 nla_len(attr) < NLA_HDRLEN) {
2977 err = -EINVAL;
2978 goto errout;
2979 }
2980 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
2981 attr,
2982 ifla_vf_policy,
2983 NULL);
2984 if (err < 0)
2985 goto errout;
2986 err = do_setvfinfo(dev, vfinfo);
2987 if (err < 0)
2988 goto errout;
2989 status |= DO_SETLINK_NOTIFY;
2990 }
2991 }
2992 err = 0;
2993
2994 if (tb[IFLA_VF_PORTS]) {
2995 struct nlattr *port[IFLA_PORT_MAX+1];
2996 struct nlattr *attr;
2997 int vf;
2998 int rem;
2999
3000 err = -EOPNOTSUPP;
3001 if (!ops->ndo_set_vf_port)
3002 goto errout;
3003
3004 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
3005 if (nla_type(attr) != IFLA_VF_PORT ||
3006 nla_len(attr) < NLA_HDRLEN) {
3007 err = -EINVAL;
3008 goto errout;
3009 }
3010 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
3011 attr,
3012 ifla_port_policy,
3013 NULL);
3014 if (err < 0)
3015 goto errout;
3016 if (!port[IFLA_PORT_VF]) {
3017 err = -EOPNOTSUPP;
3018 goto errout;
3019 }
3020 vf = nla_get_u32(port[IFLA_PORT_VF]);
3021 err = ops->ndo_set_vf_port(dev, vf, port);
3022 if (err < 0)
3023 goto errout;
3024 status |= DO_SETLINK_NOTIFY;
3025 }
3026 }
3027 err = 0;
3028
3029 if (tb[IFLA_PORT_SELF]) {
3030 struct nlattr *port[IFLA_PORT_MAX+1];
3031
3032 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
3033 tb[IFLA_PORT_SELF],
3034 ifla_port_policy, NULL);
3035 if (err < 0)
3036 goto errout;
3037
3038 err = -EOPNOTSUPP;
3039 if (ops->ndo_set_vf_port)
3040 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
3041 if (err < 0)
3042 goto errout;
3043 status |= DO_SETLINK_NOTIFY;
3044 }
3045
3046 if (tb[IFLA_AF_SPEC]) {
3047 struct nlattr *af;
3048 int rem;
3049
3050 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
3051 const struct rtnl_af_ops *af_ops;
3052
3053 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
3054
3055 err = af_ops->set_link_af(dev, af, extack);
3056 if (err < 0)
3057 goto errout;
3058
3059 status |= DO_SETLINK_NOTIFY;
3060 }
3061 }
3062 err = 0;
3063
3064 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
3065 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
3066 tb[IFLA_PROTO_DOWN_REASON], extack);
3067 if (err)
3068 goto errout;
3069 status |= DO_SETLINK_NOTIFY;
3070 }
3071
3072 if (tb[IFLA_XDP]) {
3073 struct nlattr *xdp[IFLA_XDP_MAX + 1];
3074 u32 xdp_flags = 0;
3075
3076 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
3077 tb[IFLA_XDP],
3078 ifla_xdp_policy, NULL);
3079 if (err < 0)
3080 goto errout;
3081
3082 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
3083 err = -EINVAL;
3084 goto errout;
3085 }
3086
3087 if (xdp[IFLA_XDP_FLAGS]) {
3088 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
3089 if (xdp_flags & ~XDP_FLAGS_MASK) {
3090 err = -EINVAL;
3091 goto errout;
3092 }
3093 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
3094 err = -EINVAL;
3095 goto errout;
3096 }
3097 }
3098
3099 if (xdp[IFLA_XDP_FD]) {
3100 int expected_fd = -1;
3101
3102 if (xdp_flags & XDP_FLAGS_REPLACE) {
3103 if (!xdp[IFLA_XDP_EXPECTED_FD]) {
3104 err = -EINVAL;
3105 goto errout;
3106 }
3107 expected_fd =
3108 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
3109 }
3110
3111 err = dev_change_xdp_fd(dev, extack,
3112 nla_get_s32(xdp[IFLA_XDP_FD]),
3113 expected_fd,
3114 xdp_flags);
3115 if (err)
3116 goto errout;
3117 status |= DO_SETLINK_NOTIFY;
3118 }
3119 }
3120
3121errout:
3122 if (status & DO_SETLINK_MODIFIED) {
3123 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
3124 netdev_state_change(dev);
3125
3126 if (err < 0)
3127 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
3128 dev->name);
3129 }
3130
3131 return err;
3132}
3133
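/* Resolve a device from IFLA_IFNAME or IFLA_ALT_IFNAME. Alternative names
 * share the per-net name hash with primary names, so __dev_get_by_name()
 * matches either kind. Returns NULL when neither attribute is present or no
 * device matches.
 */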
3134static struct net_device *rtnl_dev_get(struct net *net,
3135 struct nlattr *tb[])
3136{
3137 char ifname[ALTIFNAMSIZ];
3138
3139 if (tb[IFLA_IFNAME])
3140 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3141 else if (tb[IFLA_ALT_IFNAME])
3142 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
3143 else
3144 return NULL;
3145
3146 return __dev_get_by_name(net, ifname);
3147}
3148
3149static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3150 struct netlink_ext_ack *extack)
3151{
3152 struct net *net = sock_net(skb->sk);
3153 struct ifinfomsg *ifm;
3154 struct net_device *dev;
3155 int err;
3156 struct nlattr *tb[IFLA_MAX+1];
3157
3158 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3159 ifla_policy, extack);
3160 if (err < 0)
3161 goto errout;
3162
3163 err = rtnl_ensure_unique_netns(tb, extack, false);
3164 if (err < 0)
3165 goto errout;
3166
3167 err = -EINVAL;
3168 ifm = nlmsg_data(nlh);
3169 if (ifm->ifi_index > 0)
3170 dev = __dev_get_by_index(net, ifm->ifi_index);
3171 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3172 dev = rtnl_dev_get(net, tb);
3173 else
3174 goto errout;
3175
3176 if (dev == NULL) {
3177 err = -ENODEV;
3178 goto errout;
3179 }
3180
3181 err = validate_linkmsg(dev, tb, extack);
3182 if (err < 0)
3183 goto errout;
3184
3185 err = do_setlink(skb, dev, ifm, extack, tb, 0);
3186errout:
3187 return err;
3188}
3189
3190static int rtnl_group_dellink(const struct net *net, int group)
3191{
3192 struct net_device *dev, *aux;
3193 LIST_HEAD(list_kill);
3194 bool found = false;
3195
3196 if (!group)
3197 return -EPERM;
3198
3199 for_each_netdev(net, dev) {
3200 if (dev->group == group) {
3201 const struct rtnl_link_ops *ops;
3202
3203 found = true;
3204 ops = dev->rtnl_link_ops;
3205 if (!ops || !ops->dellink)
3206 return -EOPNOTSUPP;
3207 }
3208 }
3209
3210 if (!found)
3211 return -ENODEV;
3212
3213 for_each_netdev_safe(net, dev, aux) {
3214 if (dev->group == group) {
3215 const struct rtnl_link_ops *ops;
3216
3217 ops = dev->rtnl_link_ops;
3218 ops->dellink(dev, &list_kill);
3219 }
3220 }
3221 unregister_netdevice_many(&list_kill);
3222
3223 return 0;
3224}
3225
3226int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh)
3227{
3228 const struct rtnl_link_ops *ops;
3229 LIST_HEAD(list_kill);
3230
3231 ops = dev->rtnl_link_ops;
3232 if (!ops || !ops->dellink)
3233 return -EOPNOTSUPP;
3234
3235 ops->dellink(dev, &list_kill);
3236 unregister_netdevice_many_notify(&list_kill, portid, nlh);
3237
3238 return 0;
3239}
3240EXPORT_SYMBOL_GPL(rtnl_delete_link);
3241
3242static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3243 struct netlink_ext_ack *extack)
3244{
3245 struct net *net = sock_net(skb->sk);
3246 u32 portid = NETLINK_CB(skb).portid;
3247 struct net *tgt_net = net;
3248 struct net_device *dev = NULL;
3249 struct ifinfomsg *ifm;
3250 struct nlattr *tb[IFLA_MAX+1];
3251 int err;
3252 int netnsid = -1;
3253
3254 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3255 ifla_policy, extack);
3256 if (err < 0)
3257 return err;
3258
3259 err = rtnl_ensure_unique_netns(tb, extack, true);
3260 if (err < 0)
3261 return err;
3262
3263 if (tb[IFLA_TARGET_NETNSID]) {
3264 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3265 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3266 if (IS_ERR(tgt_net))
3267 return PTR_ERR(tgt_net);
3268 }
3269
3270 err = -EINVAL;
3271 ifm = nlmsg_data(nlh);
3272 if (ifm->ifi_index > 0)
3273 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3274 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3275 dev = rtnl_dev_get(net, tb);
3276 else if (tb[IFLA_GROUP])
3277 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
3278 else
3279 goto out;
3280
3281 if (!dev) {
3282 if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
3283 err = -ENODEV;
3284
3285 goto out;
3286 }
3287
3288 err = rtnl_delete_link(dev, portid, nlh);
3289
3290out:
3291 if (netnsid >= 0)
3292 put_net(tgt_net);
3293
3294 return err;
3295}
3296
3297int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
3298 u32 portid, const struct nlmsghdr *nlh)
3299{
3300 unsigned int old_flags;
3301 int err;
3302
3303 old_flags = dev->flags;
3304 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
3305 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3306 NULL);
3307 if (err < 0)
3308 return err;
3309 }
3310
3311 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
3312 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh);
3313 } else {
3314 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
3315 __dev_notify_flags(dev, old_flags, ~0U, portid, nlh);
3316 }
3317 return 0;
3318}
3319EXPORT_SYMBOL(rtnl_configure_link);
3320
3321struct net_device *rtnl_create_link(struct net *net, const char *ifname,
3322 unsigned char name_assign_type,
3323 const struct rtnl_link_ops *ops,
3324 struct nlattr *tb[],
3325 struct netlink_ext_ack *extack)
3326{
3327 struct net_device *dev;
3328 unsigned int num_tx_queues = 1;
3329 unsigned int num_rx_queues = 1;
3330 int err;
3331
3332 if (tb[IFLA_NUM_TX_QUEUES])
3333 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
3334 else if (ops->get_num_tx_queues)
3335 num_tx_queues = ops->get_num_tx_queues();
3336
3337 if (tb[IFLA_NUM_RX_QUEUES])
3338 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
3339 else if (ops->get_num_rx_queues)
3340 num_rx_queues = ops->get_num_rx_queues();
3341
3342 if (num_tx_queues < 1 || num_tx_queues > 4096) {
3343 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
3344 return ERR_PTR(-EINVAL);
3345 }
3346
3347 if (num_rx_queues < 1 || num_rx_queues > 4096) {
3348 NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
3349 return ERR_PTR(-EINVAL);
3350 }
3351
3352 if (ops->alloc) {
3353 dev = ops->alloc(tb, ifname, name_assign_type,
3354 num_tx_queues, num_rx_queues);
3355 if (IS_ERR(dev))
3356 return dev;
3357 } else {
3358 dev = alloc_netdev_mqs(ops->priv_size, ifname,
3359 name_assign_type, ops->setup,
3360 num_tx_queues, num_rx_queues);
3361 }
3362
3363 if (!dev)
3364 return ERR_PTR(-ENOMEM);
3365
3366 err = validate_linkmsg(dev, tb, extack);
3367 if (err < 0) {
3368 free_netdev(dev);
3369 return ERR_PTR(err);
3370 }
3371
3372 dev_net_set(dev, net);
3373 dev->rtnl_link_ops = ops;
3374 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
3375
3376 if (tb[IFLA_MTU]) {
3377 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
3378
3379 err = dev_validate_mtu(dev, mtu, extack);
3380 if (err) {
3381 free_netdev(dev);
3382 return ERR_PTR(err);
3383 }
3384 dev->mtu = mtu;
3385 }
3386 if (tb[IFLA_ADDRESS]) {
3387 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
3388 nla_len(tb[IFLA_ADDRESS]));
3389 dev->addr_assign_type = NET_ADDR_SET;
3390 }
3391 if (tb[IFLA_BROADCAST])
3392 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3393 nla_len(tb[IFLA_BROADCAST]));
3394 if (tb[IFLA_TXQLEN])
3395 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3396 if (tb[IFLA_OPERSTATE])
3397 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3398 if (tb[IFLA_LINKMODE])
3399 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
3400 if (tb[IFLA_GROUP])
3401 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3402 if (tb[IFLA_GSO_MAX_SIZE])
3403 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3404 if (tb[IFLA_GSO_MAX_SEGS])
3405 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
3406 if (tb[IFLA_GRO_MAX_SIZE])
3407 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
3408 if (tb[IFLA_GSO_IPV4_MAX_SIZE])
3409 netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]));
3410 if (tb[IFLA_GRO_IPV4_MAX_SIZE])
3411 netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]));
3412
3413 return dev;
3414}
3415EXPORT_SYMBOL(rtnl_create_link);
3416
3417static int rtnl_group_changelink(const struct sk_buff *skb,
3418 struct net *net, int group,
3419 struct ifinfomsg *ifm,
3420 struct netlink_ext_ack *extack,
3421 struct nlattr **tb)
3422{
3423 struct net_device *dev, *aux;
3424 int err;
3425
3426 for_each_netdev_safe(net, dev, aux) {
3427 if (dev->group == group) {
3428 err = validate_linkmsg(dev, tb, extack);
3429 if (err < 0)
3430 return err;
3431 err = do_setlink(skb, dev, ifm, extack, tb, 0);
3432 if (err < 0)
3433 return err;
3434 }
3435 }
3436
3437 return 0;
3438}
3439
3440static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
3441 const struct rtnl_link_ops *ops,
3442 const struct nlmsghdr *nlh,
3443 struct nlattr **tb, struct nlattr **data,
3444 struct netlink_ext_ack *extack)
3445{
3446 unsigned char name_assign_type = NET_NAME_USER;
3447 struct net *net = sock_net(skb->sk);
3448 u32 portid = NETLINK_CB(skb).portid;
3449 struct net *dest_net, *link_net;
3450 struct net_device *dev;
3451 char ifname[IFNAMSIZ];
3452 int err;
3453
3454 if (!ops->alloc && !ops->setup)
3455 return -EOPNOTSUPP;
3456
3457 if (tb[IFLA_IFNAME]) {
3458 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3459 } else {
3460 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3461 name_assign_type = NET_NAME_ENUM;
3462 }
3463
3464 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3465 if (IS_ERR(dest_net))
3466 return PTR_ERR(dest_net);
3467
3468 if (tb[IFLA_LINK_NETNSID]) {
3469 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
3470
3471 link_net = get_net_ns_by_id(dest_net, id);
3472 if (!link_net) {
3473 NL_SET_ERR_MSG(extack, "Unknown network namespace id");
3474 err = -EINVAL;
3475 goto out;
3476 }
3477 err = -EPERM;
3478 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
3479 goto out;
3480 } else {
3481 link_net = NULL;
3482 }
3483
3484 dev = rtnl_create_link(link_net ? : dest_net, ifname,
3485 name_assign_type, ops, tb, extack);
3486 if (IS_ERR(dev)) {
3487 err = PTR_ERR(dev);
3488 goto out;
3489 }
3490
3491 dev->ifindex = ifm->ifi_index;
3492
3493 if (ops->newlink)
3494 err = ops->newlink(link_net ? : net, dev, tb, data, extack);
3495 else
3496 err = register_netdevice(dev);
3497 if (err < 0) {
3498 free_netdev(dev);
3499 goto out;
3500 }
3501
3502 err = rtnl_configure_link(dev, ifm, portid, nlh);
3503 if (err < 0)
3504 goto out_unregister;
3505 if (link_net) {
3506 err = dev_change_net_namespace(dev, dest_net, ifname);
3507 if (err < 0)
3508 goto out_unregister;
3509 }
3510 if (tb[IFLA_MASTER]) {
3511 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3512 if (err)
3513 goto out_unregister;
3514 }
3515out:
3516 if (link_net)
3517 put_net(link_net);
3518 put_net(dest_net);
3519 return err;
3520out_unregister:
3521 if (ops->newlink) {
3522 LIST_HEAD(list_kill);
3523
3524 ops->dellink(dev, &list_kill);
3525 unregister_netdevice_many(&list_kill);
3526 } else {
3527 unregister_netdevice(dev);
3528 }
3529 goto out;
3530}
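
/* A typical creation request served above (iproute2 sketch):
 *
 *	ip link add link eth0 name eth0.100 type vlan id 100
 *
 * "type vlan" selects the rtnl_link_ops via IFLA_INFO_KIND, "id 100" travels
 * inside IFLA_INFO_DATA, and omitting "name" falls back to the "<kind>%d"
 * enumeration pattern (NET_NAME_ENUM) built above.
 */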
3531
3532struct rtnl_newlink_tbs {
3533 struct nlattr *tb[IFLA_MAX + 1];
3534 struct nlattr *attr[RTNL_MAX_TYPE + 1];
3535 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
3536};
3537
3538static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3539 struct rtnl_newlink_tbs *tbs,
3540 struct netlink_ext_ack *extack)
3541{
3542 struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
3543 struct nlattr ** const tb = tbs->tb;
3544 const struct rtnl_link_ops *m_ops;
3545 struct net_device *master_dev;
3546 struct net *net = sock_net(skb->sk);
3547 const struct rtnl_link_ops *ops;
3548 struct nlattr **slave_data;
3549 char kind[MODULE_NAME_LEN];
3550 struct net_device *dev;
3551 struct ifinfomsg *ifm;
3552 struct nlattr **data;
3553 bool link_specified;
3554 int err;
3555
3556#ifdef CONFIG_MODULES
3557replay:
3558#endif
3559 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3560 ifla_policy, extack);
3561 if (err < 0)
3562 return err;
3563
3564 err = rtnl_ensure_unique_netns(tb, extack, false);
3565 if (err < 0)
3566 return err;
3567
3568 ifm = nlmsg_data(nlh);
3569 if (ifm->ifi_index > 0) {
3570 link_specified = true;
3571 dev = __dev_get_by_index(net, ifm->ifi_index);
3572 } else if (ifm->ifi_index < 0) {
3573 NL_SET_ERR_MSG(extack, "ifindex can't be negative");
3574 return -EINVAL;
3575 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
3576 link_specified = true;
3577 dev = rtnl_dev_get(net, tb);
3578 } else {
3579 link_specified = false;
3580 dev = NULL;
3581 }
3582
3583 master_dev = NULL;
3584 m_ops = NULL;
3585 if (dev) {
3586 master_dev = netdev_master_upper_dev_get(dev);
3587 if (master_dev)
3588 m_ops = master_dev->rtnl_link_ops;
3589 }
3590
3591 if (tb[IFLA_LINKINFO]) {
3592 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
3593 tb[IFLA_LINKINFO],
3594 ifla_info_policy, NULL);
3595 if (err < 0)
3596 return err;
3597 } else
3598 memset(linkinfo, 0, sizeof(linkinfo));
3599
3600 if (linkinfo[IFLA_INFO_KIND]) {
3601 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
3602 ops = rtnl_link_ops_get(kind);
3603 } else {
3604 kind[0] = '\0';
3605 ops = NULL;
3606 }
3607
3608 data = NULL;
3609 if (ops) {
3610 if (ops->maxtype > RTNL_MAX_TYPE)
3611 return -EINVAL;
3612
3613 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
3614 err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
3615 linkinfo[IFLA_INFO_DATA],
3616 ops->policy, extack);
3617 if (err < 0)
3618 return err;
3619 data = tbs->attr;
3620 }
3621 if (ops->validate) {
3622 err = ops->validate(tb, data, extack);
3623 if (err < 0)
3624 return err;
3625 }
3626 }
3627
3628 slave_data = NULL;
3629 if (m_ops) {
3630 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3631 return -EINVAL;
3632
3633 if (m_ops->slave_maxtype &&
3634 linkinfo[IFLA_INFO_SLAVE_DATA]) {
3635 err = nla_parse_nested_deprecated(tbs->slave_attr,
3636 m_ops->slave_maxtype,
3637 linkinfo[IFLA_INFO_SLAVE_DATA],
3638 m_ops->slave_policy,
3639 extack);
3640 if (err < 0)
3641 return err;
3642 slave_data = tbs->slave_attr;
3643 }
3644 }
3645
3646 if (dev) {
3647 int status = 0;
3648
3649 if (nlh->nlmsg_flags & NLM_F_EXCL)
3650 return -EEXIST;
3651 if (nlh->nlmsg_flags & NLM_F_REPLACE)
3652 return -EOPNOTSUPP;
3653
3654 err = validate_linkmsg(dev, tb, extack);
3655 if (err < 0)
3656 return err;
3657
3658 if (linkinfo[IFLA_INFO_DATA]) {
3659 if (!ops || ops != dev->rtnl_link_ops ||
3660 !ops->changelink)
3661 return -EOPNOTSUPP;
3662
3663 err = ops->changelink(dev, tb, data, extack);
3664 if (err < 0)
3665 return err;
3666 status |= DO_SETLINK_NOTIFY;
3667 }
3668
3669 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3670 if (!m_ops || !m_ops->slave_changelink)
3671 return -EOPNOTSUPP;
3672
3673 err = m_ops->slave_changelink(master_dev, dev, tb,
3674 slave_data, extack);
3675 if (err < 0)
3676 return err;
3677 status |= DO_SETLINK_NOTIFY;
3678 }
3679
3680 return do_setlink(skb, dev, ifm, extack, tb, status);
3681 }
3682
3683 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
3684 /* No dev found and NLM_F_CREATE not set: the requested dev does not
3685 * exist, or the request is for a group.
3686 */
3687 if (link_specified)
3688 return -ENODEV;
3689 if (tb[IFLA_GROUP])
3690 return rtnl_group_changelink(skb, net,
3691 nla_get_u32(tb[IFLA_GROUP]),
3692 ifm, extack, tb);
3693 return -ENODEV;
3694 }
3695
3696 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3697 return -EOPNOTSUPP;
3698
3699 if (!ops) {
3700#ifdef CONFIG_MODULES
3701 if (kind[0]) {
3702 __rtnl_unlock();
3703 request_module("rtnl-link-%s", kind);
3704 rtnl_lock();
3705 ops = rtnl_link_ops_get(kind);
3706 if (ops)
3707 goto replay;
3708 }
3709#endif
3710 NL_SET_ERR_MSG(extack, "Unknown device type");
3711 return -EOPNOTSUPP;
3712 }
3713
3714 return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack);
3715}
3716
3717static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3718 struct netlink_ext_ack *extack)
3719{
3720 struct rtnl_newlink_tbs *tbs;
3721 int ret;
3722
3723 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
3724 if (!tbs)
3725 return -ENOMEM;
3726
3727 ret = __rtnl_newlink(skb, nlh, tbs, extack);
3728 kfree(tbs);
3729 return ret;
3730}
3731
3732static int rtnl_valid_getlink_req(struct sk_buff *skb,
3733 const struct nlmsghdr *nlh,
3734 struct nlattr **tb,
3735 struct netlink_ext_ack *extack)
3736{
3737 struct ifinfomsg *ifm;
3738 int i, err;
3739
3740 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
3741 NL_SET_ERR_MSG(extack, "Invalid header for get link");
3742 return -EINVAL;
3743 }
3744
3745 if (!netlink_strict_get_check(skb))
3746 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3747 ifla_policy, extack);
3748
3749 ifm = nlmsg_data(nlh);
3750 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
3751 ifm->ifi_change) {
3752 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
3753 return -EINVAL;
3754 }
3755
3756 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
3757 ifla_policy, extack);
3758 if (err)
3759 return err;
3760
3761 for (i = 0; i <= IFLA_MAX; i++) {
3762 if (!tb[i])
3763 continue;
3764
3765 switch (i) {
3766 case IFLA_IFNAME:
3767 case IFLA_ALT_IFNAME:
3768 case IFLA_EXT_MASK:
3769 case IFLA_TARGET_NETNSID:
3770 break;
3771 default:
3772 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
3773 return -EINVAL;
3774 }
3775 }
3776
3777 return 0;
3778}
3779
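/* Single-link get, e.g. "ip link show dev eth0": resolve the device, size one
 * reply with if_nlmsg_size(), fill it via rtnl_fill_ifinfo() and unicast it
 * back to the requesting portid.
 */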
3780static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3781 struct netlink_ext_ack *extack)
3782{
3783 struct net *net = sock_net(skb->sk);
3784 struct net *tgt_net = net;
3785 struct ifinfomsg *ifm;
3786 struct nlattr *tb[IFLA_MAX+1];
3787 struct net_device *dev = NULL;
3788 struct sk_buff *nskb;
3789 int netnsid = -1;
3790 int err;
3791 u32 ext_filter_mask = 0;
3792
3793 err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
3794 if (err < 0)
3795 return err;
3796
3797 err = rtnl_ensure_unique_netns(tb, extack, true);
3798 if (err < 0)
3799 return err;
3800
3801 if (tb[IFLA_TARGET_NETNSID]) {
3802 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3803 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3804 if (IS_ERR(tgt_net))
3805 return PTR_ERR(tgt_net);
3806 }
3807
3808 if (tb[IFLA_EXT_MASK])
3809 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3810
3811 err = -EINVAL;
3812 ifm = nlmsg_data(nlh);
3813 if (ifm->ifi_index > 0)
3814 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3815 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3816 dev = rtnl_dev_get(tgt_net, tb);
3817 else
3818 goto out;
3819
3820 err = -ENODEV;
3821 if (dev == NULL)
3822 goto out;
3823
3824 err = -ENOBUFS;
3825 nskb = nlmsg_new_large(if_nlmsg_size(dev, ext_filter_mask));
3826 if (nskb == NULL)
3827 goto out;
3828
3829 /* Synchronize the carrier state so we don't report a state
3830 * that we're not actually going to honour immediately; if
3831 * the driver just did a carrier off->on transition, we can
3832 * only TX if link watch work has run, but without this we'd
3833 * already report carrier on, even if it doesn't work yet.
3834 */
3835 linkwatch_sync_dev(dev);
3836
3837 err = rtnl_fill_ifinfo(nskb, dev, net,
3838 RTM_NEWLINK, NETLINK_CB(skb).portid,
3839 nlh->nlmsg_seq, 0, 0, ext_filter_mask,
3840 0, NULL, 0, netnsid, GFP_KERNEL);
3841 if (err < 0) {
3842 /* -EMSGSIZE implies BUG in if_nlmsg_size */
3843 WARN_ON(err == -EMSGSIZE);
3844 kfree_skb(nskb);
3845 } else
3846 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
3847out:
3848 if (netnsid >= 0)
3849 put_net(tgt_net);
3850
3851 return err;
3852}
3853
3854static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
3855 bool *changed, struct netlink_ext_ack *extack)
3856{
3857 char *alt_ifname;
3858 size_t size;
3859 int err;
3860
3861 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
3862 if (err)
3863 return err;
3864
3865 if (cmd == RTM_NEWLINKPROP) {
3866 size = rtnl_prop_list_size(dev);
3867 size += nla_total_size(ALTIFNAMSIZ);
3868 if (size >= U16_MAX) {
3869 NL_SET_ERR_MSG(extack,
3870 "effective property list too long");
3871 return -EINVAL;
3872 }
3873 }
3874
3875 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
3876 if (!alt_ifname)
3877 return -ENOMEM;
3878
3879 if (cmd == RTM_NEWLINKPROP) {
3880 err = netdev_name_node_alt_create(dev, alt_ifname);
3881 if (!err)
3882 alt_ifname = NULL;
3883 } else if (cmd == RTM_DELLINKPROP) {
3884 err = netdev_name_node_alt_destroy(dev, alt_ifname);
3885 } else {
3886 WARN_ON_ONCE(1);
3887 err = -EINVAL;
3888 }
3889
3890 kfree(alt_ifname);
3891 if (!err)
3892 *changed = true;
3893 return err;
3894}
3895
3896static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
3897 struct netlink_ext_ack *extack)
3898{
3899 struct net *net = sock_net(skb->sk);
3900 struct nlattr *tb[IFLA_MAX + 1];
3901 struct net_device *dev;
3902 struct ifinfomsg *ifm;
3903 bool changed = false;
3904 struct nlattr *attr;
3905 int err, rem;
3906
3907 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
3908 if (err)
3909 return err;
3910
3911 err = rtnl_ensure_unique_netns(tb, extack, true);
3912 if (err)
3913 return err;
3914
3915 ifm = nlmsg_data(nlh);
3916 if (ifm->ifi_index > 0)
3917 dev = __dev_get_by_index(net, ifm->ifi_index);
3918 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3919 dev = rtnl_dev_get(net, tb);
3920 else
3921 return -EINVAL;
3922
3923 if (!dev)
3924 return -ENODEV;
3925
3926 if (!tb[IFLA_PROP_LIST])
3927 return 0;
3928
3929 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
3930 switch (nla_type(attr)) {
3931 case IFLA_ALT_IFNAME:
3932 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
3933 if (err)
3934 return err;
3935 break;
3936 }
3937 }
3938
3939 if (changed)
3940 netdev_state_change(dev);
3941 return 0;
3942}
3943
3944static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3945 struct netlink_ext_ack *extack)
3946{
3947 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
3948}
3949
3950static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3951 struct netlink_ext_ack *extack)
3952{
3953 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
3954}
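
/* Alternative-name management served by the two handlers above (iproute2
 * sketch):
 *
 *	ip link property add dev eth0 altname lan0   -> RTM_NEWLINKPROP
 *	ip link property del dev eth0 altname lan0   -> RTM_DELLINKPROP
 *
 * Each altname arrives as one IFLA_ALT_IFNAME inside IFLA_PROP_LIST.
 */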
3955
3956static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
3957{
3958 struct net *net = sock_net(skb->sk);
3959 size_t min_ifinfo_dump_size = 0;
3960 struct nlattr *tb[IFLA_MAX+1];
3961 u32 ext_filter_mask = 0;
3962 struct net_device *dev;
3963 int hdrlen;
3964
3965 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
3966 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
3967 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
3968
3969 if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
3970 if (tb[IFLA_EXT_MASK])
3971 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3972 }
3973
3974 if (!ext_filter_mask)
3975 return NLMSG_GOODSIZE;
	/* Traverse the list of net devices and compute the minimum
	 * buffer size that can hold the largest single device's
	 * attributes, based upon the filter mask.
	 */
3980 rcu_read_lock();
3981 for_each_netdev_rcu(net, dev) {
3982 min_ifinfo_dump_size = max(min_ifinfo_dump_size,
3983 if_nlmsg_size(dev, ext_filter_mask));
3984 }
3985 rcu_read_unlock();
3986
3987 return nlmsg_total_size(min_ifinfo_dump_size);
3988}
3989
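/* Dump handler for PF_UNSPEC requests: invoke the dumpit callback of
 * every registered family for this message type, using cb->family to
 * remember which family to resume from on the next call.
 */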
3990static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
3991{
3992 int idx;
3993 int s_idx = cb->family;
3994 int type = cb->nlh->nlmsg_type - RTM_BASE;
3995 int ret = 0;
3996
3997 if (s_idx == 0)
3998 s_idx = 1;
3999
4000 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
4001 struct rtnl_link __rcu **tab;
4002 struct rtnl_link *link;
4003 rtnl_dumpit_func dumpit;
4004
4005 if (idx < s_idx || idx == PF_PACKET)
4006 continue;
4007
4008 if (type < 0 || type >= RTM_NR_MSGTYPES)
4009 continue;
4010
4011 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
4012 if (!tab)
4013 continue;
4014
4015 link = rcu_dereference_rtnl(tab[type]);
4016 if (!link)
4017 continue;
4018
4019 dumpit = link->dumpit;
4020 if (!dumpit)
4021 continue;
4022
4023 if (idx > s_idx) {
4024 memset(&cb->args[0], 0, sizeof(cb->args));
4025 cb->prev_seq = 0;
4026 cb->seq = 0;
4027 }
4028 ret = dumpit(skb, cb);
4029 if (ret)
4030 break;
4031 }
4032 cb->family = idx;
4033
4034 return skb->len ? : ret;
4035}
4036
4037struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
4038 unsigned int change,
4039 u32 event, gfp_t flags, int *new_nsid,
4040 int new_ifindex, u32 portid,
4041 const struct nlmsghdr *nlh)
4042{
4043 struct net *net = dev_net(dev);
4044 struct sk_buff *skb;
4045 int err = -ENOBUFS;
4046 u32 seq = 0;
4047
4048 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
4049 if (skb == NULL)
4050 goto errout;
4051
4052 if (nlmsg_report(nlh))
4053 seq = nlmsg_seq(nlh);
4054 else
4055 portid = 0;
4056
4057 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
4058 type, portid, seq, change, 0, 0, event,
4059 new_nsid, new_ifindex, -1, flags);
4060 if (err < 0) {
4061 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
4062 WARN_ON(err == -EMSGSIZE);
4063 kfree_skb(skb);
4064 goto errout;
4065 }
4066 return skb;
4067errout:
4068 if (err < 0)
4069 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
4070 return NULL;
4071}
4072
4073void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags,
4074 u32 portid, const struct nlmsghdr *nlh)
4075{
4076 struct net *net = dev_net(dev);
4077
4078 rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags);
4079}
4080
4081static void rtmsg_ifinfo_event(int type, struct net_device *dev,
4082 unsigned int change, u32 event,
4083 gfp_t flags, int *new_nsid, int new_ifindex,
4084 u32 portid, const struct nlmsghdr *nlh)
4085{
4086 struct sk_buff *skb;
4087
4088 if (dev->reg_state != NETREG_REGISTERED)
4089 return;
4090
4091 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
4092 new_ifindex, portid, nlh);
4093 if (skb)
4094 rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
4095}
4096
4097void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
4098 gfp_t flags, u32 portid, const struct nlmsghdr *nlh)
4099{
4100 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
4101 NULL, 0, portid, nlh);
4102}
4103
4104void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
4105 gfp_t flags, int *new_nsid, int new_ifindex)
4106{
4107 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
4108 new_nsid, new_ifindex, 0, NULL);
4109}
4110
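/* Fill one RTM_{NEW,DEL}NEIGH message describing an AF_BRIDGE fdb
 * entry for the given address and, when non-zero, VLAN id.
 */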
4111static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
4112 struct net_device *dev,
4113 u8 *addr, u16 vid, u32 pid, u32 seq,
4114 int type, unsigned int flags,
4115 int nlflags, u16 ndm_state)
4116{
4117 struct nlmsghdr *nlh;
4118 struct ndmsg *ndm;
4119
4120 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
4121 if (!nlh)
4122 return -EMSGSIZE;
4123
4124 ndm = nlmsg_data(nlh);
4125 ndm->ndm_family = AF_BRIDGE;
4126 ndm->ndm_pad1 = 0;
4127 ndm->ndm_pad2 = 0;
4128 ndm->ndm_flags = flags;
4129 ndm->ndm_type = 0;
4130 ndm->ndm_ifindex = dev->ifindex;
4131 ndm->ndm_state = ndm_state;
4132
4133 if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr))
4134 goto nla_put_failure;
4135 if (vid)
4136 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
4137 goto nla_put_failure;
4138
4139 nlmsg_end(skb, nlh);
4140 return 0;
4141
4142nla_put_failure:
4143 nlmsg_cancel(skb, nlh);
4144 return -EMSGSIZE;
4145}
4146
4147static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev)
4148{
4149 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
4150 nla_total_size(dev->addr_len) + /* NDA_LLADDR */
4151 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
4152 0;
4153}
4154
4155static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
4156 u16 ndm_state)
4157{
4158 struct net *net = dev_net(dev);
4159 struct sk_buff *skb;
4160 int err = -ENOBUFS;
4161
4162 skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC);
4163 if (!skb)
4164 goto errout;
4165
4166 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
4167 0, 0, type, NTF_SELF, 0, ndm_state);
4168 if (err < 0) {
4169 kfree_skb(skb);
4170 goto errout;
4171 }
4172
4173 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
4174 return;
4175errout:
4176 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
4177}
4178
4179/*
4180 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
4181 */
4182int ndo_dflt_fdb_add(struct ndmsg *ndm,
4183 struct nlattr *tb[],
4184 struct net_device *dev,
4185 const unsigned char *addr, u16 vid,
4186 u16 flags)
4187{
4188 int err = -EINVAL;
4189
	/* If aging addresses are supported, the device will need to
	 * implement its own handler for this.
	 */
4193 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
4194 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4195 return err;
4196 }
4197
4198 if (tb[NDA_FLAGS_EXT]) {
4199 netdev_info(dev, "invalid flags given to default FDB implementation\n");
4200 return err;
4201 }
4202
4203 if (vid) {
4204 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
4205 return err;
4206 }
4207
4208 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4209 err = dev_uc_add_excl(dev, addr);
4210 else if (is_multicast_ether_addr(addr))
4211 err = dev_mc_add_excl(dev, addr);
4212
4213 /* Only return duplicate errors if NLM_F_EXCL is set */
4214 if (err == -EEXIST && !(flags & NLM_F_EXCL))
4215 err = 0;
4216
4217 return err;
4218}
4219EXPORT_SYMBOL(ndo_dflt_fdb_add);
4220
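/* Parse an optional NDA_VLAN attribute; *p_vid is set to 0 when no
 * VLAN was supplied.
 */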
4221static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
4222 struct netlink_ext_ack *extack)
4223{
4224 u16 vid = 0;
4225
4226 if (vlan_attr) {
4227 if (nla_len(vlan_attr) != sizeof(u16)) {
4228 NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
4229 return -EINVAL;
4230 }
4231
4232 vid = nla_get_u16(vlan_attr);
4233
4234 if (!vid || vid >= VLAN_VID_MASK) {
4235 NL_SET_ERR_MSG(extack, "invalid vlan id");
4236 return -EINVAL;
4237 }
4238 }
4239 *p_vid = vid;
4240 return 0;
4241}
4242
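/* RTM_NEWNEIGH handler for fdb entries: the entry is offered first to
 * the bridge master (NTF_MASTER, also the default), then to the device
 * itself (NTF_SELF), with listeners notified on success.
 */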
4243static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
4244 struct netlink_ext_ack *extack)
4245{
4246 struct net *net = sock_net(skb->sk);
4247 struct ndmsg *ndm;
4248 struct nlattr *tb[NDA_MAX+1];
4249 struct net_device *dev;
4250 u8 *addr;
4251 u16 vid;
4252 int err;
4253
4254 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4255 extack);
4256 if (err < 0)
4257 return err;
4258
4259 ndm = nlmsg_data(nlh);
4260 if (ndm->ndm_ifindex == 0) {
4261 NL_SET_ERR_MSG(extack, "invalid ifindex");
4262 return -EINVAL;
4263 }
4264
4265 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4266 if (dev == NULL) {
4267 NL_SET_ERR_MSG(extack, "unknown ifindex");
4268 return -ENODEV;
4269 }
4270
4271 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4272 NL_SET_ERR_MSG(extack, "invalid address");
4273 return -EINVAL;
4274 }
4275
4276 if (dev->type != ARPHRD_ETHER) {
4277 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
4278 return -EINVAL;
4279 }
4280
4281 addr = nla_data(tb[NDA_LLADDR]);
4282
4283 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4284 if (err)
4285 return err;
4286
4287 err = -EOPNOTSUPP;
4288
	/* Support fdb on master device - the net/bridge default case */
4290 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4291 netif_is_bridge_port(dev)) {
4292 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4293 const struct net_device_ops *ops = br_dev->netdev_ops;
4294
4295 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
4296 nlh->nlmsg_flags, extack);
4297 if (err)
4298 goto out;
4299 else
4300 ndm->ndm_flags &= ~NTF_MASTER;
4301 }
4302
4303 /* Embedded bridge, macvlan, and any other device support */
4304 if ((ndm->ndm_flags & NTF_SELF)) {
4305 if (dev->netdev_ops->ndo_fdb_add)
4306 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
4307 vid,
4308 nlh->nlmsg_flags,
4309 extack);
4310 else
4311 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
4312 nlh->nlmsg_flags);
4313
4314 if (!err) {
4315 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4316 ndm->ndm_state);
4317 ndm->ndm_flags &= ~NTF_SELF;
4318 }
4319 }
4320out:
4321 return err;
4322}
4323
4324/*
4325 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
4326 */
4327int ndo_dflt_fdb_del(struct ndmsg *ndm,
4328 struct nlattr *tb[],
4329 struct net_device *dev,
4330 const unsigned char *addr, u16 vid)
4331{
4332 int err = -EINVAL;
4333
	/* If aging addresses are supported, the device will need to
	 * implement its own handler for this.
	 */
4337 if (!(ndm->ndm_state & NUD_PERMANENT)) {
4338 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4339 return err;
4340 }
4341
4342 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4343 err = dev_uc_del(dev, addr);
4344 else if (is_multicast_ether_addr(addr))
4345 err = dev_mc_del(dev, addr);
4346
4347 return err;
4348}
4349EXPORT_SYMBOL(ndo_dflt_fdb_del);
4350
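/* RTM_DELNEIGH handler for fdb entries. With NLM_F_BULK set, deletion
 * is delegated to the driver's ndo_fdb_del_bulk callback, which parses
 * the attributes against its own policy.
 */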
4351static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
4352 struct netlink_ext_ack *extack)
4353{
4354 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
4355 struct net *net = sock_net(skb->sk);
4356 const struct net_device_ops *ops;
4357 struct ndmsg *ndm;
4358 struct nlattr *tb[NDA_MAX+1];
4359 struct net_device *dev;
4360 __u8 *addr = NULL;
4361 int err;
4362 u16 vid;
4363
4364 if (!netlink_capable(skb, CAP_NET_ADMIN))
4365 return -EPERM;
4366
4367 if (!del_bulk) {
4368 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
4369 NULL, extack);
4370 } else {
		/* For bulk delete, the drivers will parse the message with
		 * their own policy.
		 */
4374 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
4375 }
4376 if (err < 0)
4377 return err;
4378
4379 ndm = nlmsg_data(nlh);
4380 if (ndm->ndm_ifindex == 0) {
4381 NL_SET_ERR_MSG(extack, "invalid ifindex");
4382 return -EINVAL;
4383 }
4384
4385 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4386 if (dev == NULL) {
4387 NL_SET_ERR_MSG(extack, "unknown ifindex");
4388 return -ENODEV;
4389 }
4390
4391 if (!del_bulk) {
4392 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4393 NL_SET_ERR_MSG(extack, "invalid address");
4394 return -EINVAL;
4395 }
4396 addr = nla_data(tb[NDA_LLADDR]);
4397
4398 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4399 if (err)
4400 return err;
4401 }
4402
4403 if (dev->type != ARPHRD_ETHER) {
4404 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
4405 return -EINVAL;
4406 }
4407
4408 err = -EOPNOTSUPP;
4409
	/* Support fdb on master device - the net/bridge default case */
4411 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4412 netif_is_bridge_port(dev)) {
4413 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4414
4415 ops = br_dev->netdev_ops;
4416 if (!del_bulk) {
4417 if (ops->ndo_fdb_del)
4418 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4419 } else {
4420 if (ops->ndo_fdb_del_bulk)
4421 err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
4422 }
4423
4424 if (err)
4425 goto out;
4426 else
4427 ndm->ndm_flags &= ~NTF_MASTER;
4428 }
4429
4430 /* Embedded bridge, macvlan, and any other device support */
4431 if (ndm->ndm_flags & NTF_SELF) {
4432 ops = dev->netdev_ops;
4433 if (!del_bulk) {
4434 if (ops->ndo_fdb_del)
4435 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4436 else
4437 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4438 } else {
4439 /* in case err was cleared by NTF_MASTER call */
4440 err = -EOPNOTSUPP;
4441 if (ops->ndo_fdb_del_bulk)
4442 err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
4443 }
4444
4445 if (!err) {
4446 if (!del_bulk)
4447 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4448 ndm->ndm_state);
4449 ndm->ndm_flags &= ~NTF_SELF;
4450 }
4451 }
4452out:
4453 return err;
4454}
4455
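/* Dump every entry of one hardware address list as an fdb message,
 * honouring the resume offset stored in cb->args[2].
 */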
4456static int nlmsg_populate_fdb(struct sk_buff *skb,
4457 struct netlink_callback *cb,
4458 struct net_device *dev,
4459 int *idx,
4460 struct netdev_hw_addr_list *list)
4461{
4462 struct netdev_hw_addr *ha;
4463 int err;
4464 u32 portid, seq;
4465
4466 portid = NETLINK_CB(cb->skb).portid;
4467 seq = cb->nlh->nlmsg_seq;
4468
4469 list_for_each_entry(ha, &list->list, list) {
4470 if (*idx < cb->args[2])
4471 goto skip;
4472
4473 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
4474 portid, seq,
4475 RTM_NEWNEIGH, NTF_SELF,
4476 NLM_F_MULTI, NUD_PERMANENT);
4477 if (err < 0)
4478 return err;
4479skip:
4480 *idx += 1;
4481 }
4482 return 0;
4483}
4484
4485/**
4486 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
4487 * @skb: socket buffer to store message in
4488 * @cb: netlink callback
4489 * @dev: netdevice
4490 * @filter_dev: ignored
4491 * @idx: the number of FDB table entries dumped is added to *@idx
4492 *
4493 * Default netdevice operation to dump the existing unicast address list.
4494 * Returns number of addresses from list put in skb.
4495 */
4496int ndo_dflt_fdb_dump(struct sk_buff *skb,
4497 struct netlink_callback *cb,
4498 struct net_device *dev,
4499 struct net_device *filter_dev,
4500 int *idx)
4501{
4502 int err;
4503
4504 if (dev->type != ARPHRD_ETHER)
4505 return -EINVAL;
4506
4507 netif_addr_lock_bh(dev);
4508 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
4509 if (err)
4510 goto out;
4511 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
4512out:
4513 netif_addr_unlock_bh(dev);
4514 return err;
4515}
4516EXPORT_SYMBOL(ndo_dflt_fdb_dump);
4517
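/* Strict validation of an fdb dump request: the ndmsg header must be
 * otherwise zeroed and only NDA_IFINDEX and NDA_MASTER are accepted.
 */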
4518static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
4519 int *br_idx, int *brport_idx,
4520 struct netlink_ext_ack *extack)
4521{
4522 struct nlattr *tb[NDA_MAX + 1];
4523 struct ndmsg *ndm;
4524 int err, i;
4525
4526 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4527 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
4528 return -EINVAL;
4529 }
4530
4531 ndm = nlmsg_data(nlh);
4532 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4533 ndm->ndm_flags || ndm->ndm_type) {
4534 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
4535 return -EINVAL;
4536 }
4537
4538 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4539 NDA_MAX, NULL, extack);
4540 if (err < 0)
4541 return err;
4542
4543 *brport_idx = ndm->ndm_ifindex;
4544 for (i = 0; i <= NDA_MAX; ++i) {
4545 if (!tb[i])
4546 continue;
4547
4548 switch (i) {
4549 case NDA_IFINDEX:
4550 if (nla_len(tb[i]) != sizeof(u32)) {
4551 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
4552 return -EINVAL;
4553 }
4554 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
4555 break;
4556 case NDA_MASTER:
4557 if (nla_len(tb[i]) != sizeof(u32)) {
4558 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
4559 return -EINVAL;
4560 }
4561 *br_idx = nla_get_u32(tb[NDA_MASTER]);
4562 break;
4563 default:
4564 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
4565 return -EINVAL;
4566 }
4567 }
4568
4569 return 0;
4570}
4571
4572static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
4573 int *br_idx, int *brport_idx,
4574 struct netlink_ext_ack *extack)
4575{
4576 struct nlattr *tb[IFLA_MAX+1];
4577 int err;
4578
4579 /* A hack to preserve kernel<->userspace interface.
4580 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
4581 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
4582 * So, check for ndmsg with an optional u32 attribute (not used here).
4583 * Fortunately these sizes don't conflict with the size of ifinfomsg
4584 * with an optional attribute.
4585 */
4586 if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
4587 (nlmsg_len(nlh) != sizeof(struct ndmsg) +
4588 nla_attr_size(sizeof(u32)))) {
4589 struct ifinfomsg *ifm;
4590
4591 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4592 tb, IFLA_MAX, ifla_policy,
4593 extack);
4594 if (err < 0) {
4595 return -EINVAL;
4596 } else if (err == 0) {
4597 if (tb[IFLA_MASTER])
4598 *br_idx = nla_get_u32(tb[IFLA_MASTER]);
4599 }
4600
4601 ifm = nlmsg_data(nlh);
4602 *brport_idx = ifm->ifi_index;
4603 }
4604 return 0;
4605}
4606
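/* RTM_GETNEIGH dump handler: walk the device hash table and dump fdb
 * entries via the bridge master's and/or the device's own ndo_fdb_dump,
 * optionally filtered by bridge (NDA_MASTER) and port (NDA_IFINDEX).
 */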
4607static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
4608{
4609 struct net_device *dev;
4610 struct net_device *br_dev = NULL;
4611 const struct net_device_ops *ops = NULL;
4612 const struct net_device_ops *cops = NULL;
4613 struct net *net = sock_net(skb->sk);
4614 struct hlist_head *head;
4615 int brport_idx = 0;
4616 int br_idx = 0;
4617 int h, s_h;
4618 int idx = 0, s_idx;
4619 int err = 0;
4620 int fidx = 0;
4621
4622 if (cb->strict_check)
4623 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
4624 cb->extack);
4625 else
4626 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
4627 cb->extack);
4628 if (err < 0)
4629 return err;
4630
4631 if (br_idx) {
4632 br_dev = __dev_get_by_index(net, br_idx);
4633 if (!br_dev)
4634 return -ENODEV;
4635
4636 ops = br_dev->netdev_ops;
4637 }
4638
4639 s_h = cb->args[0];
4640 s_idx = cb->args[1];
4641
4642 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4643 idx = 0;
4644 head = &net->dev_index_head[h];
4645 hlist_for_each_entry(dev, head, index_hlist) {
4646
4647 if (brport_idx && (dev->ifindex != brport_idx))
4648 continue;
4649
			if (!br_idx) { /* user did not specify a particular bridge */
4651 if (netif_is_bridge_port(dev)) {
4652 br_dev = netdev_master_upper_dev_get(dev);
4653 cops = br_dev->netdev_ops;
4654 }
4655 } else {
4656 if (dev != br_dev &&
4657 !netif_is_bridge_port(dev))
4658 continue;
4659
4660 if (br_dev != netdev_master_upper_dev_get(dev) &&
4661 !netif_is_bridge_master(dev))
4662 continue;
4663 cops = ops;
4664 }
4665
4666 if (idx < s_idx)
4667 goto cont;
4668
4669 if (netif_is_bridge_port(dev)) {
4670 if (cops && cops->ndo_fdb_dump) {
4671 err = cops->ndo_fdb_dump(skb, cb,
4672 br_dev, dev,
4673 &fidx);
4674 if (err == -EMSGSIZE)
4675 goto out;
4676 }
4677 }
4678
4679 if (dev->netdev_ops->ndo_fdb_dump)
4680 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4681 dev, NULL,
4682 &fidx);
4683 else
4684 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4685 &fidx);
4686 if (err == -EMSGSIZE)
4687 goto out;
4688
4689 cops = NULL;
4690
			/* Reset the fdb offset to 0 for the rest of the interfaces. */
4692 cb->args[2] = 0;
4693 fidx = 0;
4694cont:
4695 idx++;
4696 }
4697 }
4698
4699out:
4700 cb->args[0] = h;
4701 cb->args[1] = idx;
4702 cb->args[2] = fidx;
4703
4704 return skb->len;
4705}
4706
4707static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
4708 struct nlattr **tb, u8 *ndm_flags,
4709 int *br_idx, int *brport_idx, u8 **addr,
4710 u16 *vid, struct netlink_ext_ack *extack)
4711{
4712 struct ndmsg *ndm;
4713 int err, i;
4714
4715 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4716 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
4717 return -EINVAL;
4718 }
4719
4720 ndm = nlmsg_data(nlh);
4721 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4722 ndm->ndm_type) {
4723 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
4724 return -EINVAL;
4725 }
4726
4727 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
4728 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
4729 return -EINVAL;
4730 }
4731
4732 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4733 NDA_MAX, nda_policy, extack);
4734 if (err < 0)
4735 return err;
4736
4737 *ndm_flags = ndm->ndm_flags;
4738 *brport_idx = ndm->ndm_ifindex;
4739 for (i = 0; i <= NDA_MAX; ++i) {
4740 if (!tb[i])
4741 continue;
4742
4743 switch (i) {
4744 case NDA_MASTER:
4745 *br_idx = nla_get_u32(tb[i]);
4746 break;
4747 case NDA_LLADDR:
4748 if (nla_len(tb[i]) != ETH_ALEN) {
4749 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
4750 return -EINVAL;
4751 }
4752 *addr = nla_data(tb[i]);
4753 break;
4754 case NDA_VLAN:
4755 err = fdb_vid_parse(tb[i], vid, extack);
4756 if (err)
4757 return err;
4758 break;
4759 case NDA_VNI:
4760 break;
4761 default:
4762 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
4763 return -EINVAL;
4764 }
4765 }
4766
4767 return 0;
4768}
4769
4770static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4771 struct netlink_ext_ack *extack)
4772{
4773 struct net_device *dev = NULL, *br_dev = NULL;
4774 const struct net_device_ops *ops = NULL;
4775 struct net *net = sock_net(in_skb->sk);
4776 struct nlattr *tb[NDA_MAX + 1];
4777 struct sk_buff *skb;
4778 int brport_idx = 0;
4779 u8 ndm_flags = 0;
4780 int br_idx = 0;
4781 u8 *addr = NULL;
4782 u16 vid = 0;
4783 int err;
4784
4785 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
4786 &brport_idx, &addr, &vid, extack);
4787 if (err < 0)
4788 return err;
4789
4790 if (!addr) {
4791 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
4792 return -EINVAL;
4793 }
4794
4795 if (brport_idx) {
4796 dev = __dev_get_by_index(net, brport_idx);
4797 if (!dev) {
4798 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
4799 return -ENODEV;
4800 }
4801 }
4802
4803 if (br_idx) {
4804 if (dev) {
4805 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
4806 return -EINVAL;
4807 }
4808
4809 br_dev = __dev_get_by_index(net, br_idx);
4810 if (!br_dev) {
4811 NL_SET_ERR_MSG(extack, "Invalid master ifindex");
4812 return -EINVAL;
4813 }
4814 ops = br_dev->netdev_ops;
4815 }
4816
4817 if (dev) {
4818 if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
4819 if (!netif_is_bridge_port(dev)) {
4820 NL_SET_ERR_MSG(extack, "Device is not a bridge port");
4821 return -EINVAL;
4822 }
4823 br_dev = netdev_master_upper_dev_get(dev);
4824 if (!br_dev) {
4825 NL_SET_ERR_MSG(extack, "Master of device not found");
4826 return -EINVAL;
4827 }
4828 ops = br_dev->netdev_ops;
4829 } else {
4830 if (!(ndm_flags & NTF_SELF)) {
4831 NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
4832 return -EINVAL;
4833 }
4834 ops = dev->netdev_ops;
4835 }
4836 }
4837
4838 if (!br_dev && !dev) {
4839 NL_SET_ERR_MSG(extack, "No device specified");
4840 return -ENODEV;
4841 }
4842
4843 if (!ops || !ops->ndo_fdb_get) {
4844 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
4845 return -EOPNOTSUPP;
4846 }
4847
4848 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4849 if (!skb)
4850 return -ENOBUFS;
4851
4852 if (br_dev)
4853 dev = br_dev;
4854 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
4855 NETLINK_CB(in_skb).portid,
4856 nlh->nlmsg_seq, extack);
4857 if (err)
4858 goto out;
4859
4860 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4861out:
4862 kfree_skb(skb);
4863 return err;
4864}
4865
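/* Emit a u8 attribute for one bridge port flag, but only if the
 * caller's mask selects that flag.
 */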
4866static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
4867 unsigned int attrnum, unsigned int flag)
4868{
4869 if (mask & flag)
4870 return nla_put_u8(skb, attrnum, !!(flags & flag));
4871 return 0;
4872}
4873
4874int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4875 struct net_device *dev, u16 mode,
4876 u32 flags, u32 mask, int nlflags,
4877 u32 filter_mask,
4878 int (*vlan_fill)(struct sk_buff *skb,
4879 struct net_device *dev,
4880 u32 filter_mask))
4881{
4882 struct nlmsghdr *nlh;
4883 struct ifinfomsg *ifm;
4884 struct nlattr *br_afspec;
4885 struct nlattr *protinfo;
4886 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
4887 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4888 int err = 0;
4889
4890 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
4891 if (nlh == NULL)
4892 return -EMSGSIZE;
4893
4894 ifm = nlmsg_data(nlh);
4895 ifm->ifi_family = AF_BRIDGE;
4896 ifm->__ifi_pad = 0;
4897 ifm->ifi_type = dev->type;
4898 ifm->ifi_index = dev->ifindex;
4899 ifm->ifi_flags = dev_get_flags(dev);
4900 ifm->ifi_change = 0;
4901
4903 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4904 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4905 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
4906 (br_dev &&
4907 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
4908 (dev->addr_len &&
4909 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4910 (dev->ifindex != dev_get_iflink(dev) &&
4911 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
4912 goto nla_put_failure;
4913
4914 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
4915 if (!br_afspec)
4916 goto nla_put_failure;
4917
4918 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
4919 nla_nest_cancel(skb, br_afspec);
4920 goto nla_put_failure;
4921 }
4922
4923 if (mode != BRIDGE_MODE_UNDEF) {
4924 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
4925 nla_nest_cancel(skb, br_afspec);
4926 goto nla_put_failure;
4927 }
4928 }
4929 if (vlan_fill) {
4930 err = vlan_fill(skb, dev, filter_mask);
4931 if (err) {
4932 nla_nest_cancel(skb, br_afspec);
4933 goto nla_put_failure;
4934 }
4935 }
4936 nla_nest_end(skb, br_afspec);
4937
4938 protinfo = nla_nest_start(skb, IFLA_PROTINFO);
4939 if (!protinfo)
4940 goto nla_put_failure;
4941
4942 if (brport_nla_put_flag(skb, flags, mask,
4943 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
4944 brport_nla_put_flag(skb, flags, mask,
4945 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
4946 brport_nla_put_flag(skb, flags, mask,
4947 IFLA_BRPORT_FAST_LEAVE,
4948 BR_MULTICAST_FAST_LEAVE) ||
4949 brport_nla_put_flag(skb, flags, mask,
4950 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
4951 brport_nla_put_flag(skb, flags, mask,
4952 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
4953 brport_nla_put_flag(skb, flags, mask,
4954 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
4955 brport_nla_put_flag(skb, flags, mask,
4956 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
4957 brport_nla_put_flag(skb, flags, mask,
4958 IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
4959 brport_nla_put_flag(skb, flags, mask,
4960 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
4961 brport_nla_put_flag(skb, flags, mask,
4962 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
4963 nla_nest_cancel(skb, protinfo);
4964 goto nla_put_failure;
4965 }
4966
4967 nla_nest_end(skb, protinfo);
4968
4969 nlmsg_end(skb, nlh);
4970 return 0;
4971nla_put_failure:
4972 nlmsg_cancel(skb, nlh);
4973 return err ? err : -EMSGSIZE;
4974}
4975EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
4976
4977static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
4978 bool strict_check, u32 *filter_mask,
4979 struct netlink_ext_ack *extack)
4980{
4981 struct nlattr *tb[IFLA_MAX+1];
4982 int err, i;
4983
4984 if (strict_check) {
4985 struct ifinfomsg *ifm;
4986
4987 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
4988 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
4989 return -EINVAL;
4990 }
4991
4992 ifm = nlmsg_data(nlh);
4993 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
4994 ifm->ifi_change || ifm->ifi_index) {
4995 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
4996 return -EINVAL;
4997 }
4998
4999 err = nlmsg_parse_deprecated_strict(nlh,
5000 sizeof(struct ifinfomsg),
5001 tb, IFLA_MAX, ifla_policy,
5002 extack);
5003 } else {
5004 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
5005 tb, IFLA_MAX, ifla_policy,
5006 extack);
5007 }
5008 if (err < 0)
5009 return err;
5010
5011 /* new attributes should only be added with strict checking */
5012 for (i = 0; i <= IFLA_MAX; ++i) {
5013 if (!tb[i])
5014 continue;
5015
5016 switch (i) {
5017 case IFLA_EXT_MASK:
5018 *filter_mask = nla_get_u32(tb[i]);
5019 break;
5020 default:
5021 if (strict_check) {
5022 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
5023 return -EINVAL;
5024 }
5025 }
5026 }
5027
5028 return 0;
5029}
5030
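/* RTM_GETLINK dump handler for AF_BRIDGE: report bridge state for each
 * device via the master's and/or the device's own ndo_bridge_getlink.
 */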
5031static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
5032{
5033 const struct nlmsghdr *nlh = cb->nlh;
5034 struct net *net = sock_net(skb->sk);
5035 struct net_device *dev;
5036 int idx = 0;
5037 u32 portid = NETLINK_CB(cb->skb).portid;
5038 u32 seq = nlh->nlmsg_seq;
5039 u32 filter_mask = 0;
5040 int err;
5041
5042 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
5043 cb->extack);
5044 if (err < 0 && cb->strict_check)
5045 return err;
5046
5047 rcu_read_lock();
5048 for_each_netdev_rcu(net, dev) {
5049 const struct net_device_ops *ops = dev->netdev_ops;
5050 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5051
5052 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
5053 if (idx >= cb->args[0]) {
5054 err = br_dev->netdev_ops->ndo_bridge_getlink(
5055 skb, portid, seq, dev,
5056 filter_mask, NLM_F_MULTI);
5057 if (err < 0 && err != -EOPNOTSUPP) {
5058 if (likely(skb->len))
5059 break;
5060
5061 goto out_err;
5062 }
5063 }
5064 idx++;
5065 }
5066
5067 if (ops->ndo_bridge_getlink) {
5068 if (idx >= cb->args[0]) {
5069 err = ops->ndo_bridge_getlink(skb, portid,
5070 seq, dev,
5071 filter_mask,
5072 NLM_F_MULTI);
5073 if (err < 0 && err != -EOPNOTSUPP) {
5074 if (likely(skb->len))
5075 break;
5076
5077 goto out_err;
5078 }
5079 }
5080 idx++;
5081 }
5082 }
5083 err = skb->len;
5084out_err:
5085 rcu_read_unlock();
5086 cb->args[0] = idx;
5087
5088 return err;
5089}
5090
5091static inline size_t bridge_nlmsg_size(void)
5092{
5093 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5094 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5095 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5096 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
5097 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
5098 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
5099 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
5100 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
5101 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
5102 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
5103 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
5104}
5105
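/* Re-read bridge state from the device and broadcast it to
 * RTNLGRP_LINK listeners; used after a successful setlink/dellink.
 */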
5106static int rtnl_bridge_notify(struct net_device *dev)
5107{
5108 struct net *net = dev_net(dev);
5109 struct sk_buff *skb;
5110 int err = -EOPNOTSUPP;
5111
5112 if (!dev->netdev_ops->ndo_bridge_getlink)
5113 return 0;
5114
5115 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
5116 if (!skb) {
5117 err = -ENOMEM;
5118 goto errout;
5119 }
5120
5121 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
5122 if (err < 0)
5123 goto errout;
5124
5125 /* Notification info is only filled for bridge ports, not the bridge
5126 * device itself. Therefore, a zero notification length is valid and
5127 * should not result in an error.
5128 */
5129 if (!skb->len)
5130 goto errout;
5131
5132 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
5133 return 0;
5134errout:
5135 WARN_ON(err == -EMSGSIZE);
5136 kfree_skb(skb);
5137 if (err)
5138 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
5139 return err;
5140}
5141
5142static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
5143 struct netlink_ext_ack *extack)
5144{
5145 struct net *net = sock_net(skb->sk);
5146 struct ifinfomsg *ifm;
5147 struct net_device *dev;
5148 struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
5149 int rem, err = -EOPNOTSUPP;
5150 u16 flags = 0;
5151
5152 if (nlmsg_len(nlh) < sizeof(*ifm))
5153 return -EINVAL;
5154
5155 ifm = nlmsg_data(nlh);
5156 if (ifm->ifi_family != AF_BRIDGE)
5157 return -EPFNOSUPPORT;
5158
5159 dev = __dev_get_by_index(net, ifm->ifi_index);
5160 if (!dev) {
5161 NL_SET_ERR_MSG(extack, "unknown ifindex");
5162 return -ENODEV;
5163 }
5164
5165 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5166 if (br_spec) {
5167 nla_for_each_nested(attr, br_spec, rem) {
5168 if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
5169 if (nla_len(attr) < sizeof(flags))
5170 return -EINVAL;
5171
5172 br_flags_attr = attr;
5173 flags = nla_get_u16(attr);
5174 }
5175
5176 if (nla_type(attr) == IFLA_BRIDGE_MODE) {
5177 if (nla_len(attr) < sizeof(u16))
5178 return -EINVAL;
5179 }
5180 }
5181 }
5182
5183 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5184 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5185
5186 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
5187 err = -EOPNOTSUPP;
5188 goto out;
5189 }
5190
5191 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
5192 extack);
5193 if (err)
5194 goto out;
5195
5196 flags &= ~BRIDGE_FLAGS_MASTER;
5197 }
5198
5199 if ((flags & BRIDGE_FLAGS_SELF)) {
5200 if (!dev->netdev_ops->ndo_bridge_setlink)
5201 err = -EOPNOTSUPP;
5202 else
5203 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
5204 flags,
5205 extack);
5206 if (!err) {
5207 flags &= ~BRIDGE_FLAGS_SELF;
5208
5209 /* Generate event to notify upper layer of bridge
5210 * change
5211 */
5212 err = rtnl_bridge_notify(dev);
5213 }
5214 }
5215
5216 if (br_flags_attr)
5217 memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
5218out:
5219 return err;
5220}
5221
5222static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
5223 struct netlink_ext_ack *extack)
5224{
5225 struct net *net = sock_net(skb->sk);
5226 struct ifinfomsg *ifm;
5227 struct net_device *dev;
5228 struct nlattr *br_spec, *attr = NULL;
5229 int rem, err = -EOPNOTSUPP;
5230 u16 flags = 0;
5231 bool have_flags = false;
5232
5233 if (nlmsg_len(nlh) < sizeof(*ifm))
5234 return -EINVAL;
5235
5236 ifm = nlmsg_data(nlh);
5237 if (ifm->ifi_family != AF_BRIDGE)
5238 return -EPFNOSUPPORT;
5239
5240 dev = __dev_get_by_index(net, ifm->ifi_index);
5241 if (!dev) {
5242 NL_SET_ERR_MSG(extack, "unknown ifindex");
5243 return -ENODEV;
5244 }
5245
5246 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5247 if (br_spec) {
5248 nla_for_each_nested(attr, br_spec, rem) {
5249 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
5250 if (nla_len(attr) < sizeof(flags))
5251 return -EINVAL;
5252
5253 have_flags = true;
5254 flags = nla_get_u16(attr);
5255 break;
5256 }
5257 }
5258 }
5259
5260 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5261 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5262
5263 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
5264 err = -EOPNOTSUPP;
5265 goto out;
5266 }
5267
5268 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
5269 if (err)
5270 goto out;
5271
5272 flags &= ~BRIDGE_FLAGS_MASTER;
5273 }
5274
5275 if ((flags & BRIDGE_FLAGS_SELF)) {
5276 if (!dev->netdev_ops->ndo_bridge_dellink)
5277 err = -EOPNOTSUPP;
5278 else
5279 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5280 flags);
5281
5282 if (!err) {
5283 flags &= ~BRIDGE_FLAGS_SELF;
5284
5285 /* Generate event to notify upper layer of bridge
5286 * change
5287 */
5288 err = rtnl_bridge_notify(dev);
5289 }
5290 }
5291
5292 if (have_flags)
5293 memcpy(nla_data(attr), &flags, sizeof(flags));
5294out:
5295 return err;
5296}
5297
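/* An attribute is dumped when its bit is set in the filter mask and we
 * are either not resuming (idxattr == 0) or resuming exactly at it.
 */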
5298static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
5299{
5300 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
5301 (!idxattr || idxattr == attrid);
5302}
5303
5304static bool
5305rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id)
5306{
5307 return dev->netdev_ops &&
5308 dev->netdev_ops->ndo_has_offload_stats &&
5309 dev->netdev_ops->ndo_get_offload_stats &&
5310 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id);
5311}
5312
5313static unsigned int
5314rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id)
5315{
5316 return rtnl_offload_xstats_have_ndo(dev, attr_id) ?
5317 sizeof(struct rtnl_link_stats64) : 0;
5318}
5319
5320static int
5321rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id,
5322 struct sk_buff *skb)
5323{
5324 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id);
5325 struct nlattr *attr = NULL;
5326 void *attr_data;
5327 int err;
5328
5329 if (!size)
5330 return -ENODATA;
5331
5332 attr = nla_reserve_64bit(skb, attr_id, size,
5333 IFLA_OFFLOAD_XSTATS_UNSPEC);
5334 if (!attr)
5335 return -EMSGSIZE;
5336
5337 attr_data = nla_data(attr);
5338 memset(attr_data, 0, size);
5339
5340 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data);
5341 if (err)
5342 return err;
5343
5344 return 0;
5345}
5346
5347static unsigned int
5348rtnl_offload_xstats_get_size_stats(const struct net_device *dev,
5349 enum netdev_offload_xstats_type type)
5350{
5351 bool enabled = netdev_offload_xstats_enabled(dev, type);
5352
5353 return enabled ? sizeof(struct rtnl_hw_stats64) : 0;
5354}
5355
5356struct rtnl_offload_xstats_request_used {
5357 bool request;
5358 bool used;
5359};
5360
5361static int
5362rtnl_offload_xstats_get_stats(struct net_device *dev,
5363 enum netdev_offload_xstats_type type,
5364 struct rtnl_offload_xstats_request_used *ru,
5365 struct rtnl_hw_stats64 *stats,
5366 struct netlink_ext_ack *extack)
5367{
5368 bool request;
5369 bool used;
5370 int err;
5371
5372 request = netdev_offload_xstats_enabled(dev, type);
5373 if (!request) {
5374 used = false;
5375 goto out;
5376 }
5377
5378 err = netdev_offload_xstats_get(dev, type, stats, &used, extack);
5379 if (err)
5380 return err;
5381
5382out:
5383 if (ru) {
5384 ru->request = request;
5385 ru->used = used;
5386 }
5387 return 0;
5388}
5389
5390static int
5391rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id,
5392 struct rtnl_offload_xstats_request_used *ru)
5393{
5394 struct nlattr *nest;
5395
5396 nest = nla_nest_start(skb, attr_id);
5397 if (!nest)
5398 return -EMSGSIZE;
5399
5400 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request))
5401 goto nla_put_failure;
5402
5403 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used))
5404 goto nla_put_failure;
5405
5406 nla_nest_end(skb, nest);
5407 return 0;
5408
5409nla_put_failure:
5410 nla_nest_cancel(skb, nest);
5411 return -EMSGSIZE;
5412}
5413
5414static int
5415rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev,
5416 struct netlink_ext_ack *extack)
5417{
5418 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5419 struct rtnl_offload_xstats_request_used ru_l3;
5420 struct nlattr *nest;
5421 int err;
5422
5423 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack);
5424 if (err)
5425 return err;
5426
5427 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5428 if (!nest)
5429 return -EMSGSIZE;
5430
5431 if (rtnl_offload_xstats_fill_hw_s_info_one(skb,
5432 IFLA_OFFLOAD_XSTATS_L3_STATS,
5433 &ru_l3))
5434 goto nla_put_failure;
5435
5436 nla_nest_end(skb, nest);
5437 return 0;
5438
5439nla_put_failure:
5440 nla_nest_cancel(skb, nest);
5441 return -EMSGSIZE;
5442}
5443
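/* Fill the IFLA_STATS_LINK_OFFLOAD_XSTATS nest: CPU-hit stats via the
 * driver ndo, then the HW stats request/used info and L3 stats from
 * the core. *prividx records where to resume after an -EMSGSIZE.
 */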
5444static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
5445 int *prividx, u32 off_filter_mask,
5446 struct netlink_ext_ack *extack)
5447{
5448 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5449 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO;
5450 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS;
5451 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5452 bool have_data = false;
5453 int err;
5454
5455 if (*prividx <= attr_id_cpu_hit &&
5456 (off_filter_mask &
5457 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) {
5458 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb);
5459 if (!err) {
5460 have_data = true;
5461 } else if (err != -ENODATA) {
5462 *prividx = attr_id_cpu_hit;
5463 return err;
5464 }
5465 }
5466
5467 if (*prividx <= attr_id_hw_s_info &&
5468 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) {
5469 *prividx = attr_id_hw_s_info;
5470
5471 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack);
5472 if (err)
5473 return err;
5474
5475 have_data = true;
5476 *prividx = 0;
5477 }
5478
5479 if (*prividx <= attr_id_l3_stats &&
5480 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) {
5481 unsigned int size_l3;
5482 struct nlattr *attr;
5483
5484 *prividx = attr_id_l3_stats;
5485
5486 size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5487 if (!size_l3)
5488 goto skip_l3_stats;
5489 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
5490 IFLA_OFFLOAD_XSTATS_UNSPEC);
5491 if (!attr)
5492 return -EMSGSIZE;
5493
5494 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL,
5495 nla_data(attr), extack);
5496 if (err)
5497 return err;
5498
5499 have_data = true;
5500skip_l3_stats:
5501 *prividx = 0;
5502 }
5503
5504 if (!have_data)
5505 return -ENODATA;
5506
5507 *prividx = 0;
5508 return 0;
5509}
5510
5511static unsigned int
5512rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
5513 enum netdev_offload_xstats_type type)
5514{
5515 return nla_total_size(0) +
5516 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
5517 nla_total_size(sizeof(u8)) +
5518 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
5519 nla_total_size(sizeof(u8)) +
5520 0;
5521}
5522
5523static unsigned int
5524rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev)
5525{
5526 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5527
5528 return nla_total_size(0) +
5529 /* IFLA_OFFLOAD_XSTATS_L3_STATS */
5530 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) +
5531 0;
5532}
5533
5534static int rtnl_offload_xstats_get_size(const struct net_device *dev,
5535 u32 off_filter_mask)
5536{
5537 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5538 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5539 int nla_size = 0;
5540 int size;
5541
5542 if (off_filter_mask &
5543 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) {
5544 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit);
5545 nla_size += nla_total_size_64bit(size);
5546 }
5547
5548 if (off_filter_mask &
5549 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO))
5550 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev);
5551
5552 if (off_filter_mask &
5553 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) {
5554 size = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5555 nla_size += nla_total_size_64bit(size);
5556 }
5557
5558 if (nla_size != 0)
5559 nla_size += nla_total_size(0);
5560
5561 return nla_size;
5562}
5563
5564struct rtnl_stats_dump_filters {
5565 /* mask[0] filters outer attributes. Then individual nests have their
5566 * filtering mask at the index of the nested attribute.
5567 */
5568 u32 mask[IFLA_STATS_MAX + 1];
5569};
5570
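/* Fill one RTM_NEWSTATS message. *idxattr and *prividx carry resume
 * state so that a dump can restart mid-device after an -EMSGSIZE.
 */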
5571static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
5572 int type, u32 pid, u32 seq, u32 change,
5573 unsigned int flags,
5574 const struct rtnl_stats_dump_filters *filters,
5575 int *idxattr, int *prividx,
5576 struct netlink_ext_ack *extack)
5577{
5578 unsigned int filter_mask = filters->mask[0];
5579 struct if_stats_msg *ifsm;
5580 struct nlmsghdr *nlh;
5581 struct nlattr *attr;
5582 int s_prividx = *prividx;
5583 int err;
5584
5585 ASSERT_RTNL();
5586
5587 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
5588 if (!nlh)
5589 return -EMSGSIZE;
5590
5591 ifsm = nlmsg_data(nlh);
5592 ifsm->family = PF_UNSPEC;
5593 ifsm->pad1 = 0;
5594 ifsm->pad2 = 0;
5595 ifsm->ifindex = dev->ifindex;
5596 ifsm->filter_mask = filter_mask;
5597
5598 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
5599 struct rtnl_link_stats64 *sp;
5600
5601 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
5602 sizeof(struct rtnl_link_stats64),
5603 IFLA_STATS_UNSPEC);
5604 if (!attr) {
5605 err = -EMSGSIZE;
5606 goto nla_put_failure;
5607 }
5608
5609 sp = nla_data(attr);
5610 dev_get_stats(dev, sp);
5611 }
5612
5613 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
5614 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5615
5616 if (ops && ops->fill_linkxstats) {
5617 *idxattr = IFLA_STATS_LINK_XSTATS;
5618 attr = nla_nest_start_noflag(skb,
5619 IFLA_STATS_LINK_XSTATS);
5620 if (!attr) {
5621 err = -EMSGSIZE;
5622 goto nla_put_failure;
5623 }
5624
5625 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5626 nla_nest_end(skb, attr);
5627 if (err)
5628 goto nla_put_failure;
5629 *idxattr = 0;
5630 }
5631 }
5632
5633 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
5634 *idxattr)) {
5635 const struct rtnl_link_ops *ops = NULL;
5636 const struct net_device *master;
5637
5638 master = netdev_master_upper_dev_get(dev);
5639 if (master)
5640 ops = master->rtnl_link_ops;
5641 if (ops && ops->fill_linkxstats) {
5642 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
5643 attr = nla_nest_start_noflag(skb,
5644 IFLA_STATS_LINK_XSTATS_SLAVE);
5645 if (!attr) {
5646 err = -EMSGSIZE;
5647 goto nla_put_failure;
5648 }
5649
5650 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5651 nla_nest_end(skb, attr);
5652 if (err)
5653 goto nla_put_failure;
5654 *idxattr = 0;
5655 }
5656 }
5657
5658 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
5659 *idxattr)) {
5660 u32 off_filter_mask;
5661
5662 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5663 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
5664 attr = nla_nest_start_noflag(skb,
5665 IFLA_STATS_LINK_OFFLOAD_XSTATS);
5666 if (!attr) {
5667 err = -EMSGSIZE;
5668 goto nla_put_failure;
5669 }
5670
5671 err = rtnl_offload_xstats_fill(skb, dev, prividx,
5672 off_filter_mask, extack);
5673 if (err == -ENODATA)
5674 nla_nest_cancel(skb, attr);
5675 else
5676 nla_nest_end(skb, attr);
5677
5678 if (err && err != -ENODATA)
5679 goto nla_put_failure;
5680 *idxattr = 0;
5681 }
5682
5683 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
5684 struct rtnl_af_ops *af_ops;
5685
5686 *idxattr = IFLA_STATS_AF_SPEC;
5687 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
5688 if (!attr) {
5689 err = -EMSGSIZE;
5690 goto nla_put_failure;
5691 }
5692
5693 rcu_read_lock();
5694 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5695 if (af_ops->fill_stats_af) {
5696 struct nlattr *af;
5697
5698 af = nla_nest_start_noflag(skb,
5699 af_ops->family);
5700 if (!af) {
5701 rcu_read_unlock();
5702 err = -EMSGSIZE;
5703 goto nla_put_failure;
5704 }
5705 err = af_ops->fill_stats_af(skb, dev);
5706
5707 if (err == -ENODATA) {
5708 nla_nest_cancel(skb, af);
5709 } else if (err < 0) {
5710 rcu_read_unlock();
5711 goto nla_put_failure;
5712 }
5713
5714 nla_nest_end(skb, af);
5715 }
5716 }
5717 rcu_read_unlock();
5718
5719 nla_nest_end(skb, attr);
5720
5721 *idxattr = 0;
5722 }
5723
5724 nlmsg_end(skb, nlh);
5725
5726 return 0;
5727
5728nla_put_failure:
	/* not a multi message or no progress means a real error */
5730 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
5731 nlmsg_cancel(skb, nlh);
5732 else
5733 nlmsg_end(skb, nlh);
5734
5735 return err;
5736}
5737
5738static size_t if_nlmsg_stats_size(const struct net_device *dev,
5739 const struct rtnl_stats_dump_filters *filters)
5740{
5741 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
5742 unsigned int filter_mask = filters->mask[0];
5743
5744 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
5745 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
5746
5747 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
5748 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5749 int attr = IFLA_STATS_LINK_XSTATS;
5750
5751 if (ops && ops->get_linkxstats_size) {
5752 size += nla_total_size(ops->get_linkxstats_size(dev,
5753 attr));
5754 /* for IFLA_STATS_LINK_XSTATS */
5755 size += nla_total_size(0);
5756 }
5757 }
5758
5759 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
5760 struct net_device *_dev = (struct net_device *)dev;
5761 const struct rtnl_link_ops *ops = NULL;
5762 const struct net_device *master;
5763
5764 /* netdev_master_upper_dev_get can't take const */
5765 master = netdev_master_upper_dev_get(_dev);
5766 if (master)
5767 ops = master->rtnl_link_ops;
5768 if (ops && ops->get_linkxstats_size) {
5769 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
5770
5771 size += nla_total_size(ops->get_linkxstats_size(dev,
5772 attr));
5773 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
5774 size += nla_total_size(0);
5775 }
5776 }
5777
5778 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) {
5779 u32 off_filter_mask;
5780
5781 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5782 size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
5783 }
5784
5785 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
5786 struct rtnl_af_ops *af_ops;
5787
5788 /* for IFLA_STATS_AF_SPEC */
5789 size += nla_total_size(0);
5790
5791 rcu_read_lock();
5792 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5793 if (af_ops->get_stats_af_size) {
5794 size += nla_total_size(
5795 af_ops->get_stats_af_size(dev));
5796
5797 /* for AF_* */
5798 size += nla_total_size(0);
5799 }
5800 }
5801 rcu_read_unlock();
5802 }
5803
5804 return size;
5805}
5806
5807#define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1)
5808
5809static const struct nla_policy
5810rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
5811 [IFLA_STATS_LINK_OFFLOAD_XSTATS] =
5812 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID),
5813};
5814
5815static const struct nla_policy
5816rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = {
5817 [IFLA_STATS_GET_FILTERS] =
5818 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters),
5819};
5820
5821static const struct nla_policy
5822ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
5823 [IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1),
5824};
5825
5826static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters,
5827 struct rtnl_stats_dump_filters *filters,
5828 struct netlink_ext_ack *extack)
5829{
5830 struct nlattr *tb[IFLA_STATS_MAX + 1];
5831 int err;
5832 int at;
5833
5834 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters,
5835 rtnl_stats_get_policy_filters, extack);
5836 if (err < 0)
5837 return err;
5838
5839 for (at = 1; at <= IFLA_STATS_MAX; at++) {
5840 if (tb[at]) {
5841 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) {
5842 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask");
5843 return -EINVAL;
5844 }
5845 filters->mask[at] = nla_get_u32(tb[at]);
5846 }
5847 }
5848
5849 return 0;
5850}
5851
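/* Build the dump filters from the legacy filter_mask plus an optional
 * nested IFLA_STATS_GET_FILTERS attribute; nests that are not
 * explicitly filtered default to all bits set.
 */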
5852static int rtnl_stats_get_parse(const struct nlmsghdr *nlh,
5853 u32 filter_mask,
5854 struct rtnl_stats_dump_filters *filters,
5855 struct netlink_ext_ack *extack)
5856{
5857 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
5858 int err;
5859 int i;
5860
5861 filters->mask[0] = filter_mask;
5862 for (i = 1; i < ARRAY_SIZE(filters->mask); i++)
5863 filters->mask[i] = -1U;
5864
5865 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb,
5866 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack);
5867 if (err < 0)
5868 return err;
5869
5870 if (tb[IFLA_STATS_GET_FILTERS]) {
5871 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS],
5872 filters, extack);
5873 if (err)
5874 return err;
5875 }
5876
5877 return 0;
5878}
5879
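/* Validate an RTM_GETSTATS/RTM_SETSTATS header; under strict checking
 * reject padding, an ifindex in dump requests, and unknown filter
 * mask bits.
 */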
5880static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
5881 bool is_dump, struct netlink_ext_ack *extack)
5882{
5883 struct if_stats_msg *ifsm;
5884
5885 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
5886 NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
5887 return -EINVAL;
5888 }
5889
5890 if (!strict_check)
5891 return 0;
5892
5893 ifsm = nlmsg_data(nlh);
5894
5895 /* only requests using strict checks can pass data to influence
5896 * the dump. The legacy exception is filter_mask.
5897 */
5898 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
5899 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
5900 return -EINVAL;
5901 }
5902 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
5903 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
5904 return -EINVAL;
5905 }
5906
5907 return 0;
5908}
5909
5910static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
5911 struct netlink_ext_ack *extack)
5912{
5913 struct rtnl_stats_dump_filters filters;
5914 struct net *net = sock_net(skb->sk);
5915 struct net_device *dev = NULL;
5916 int idxattr = 0, prividx = 0;
5917 struct if_stats_msg *ifsm;
5918 struct sk_buff *nskb;
5919 int err;
5920
5921 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
5922 false, extack);
5923 if (err)
5924 return err;
5925
5926 ifsm = nlmsg_data(nlh);
5927 if (ifsm->ifindex > 0)
5928 dev = __dev_get_by_index(net, ifsm->ifindex);
5929 else
5930 return -EINVAL;
5931
5932 if (!dev)
5933 return -ENODEV;
5934
5935 if (!ifsm->filter_mask) {
5936 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get");
5937 return -EINVAL;
5938 }
5939
5940 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack);
5941 if (err)
5942 return err;
5943
5944 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL);
5945 if (!nskb)
5946 return -ENOBUFS;
5947
5948 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
5949 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
5950 0, &filters, &idxattr, &prividx, extack);
5951 if (err < 0) {
5952 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
5953 WARN_ON(err == -EMSGSIZE);
5954 kfree_skb(nskb);
5955 } else {
5956 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
5957 }
5958
5959 return err;
5960}
5961
5962static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
5963{
5964 struct netlink_ext_ack *extack = cb->extack;
5965 int h, s_h, err, s_idx, s_idxattr, s_prividx;
5966 struct rtnl_stats_dump_filters filters;
5967 struct net *net = sock_net(skb->sk);
5968 unsigned int flags = NLM_F_MULTI;
5969 struct if_stats_msg *ifsm;
5970 struct hlist_head *head;
5971 struct net_device *dev;
5972 int idx = 0;
5973
5974 s_h = cb->args[0];
5975 s_idx = cb->args[1];
5976 s_idxattr = cb->args[2];
5977 s_prividx = cb->args[3];
5978
5979 cb->seq = net->dev_base_seq;
5980
5981 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
5982 if (err)
5983 return err;
5984
5985 ifsm = nlmsg_data(cb->nlh);
5986 if (!ifsm->filter_mask) {
5987 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
5988 return -EINVAL;
5989 }
5990
5991 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters,
5992 extack);
5993 if (err)
5994 return err;
5995
5996 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5997 idx = 0;
5998 head = &net->dev_index_head[h];
5999 hlist_for_each_entry(dev, head, index_hlist) {
6000 if (idx < s_idx)
6001 goto cont;
6002 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
6003 NETLINK_CB(cb->skb).portid,
6004 cb->nlh->nlmsg_seq, 0,
6005 flags, &filters,
6006 &s_idxattr, &s_prividx,
6007 extack);
6008 /* If we ran out of room on the first message,
6009 * we're in trouble
6010 */
6011 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
6012
6013 if (err < 0)
6014 goto out;
6015 s_prividx = 0;
6016 s_idxattr = 0;
6017 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
6018cont:
6019 idx++;
6020 }
6021 }
6022out:
6023 cb->args[3] = s_prividx;
6024 cb->args[2] = s_idxattr;
6025 cb->args[1] = idx;
6026 cb->args[0] = h;
6027
6028 return skb->len;
6029}
6030
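/* Notify RTNLGRP_STATS listeners that the HW offload stats state of a
 * device changed. Caller must hold RTNL (see the ASSERT_RTNL() below).
 */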
void rtnl_offload_xstats_notify(struct net_device *dev)
{
	struct rtnl_stats_dump_filters response_filters = {};
	struct net *net = dev_net(dev);
	int idxattr = 0, prividx = 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	ASSERT_RTNL();

	response_filters.mask[0] |=
		IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
	response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
		IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);

	skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
			GFP_KERNEL);
	if (!skb)
		goto errout;

	err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
				  &response_filters, &idxattr, &prividx, NULL);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_STATS, err);
}
EXPORT_SYMBOL(rtnl_offload_xstats_notify);

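/* doit side of RTM_SETSTATS. The only settable knob so far is
 * IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, a u8 toggling hardware L3 stats
 * collection on the target device. Sketch of a request, as parsed below
 * (illustrative only, not a guaranteed ABI description):
 *
 *	struct nlmsghdr     nlmsg_type = RTM_SETSTATS
 *	struct if_stats_msg family = AF_UNSPEC, ifindex set, filter_mask = 0
 *	u8 attribute        IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0 or 1
 *
 * On a successful state change, RTNLGRP_STATS listeners are notified.
 */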
static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
	struct rtnl_stats_dump_filters response_filters = {};
	struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_device *dev = NULL;
	struct if_stats_msg *ifsm;
	bool notify = false;
	int err;

	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
				   false, extack);
	if (err)
		return err;

	ifsm = nlmsg_data(nlh);
	if (ifsm->family != AF_UNSPEC) {
		NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC");
		return -EINVAL;
	}

	if (ifsm->ifindex > 0)
		dev = __dev_get_by_index(net, ifsm->ifindex);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	if (ifsm->filter_mask) {
		NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set");
		return -EINVAL;
	}

	err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX,
			  ifla_stats_set_policy, extack);
	if (err < 0)
		return err;

	if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) {
		u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]);

		if (req)
			err = netdev_offload_xstats_enable(dev, t_l3, extack);
		else
			err = netdev_offload_xstats_disable(dev, t_l3);

		if (!err)
			notify = true;
		else if (err != -EALREADY)
			return err;

		response_filters.mask[0] |=
			IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
		response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
			IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
	}

	if (notify)
		rtnl_offload_xstats_notify(dev);

	return 0;
}

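/* Strict-mode validation for RTM_GETMDB dumps: the request must carry
 * exactly a struct br_port_msg, with no ifindex filter and no attributes.
 */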
static int rtnl_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				   struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}

struct rtnl_mdb_dump_ctx {
	long idx;
};

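/* Dump side of RTM_GETMDB. Iterates over all devices in the namespace and
 * delegates to their ndo_mdb_dump(); only the device index survives in the
 * dump context, everything else is maintained per-device by the driver.
 */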
static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int idx, s_idx;
	int err;

	NL_ASSERT_DUMP_CTX_FITS(struct rtnl_mdb_dump_ctx);

	if (cb->strict_check) {
		err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack);
		if (err)
			return err;
	}

	s_idx = ctx->idx;
	idx = 0;

	for_each_netdev(net, dev) {
		if (idx < s_idx)
			goto skip;
		if (!dev->netdev_ops->ndo_mdb_dump)
			goto skip;

		err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb);
		if (err == -EMSGSIZE)
			goto out;
		/* Moving on to next device, reset markers and sequence
		 * counters since they are all maintained per-device.
		 */
		memset(cb->ctx, 0, sizeof(cb->ctx));
		cb->prev_seq = 0;
		cb->seq = 0;
skip:
		idx++;
	}

out:
	ctx->idx = idx;
	return skb->len;
}

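/* Validation for the MDBA_GET_ENTRY payload of an RTM_GETMDB request: the
 * caller may only specify the group address, protocol and VLAN id; ifindex,
 * state and flags must be left zeroed.
 */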
static int rtnl_validate_mdb_entry_get(const struct nlattr *attr,
				       struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(attr);

	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
		return -EINVAL;
	}

	if (entry->ifindex) {
		NL_SET_ERR_MSG(extack, "Entry ifindex cannot be specified");
		return -EINVAL;
	}

	if (entry->state) {
		NL_SET_ERR_MSG(extack, "Entry state cannot be specified");
		return -EINVAL;
	}

	if (entry->flags) {
		NL_SET_ERR_MSG(extack, "Entry flags cannot be specified");
		return -EINVAL;
	}

	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
		return -EINVAL;
	}

	if (entry->addr.proto != htons(ETH_P_IP) &&
	    entry->addr.proto != htons(ETH_P_IPV6) &&
	    entry->addr.proto != 0) {
		NL_SET_ERR_MSG(extack, "Unknown entry protocol");
		return -EINVAL;
	}

	return 0;
}

static const struct nla_policy mdba_get_policy[MDBA_GET_ENTRY_MAX + 1] = {
	[MDBA_GET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						  rtnl_validate_mdb_entry_get,
						  sizeof(struct br_mdb_entry)),
	[MDBA_GET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};

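/* doit side of RTM_GETMDB: look up the device from the br_port_msg ifindex
 * and let its ndo_mdb_get() compose the reply.
 */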
static int rtnl_mdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBA_GET_ENTRY_MAX + 1];
	struct net *net = sock_net(in_skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(struct br_port_msg), tb,
			  MDBA_GET_ENTRY_MAX, mdba_get_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_GET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_GET_ENTRY attribute");
		return -EINVAL;
	}

	if (!dev->netdev_ops->ndo_mdb_get) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_get(dev, tb, NETLINK_CB(in_skb).portid,
					    nlh->nlmsg_seq, extack);
}

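/* Validation for the MDBA_SET_ENTRY payload of RTM_NEWMDB: a port ifindex
 * is mandatory, the group address must be a sane IPv4/IPv6/L2 multicast
 * group (with 0.0.0.0 allowed for IPv4), and state and VLAN id are range
 * checked.
 */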
static int rtnl_validate_mdb_entry(const struct nlattr *attr,
				   struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(attr);

	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
		return -EINVAL;
	}

	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG(extack, "Zero entry ifindex is not allowed");
		return -EINVAL;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4) &&
		    !ipv4_is_zeronet(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG(extack, "IPv4 entry group address is not multicast or 0.0.0.0");
			return -EINVAL;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG(extack, "IPv4 entry group address is local multicast");
			return -EINVAL;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG(extack, "IPv6 entry group address is link-local all nodes");
			return -EINVAL;
		}
#endif
	} else if (entry->addr.proto == 0) {
		/* L2 mdb */
		if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
			NL_SET_ERR_MSG(extack, "L2 entry group is not multicast");
			return -EINVAL;
		}
	} else {
		NL_SET_ERR_MSG(extack, "Unknown entry protocol");
		return -EINVAL;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG(extack, "Unknown entry state");
		return -EINVAL;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
		return -EINVAL;
	}

	return 0;
}

static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = {
	[MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 },
	[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						  rtnl_validate_mdb_entry,
						  sizeof(struct br_mdb_entry)),
	[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};

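/* doit side of RTM_NEWMDB. Sketch of a request, as parsed below
 * (illustrative only; the entry itself has already been vetted by
 * rtnl_validate_mdb_entry() through the policy above):
 *
 *	struct nlmsghdr    nlmsg_type = RTM_NEWMDB
 *	struct br_port_msg ifindex = <MDB-capable device, e.g. a bridge>
 *	MDBA_SET_ENTRY     struct br_mdb_entry
 *
 * The insertion itself is delegated to the device's ndo_mdb_add().
 */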
static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, mdba_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}

	if (!dev->netdev_ops->ndo_mdb_add) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack);
}

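/* Validation for the MDBA_SET_ENTRY payload of a bulk RTM_DELMDB: the group
 * address must be left zeroed and per-entry flags unset; state and VLAN id
 * are the only fields the caller may supply.
 */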
static int rtnl_validate_mdb_entry_del_bulk(const struct nlattr *attr,
					    struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(attr);
	struct br_mdb_entry zero_entry = {};

	if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
		return -EINVAL;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG(extack, "Unknown entry state");
		return -EINVAL;
	}

	if (entry->flags) {
		NL_SET_ERR_MSG(extack, "Entry flags cannot be set");
		return -EINVAL;
	}

	if (entry->vid >= VLAN_N_VID - 1) {
		NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
		return -EINVAL;
	}

	if (memcmp(&entry->addr, &zero_entry.addr, sizeof(entry->addr))) {
		NL_SET_ERR_MSG(extack, "Entry address cannot be set");
		return -EINVAL;
	}

	return 0;
}

static const struct nla_policy mdba_del_bulk_policy[MDBA_SET_ENTRY_MAX + 1] = {
	[MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						  rtnl_validate_mdb_entry_del_bulk,
						  sizeof(struct br_mdb_entry)),
	[MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
};

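/* doit side of RTM_DELMDB. With NLM_F_BULK set, the request is parsed with
 * the stricter mdba_del_bulk_policy and handed to ndo_mdb_del_bulk();
 * otherwise a single entry is removed through ndo_mdb_del().
 */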
static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct br_port_msg *bpm;
	struct net_device *dev;
	int err;

	if (!del_bulk)
		err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
					     MDBA_SET_ENTRY_MAX, mdba_policy,
					     extack);
	else
		err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX,
				  mdba_del_bulk_policy, extack);
	if (err)
		return err;

	bpm = nlmsg_data(nlh);
	if (!bpm->ifindex) {
		NL_SET_ERR_MSG(extack, "Invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't exist");
		return -ENODEV;
	}

	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
		NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}

	if (del_bulk) {
		if (!dev->netdev_ops->ndo_mdb_del_bulk) {
			NL_SET_ERR_MSG(extack, "Device does not support MDB bulk deletion");
			return -EOPNOTSUPP;
		}
		return dev->netdev_ops->ndo_mdb_del_bulk(dev, tb, extack);
	}

	if (!dev->netdev_ops->ndo_mdb_del) {
		NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
		return -EOPNOTSUPP;
	}

	return dev->netdev_ops->ndo_mdb_del(dev, tb, extack);
}

/* Process one rtnetlink message: GET requests with NLM_F_DUMP are handed
 * to the registered dumpit handler through netlink_dump_start(); all other
 * requests go to the doit handler, under the RTNL unless the handler was
 * registered with RTNL_FLAG_DOIT_UNLOCKED.
 */
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_link *link;
	enum rtnl_kinds kind;
	struct module *owner;
	int err = -EOPNOTSUPP;
	rtnl_doit_func doit;
	unsigned int flags;
	int family;
	int type;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
		return 0;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	kind = rtnl_msgtype_kind(type);

	if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	rcu_read_lock();
	if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;
		u32 min_dump_alloc = 0;

		link = rtnl_get_link(family, type);
		if (!link || !link->dumpit) {
			family = PF_UNSPEC;
			link = rtnl_get_link(family, type);
			if (!link || !link->dumpit)
				goto err_unlock;
		}
		owner = link->owner;
		dumpit = link->dumpit;
		flags = link->flags;

		if (type == RTM_GETLINK - RTM_BASE)
			min_dump_alloc = rtnl_calcit(skb, nlh);

		err = 0;
		/* need to do this before rcu_read_unlock() */
		if (!try_module_get(owner))
			err = -EPROTONOSUPPORT;

		rcu_read_unlock();

		rtnl = net->rtnl;
		if (err == 0) {
			struct netlink_dump_control c = {
				.dump = dumpit,
				.min_dump_alloc = min_dump_alloc,
				.module = owner,
				.flags = flags,
			};
			err = netlink_dump_start(rtnl, skb, nlh, &c);
			/* netlink_dump_start() will keep a reference on
			 * module if dump is still in progress.
			 */
			module_put(owner);
		}
		return err;
	}

	link = rtnl_get_link(family, type);
	if (!link || !link->doit) {
		family = PF_UNSPEC;
		link = rtnl_get_link(PF_UNSPEC, type);
		if (!link || !link->doit)
			goto out_unlock;
	}

	owner = link->owner;
	if (!try_module_get(owner)) {
		err = -EPROTONOSUPPORT;
		goto out_unlock;
	}

	flags = link->flags;
	if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
	    !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
		NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
		module_put(owner);
		goto err_unlock;
	}

	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
		doit = link->doit;
		rcu_read_unlock();
		if (doit)
			err = doit(skb, nlh, extack);
		module_put(owner);
		return err;
	}
	rcu_read_unlock();

	rtnl_lock();
	link = rtnl_get_link(family, type);
	if (link && link->doit)
		err = link->doit(skb, nlh, extack);
	rtnl_unlock();

	module_put(owner);

	return err;

out_unlock:
	rcu_read_unlock();
	return err;

err_unlock:
	rcu_read_unlock();
	return -EOPNOTSUPP;
}

static void rtnetlink_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
}

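/* Restrict the multicast-router notification groups to CAP_NET_ADMIN in
 * the socket's user namespace; all other groups can be joined by anyone.
 */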
static int rtnetlink_bind(struct net *net, int group)
{
	switch (group) {
	case RTNLGRP_IPV4_MROUTE_R:
	case RTNLGRP_IPV6_MROUTE_R:
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		break;
	}
	return 0;
}

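/* Translate netdevice notifier events that userspace cares about into
 * RTM_NEWLINK notifications.
 */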
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_POST_TYPE_CHANGE:
	case NETDEV_NOTIFY_PEERS:
	case NETDEV_CHANGEUPPER:
	case NETDEV_RESEND_IGMP:
	case NETDEV_CHANGEINFODATA:
	case NETDEV_CHANGELOWERSTATE:
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
				   GFP_KERNEL, NULL, 0, 0, NULL);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call = rtnetlink_event,
};

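/* Per-namespace setup: create the kernel-side NETLINK_ROUTE socket.
 * rtnl_mutex doubles as the netlink callback mutex, so message processing
 * is serialized with everyone else holding the RTNL.
 */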
static int __net_init rtnetlink_net_init(struct net *net)
{
	struct sock *sk;
	struct netlink_kernel_cfg cfg = {
		.groups = RTNLGRP_MAX,
		.input = rtnetlink_rcv,
		.cb_mutex = &rtnl_mutex,
		.flags = NL_CFG_F_NONROOT_RECV,
		.bind = rtnetlink_bind,
	};

	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
	if (!sk)
		return -ENOMEM;
	net->rtnl = sk;
	return 0;
}

static void __net_exit rtnetlink_net_exit(struct net *net)
{
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
}

static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};

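/* Register the core, protocol-independent handlers at boot; per-protocol
 * code (IPv4, IPv6, ...) adds its own handlers later through
 * rtnl_register() from its own init paths.
 */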
void __init rtnetlink_init(void)
{
	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, 0);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);

	rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);

	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_GETMDB, rtnl_mdb_get, rtnl_mdb_dump, 0);
	rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
}