// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/devlink.h>

#include "dev.h"

#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	40

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);
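
/* Usage sketch (hypothetical caller, not part of this file): while the
 * RTNL is held, a head..tail chain of skbs linked via ->next can be
 * handed to rtnl_kfree_skbs(); they are actually freed once the lock
 * is dropped in __rtnl_unlock() below.
 */
#if 0
static void my_drop_two_locked(struct sk_buff *a, struct sk_buff *b)
{
	ASSERT_RTNL();

	a->next = b;		/* build the head..tail chain */
	b->next = NULL;
	rtnl_kfree_skbs(a, b);	/* freed after rtnl_unlock() */
}
#endif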

void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	/* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
	 * is used. In some places, e.g. in cfg80211, we have code that will do
	 * something like
	 *   rtnl_lock()
	 *   wiphy_lock()
	 *   ...
	 *   rtnl_unlock()
	 *
	 * and because netdev_run_todo() acquires the RTNL for items on the list
	 * we could cause a situation such as this:
	 * Thread 1			Thread 2
	 *				  rtnl_lock()
	 *				  unregister_netdevice()
	 *				  __rtnl_unlock()
	 * rtnl_lock()
	 * wiphy_lock()
	 * rtnl_unlock()
	 *   netdev_run_todo()
	 *     __rtnl_unlock()
	 *
	 *     // list not empty now
	 *     // because of thread 2
	 *				  rtnl_lock()
	 *     while (!list_empty(...))
	 *       rtnl_lock()
	 *				  wiphy_lock()
	 * **** DEADLOCK ****
	 *
	 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
	 * it's not used in cases where something is added to the todo list.
	 */
	WARN_ON(!list_empty(&net_todo_list));

	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);
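
/* Usage sketch (hypothetical caller, not part of this file): the
 * canonical pattern for configuration paths that need a stable view of
 * device state.
 */
#if 0
static int my_set_mtu(struct net_device *dev, int new_mtu)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, new_mtu);	/* must run under RTNL */
	rtnl_unlock();		/* also runs netdev_run_todo() */

	return err;
}
#endif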

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;

	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
		protocol = PF_UNSPEC;

	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
	if (!tab)
		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

	return rcu_dereference_rtnl(tab[msgtype]);
}

static int rtnl_register_internal(struct module *owner,
				  int protocol, int msgtype,
				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
				  unsigned int flags)
{
	struct rtnl_link *link, *old;
	struct rtnl_link __rcu **tab;
	int msgindex;
	int ret = -ENOBUFS;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
		if (!tab)
			goto unlock;

		/* ensures we see the 0 stores */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	old = rtnl_dereference(tab[msgindex]);
	if (old) {
		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
		if (!link)
			goto unlock;
	} else {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			goto unlock;
	}

	WARN_ON(link->owner && link->owner != owner);
	link->owner = owner;

	WARN_ON(doit && link->doit && link->doit != doit);
	if (doit)
		link->doit = doit;
	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
	if (dumpit)
		link->dumpit = dumpit;

	WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
		(flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
	link->flags |= flags;

	/* publish protocol:msgtype */
	rcu_assign_pointer(tab[msgindex], link);
	ret = 0;
	if (old)
		kfree_rcu(old, rcu);
unlock:
	rtnl_unlock();
	return ret;
}

/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);
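
/* Registration sketch (hypothetical module, not part of this file):
 * a removable module wires a doit handler for one protocol:msgtype
 * pair; the names below are illustrative assumptions.
 */
#if 0
static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
		   struct netlink_ext_ack *extack)
{
	return 0;	/* parse nlh and act on the request here */
}

static int __init my_module_init(void)
{
	return rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETLINK,
				    my_doit, NULL, 0);
}
#endif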

/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}

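/* Built-in registration sketch (hypothetical handlers, not part of this
 * file): callers that can never be unloaded use this void variant, so
 * failures are only logged.
 */
#if 0
static int __init my_proto_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, my_doit, my_dumpit, 0);
	return 0;
}
#endif
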
/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	link = rtnl_dereference(tab[msgindex]);
	RCU_INIT_POINTER(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol : Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return;
	}
	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
		link = rtnl_dereference(tab[msgindex]);
		if (!link)
			continue;

		RCU_INIT_POINTER(tab[msgindex], NULL);
		kfree_rcu(link, rcu);
	}
	rtnl_unlock();

	synchronize_net();

	kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);

static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for alloc/setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if ((ops->alloc || ops->setup) && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	/* Sanity-check max sizes to avoid stack buffer overflow. */
	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
		return -EINVAL;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
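
/* Link-ops sketch (hypothetical "mydev" kind, not part of this file):
 * a driver advertises its link type; .dellink is defaulted to
 * unregister_netdevice_queue() by __rtnl_link_register() above.
 */
#if 0
static struct rtnl_link_ops my_link_ops __read_mostly = {
	.kind	= "mydev",
	.setup	= my_setup,	/* hypothetical netdev setup hook */
};

static int __init my_link_init(void)
{
	return rtnl_link_register(&my_link_ops);	/* takes the RTNL */
}
#endif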

static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		/* We held write locked pernet_ops_rwsem, and parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			if (atomic_read(&net->dev_unreg_count) > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	ASSERT_RTNL();

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
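
/* Address-family ops sketch (hypothetical handlers, not part of this
 * file): a protocol contributes per-device IFLA_AF_SPEC payload.
 */
#if 0
static struct rtnl_af_ops my_af_ops __read_mostly = {
	.family		  = AF_INET6,
	.fill_link_af	  = my_fill_link_af,	 /* emits nested AF data */
	.get_link_af_size = my_get_link_af_size, /* sizes that payload */
};

static int __init my_af_init(void)
{
	rtnl_af_register(&my_af_ops);
	return 0;
}
#endif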

/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del_rcu(&ops->list);
	rtnl_unlock();

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	rcu_read_lock();
	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}
	rcu_read_unlock();

	return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}

int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 const struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;

	nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	/* nothing is dumped for dst_default_metrics, so just skip the loop */
	if (metrics == dst_default_metrics.metrics)
		return 0;

	mx = nla_nest_start_noflag(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
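
/* Dump sketch (hypothetical caller, not part of this file): route dump
 * paths hand their metrics array to rtnetlink_put_metrics(), which
 * nests RTA_METRICS and emits one attribute per non-zero metric.
 */
#if 0
static int my_dump_metrics(struct sk_buff *skb, struct fib_info *fi)
{
	return rtnetlink_put_metrics(skb, fi->fib_metrics->metrics);
}
#endif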

int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_error = error,
		.rta_id = id,
	};

	if (dst) {
		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
		ci.rta_used = dst->__use;
		ci.rta_clntref = atomic_read(&dst->__refcnt);
	}
	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
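
/* Cacheinfo sketch (hypothetical caller, not part of this file):
 * expires is relative and in jiffies; pass 0 to report "no expiry".
 */
#if 0
static int my_put_route_cacheinfo(struct sk_buff *skb,
				  struct dst_entry *dst, long expires)
{
	return rtnl_put_cacheinfo(skb, dst, 0, expires, dst->error);
}
#endif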

static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_TESTING ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev) && !netif_testing(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_TESTING:
		if (netif_oper_up(dev))
			operstate = IF_OPER_TESTING;
		break;

	case IF_OPER_DORMANT:
		if (netif_oper_up(dev))
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		write_lock(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock(&dev_base_lock);
		netdev_state_change(dev);
	}
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}

static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}

/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(0) + /* nest IFLA_VF_STATS */
			 /* IFLA_VF_STATS_RX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_BROADCAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_MULTICAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		return size;
	} else
		return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PROT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
			vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}

static size_t rtnl_xdp_size(void)
{
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4) +	/* XDP_PROG_ID (or 1st mode) */
			  nla_total_size(4);	/* XDP_<mode>_PROG_ID */

	return xdp_size;
}

static size_t rtnl_prop_list_size(const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	size_t size;

	if (list_empty(&dev->name_node->list))
		return 0;
	size = nla_total_size(0);
	list_for_each_entry(name_node, &dev->name_node->list, list)
		size += nla_total_size(ALTIFNAMSIZ);
	return size;
}

static size_t rtnl_proto_down_size(const struct net_device *dev)
{
	size_t size = nla_total_size(1);

	if (dev->proto_down_reason)
		size += nla_total_size(0) + nla_total_size(4);

	return size;
}

static size_t rtnl_devlink_port_size(const struct net_device *dev)
{
	size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */

	if (dev->devlink_port)
		size += devlink_nl_port_handle_size(dev->devlink_port);

	return size;
}

static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_ALLMULTI */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(4) /* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4)  /* IFLA_EVENT */
	       + nla_total_size(4)  /* IFLA_NEW_NETNSID */
	       + nla_total_size(4)  /* IFLA_NEW_IFINDEX */
	       + rtnl_proto_down_size(dev)  /* proto down */
	       + nla_total_size(4)  /* IFLA_TARGET_NETNSID */
	       + nla_total_size(4)  /* IFLA_CARRIER_UP_COUNT */
	       + nla_total_size(4)  /* IFLA_CARRIER_DOWN_COUNT */
	       + nla_total_size(4)  /* IFLA_MIN_MTU */
	       + nla_total_size(4)  /* IFLA_MAX_MTU */
	       + rtnl_prop_list_size(dev)
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
	       + rtnl_devlink_port_size(dev)
	       + 0;
}

static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}

static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}

static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_phys_item_id ppid = { };
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct rtnl_link_stats64 *sp;
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
	if (!attr)
		return -EMSGSIZE;

	sp = nla_data(attr);
	dev_get_stats(dev, sp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (!attr)
		return -EMSGSIZE;

	copy_rtnl_link_stats(nla_data(attr), sp);

	return 0;
}

static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       struct nlattr *vfinfo)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_broadcast vf_broadcast;
	struct ifla_vf_info ivi;
	struct ifla_vf_guid node_guid;
	struct ifla_vf_guid port_guid;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query.  Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
	memset(&node_guid, 0, sizeof(node_guid));
	memset(&port_guid, 0, sizeof(port_guid));

	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf =
		node_guid.vf =
		port_guid.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
	if (!vf)
		goto nla_put_vfinfo_failure;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;

	if (dev->netdev_ops->ndo_get_vf_guid &&
	    !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
					      &port_guid)) {
		if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
			    &node_guid) ||
		    nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
			    &port_guid))
			goto nla_put_vf_failure;
	}
	vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	memset(&vf_stats, 0, sizeof(vf_stats));
	if (dev->netdev_ops->ndo_get_vf_stats)
		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
						&vf_stats);
	vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
	if (!vfstats)
		goto nla_put_vf_failure;
	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
			      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
			      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
			      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
		nla_nest_cancel(skb, vfstats);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfstats);
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
nla_put_vfinfo_failure:
	nla_nest_cancel(skb, vfinfo);
	return -EMSGSIZE;
}

static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
					   struct net_device *dev,
					   u32 ext_filter_mask)
{
	struct nlattr *vfinfo;
	int i, num_vfs;

	if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
		return 0;

	num_vfs = dev_num_vf(dev->dev.parent);
	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
		return -EMSGSIZE;

	if (!dev->netdev_ops->ndo_get_vf_config)
		return 0;

	vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
	if (!vfinfo)
		return -EMSGSIZE;

	for (i = 0; i < num_vfs; i++) {
		if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
			return -EMSGSIZE;
	}

	nla_nest_end(skb, vfinfo);
	return 0;
}

static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start   = dev->mem_start;
	map.mem_end     = dev->mem_end;
	map.base_addr   = dev->base_addr;
	map.irq         = dev->irq;
	map.dma         = dev->dma;
	map.port        = dev->if_port;

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}

static u32 rtnl_xdp_prog_skb(struct net_device *dev)
{
	const struct bpf_prog *generic_xdp_prog;

	ASSERT_RTNL();

	generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
	if (!generic_xdp_prog)
		return 0;
	return generic_xdp_prog->aux->id;
}

static u32 rtnl_xdp_prog_drv(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_DRV);
}

static u32 rtnl_xdp_prog_hw(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_HW);
}

static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
			       u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
			       u32 (*get_prog_id)(struct net_device *dev))
{
	u32 curr_id;
	int err;

	curr_id = get_prog_id(dev);
	if (!curr_id)
		return 0;

	*prog_id = curr_id;
	err = nla_put_u32(skb, attr, curr_id);
	if (err)
		return err;

	if (*mode != XDP_ATTACHED_NONE)
		*mode = XDP_ATTACHED_MULTI;
	else
		*mode = tgt_mode;

	return 0;
}

static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *xdp;
	u32 prog_id;
	int err;
	u8 mode;

	xdp = nla_nest_start_noflag(skb, IFLA_XDP);
	if (!xdp)
		return -EMSGSIZE;

	prog_id = 0;
	mode = XDP_ATTACHED_NONE;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
				  IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
				  IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
				  IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
	if (err)
		goto err_cancel;

	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
	if (err)
		goto err_cancel;

	if (prog_id && mode != XDP_ATTACHED_MULTI) {
		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
		if (err)
			goto err_cancel;
	}

	nla_nest_end(skb, xdp);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xdp);
	return err;
}

static u32 rtnl_get_event(unsigned long event)
{
	u32 rtnl_event_type = IFLA_EVENT_NONE;

	switch (event) {
	case NETDEV_REBOOT:
		rtnl_event_type = IFLA_EVENT_REBOOT;
		break;
	case NETDEV_FEAT_CHANGE:
		rtnl_event_type = IFLA_EVENT_FEATURES;
		break;
	case NETDEV_BONDING_FAILOVER:
		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
		break;
	case NETDEV_NOTIFY_PEERS:
		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
		break;
	case NETDEV_RESEND_IGMP:
		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
		break;
	case NETDEV_CHANGEINFODATA:
		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
		break;
	default:
		break;
	}

	return rtnl_event_type;
}

static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
{
	const struct net_device *upper_dev;
	int ret = 0;

	rcu_read_lock();

	upper_dev = netdev_master_upper_dev_get_rcu(dev);
	if (upper_dev)
		ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);

	rcu_read_unlock();
	return ret;
}

static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
			  bool force)
{
	int ifindex = dev_get_iflink(dev);

	if (force || dev->ifindex != ifindex)
		return nla_put_u32(skb, IFLA_LINK, ifindex);

	return 0;
}

static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
					      struct net_device *dev)
{
	char buf[IFALIASZ];
	int ret;

	ret = dev_get_alias(dev, buf, sizeof(buf));
	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
}

static int rtnl_fill_link_netnsid(struct sk_buff *skb,
				  const struct net_device *dev,
				  struct net *src_net, gfp_t gfp)
{
	bool put_iflink = false;

	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(src_net, link_net, gfp);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				return -EMSGSIZE;

			put_iflink = true;
		}
	}

	return nla_put_iflink(skb, dev, put_iflink);
}

static int rtnl_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev,
			     u32 ext_filter_mask)
{
	const struct rtnl_af_ops *af_ops;
	struct nlattr *af_spec;

	af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
	if (!af_spec)
		return -EMSGSIZE;

	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		struct nlattr *af;
		int err;

		if (!af_ops->fill_link_af)
			continue;

		af = nla_nest_start_noflag(skb, af_ops->family);
		if (!af)
			return -EMSGSIZE;

		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
		/*
		 * Caller may return ENODATA to indicate that there
		 * was no data to be dumped. This is not an error, it
		 * means we should trim the attribute header and
		 * continue.
		 */
		if (err == -ENODATA)
			nla_nest_cancel(skb, af);
		else if (err < 0)
			return -EMSGSIZE;

		nla_nest_end(skb, af);
	}

	nla_nest_end(skb, af_spec);
	return 0;
}

static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
				 const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	int count = 0;

	list_for_each_entry(name_node, &dev->name_node->list, list) {
		if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
			return -EMSGSIZE;
		count++;
	}
	return count;
}

static int rtnl_fill_prop_list(struct sk_buff *skb,
			       const struct net_device *dev)
{
	struct nlattr *prop_list;
	int ret;

	prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
	if (!prop_list)
		return -EMSGSIZE;

	ret = rtnl_fill_alt_ifnames(skb, dev);
	if (ret <= 0)
		goto nest_cancel;

	nla_nest_end(skb, prop_list);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, prop_list);
	return ret;
}

static int rtnl_fill_proto_down(struct sk_buff *skb,
				const struct net_device *dev)
{
	struct nlattr *pr;
	u32 preason;

	if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
		goto nla_put_failure;

	preason = dev->proto_down_reason;
	if (!preason)
		return 0;

	pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
	if (!pr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
		nla_nest_cancel(skb, pr);
		goto nla_put_failure;
	}

	nla_nest_end(skb, pr);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int rtnl_fill_devlink_port(struct sk_buff *skb,
				  const struct net_device *dev)
{
	struct nlattr *devlink_port_nest;
	int ret;

	devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
	if (!devlink_port_nest)
		return -EMSGSIZE;

	if (dev->devlink_port) {
		ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
		if (ret < 0)
			goto nest_cancel;
	}

	nla_nest_end(skb, devlink_port_nest);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, devlink_port_nest);
	return ret;
}

static int rtnl_fill_ifinfo(struct sk_buff *skb,
			    struct net_device *dev, struct net *src_net,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
			    u32 event, int *new_nsid, int new_ifindex,
			    int tgt_netnsid, gfp_t gfp)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	struct Qdisc *qdisc;

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
		goto nla_put_failure;

	qdisc = rtnl_dereference(dev->qdisc);
	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
	    nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
	    nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
	    nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
	    put_master_ifindex(skb, dev) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    (qdisc &&
	     nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
	    nla_put_ifalias(skb, dev) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_up_count) +
			atomic_read(&dev->carrier_down_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
			atomic_read(&dev->carrier_up_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
			atomic_read(&dev->carrier_down_count)))
		goto nla_put_failure;

	if (rtnl_fill_proto_down(skb, dev))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;
	}

	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure;

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
		goto nla_put_failure;

	if (new_nsid &&
	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
		goto nla_put_failure;
	if (new_ifindex &&
	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
		goto nla_put_failure;

	if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
	    nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
		goto nla_put_failure;

	rcu_read_lock();
	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
		goto nla_put_failure_rcu;
	rcu_read_unlock();

	if (rtnl_fill_prop_list(skb, dev))
		goto nla_put_failure;

	if (dev->dev.parent &&
	    nla_put_string(skb, IFLA_PARENT_DEV_NAME,
			   dev_name(dev->dev.parent)))
		goto nla_put_failure;

	if (dev->dev.parent && dev->dev.parent->bus &&
	    nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
			   dev->dev.parent->bus->name))
		goto nla_put_failure;

	if (rtnl_fill_devlink_port(skb, dev))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_rcu:
	rcu_read_unlock();
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
1916
1917static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1918	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
1919	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1920	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1921	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
1922	[IFLA_MTU]		= { .type = NLA_U32 },
1923	[IFLA_LINK]		= { .type = NLA_U32 },
1924	[IFLA_MASTER]		= { .type = NLA_U32 },
1925	[IFLA_CARRIER]		= { .type = NLA_U8 },
1926	[IFLA_TXQLEN]		= { .type = NLA_U32 },
1927	[IFLA_WEIGHT]		= { .type = NLA_U32 },
1928	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
1929	[IFLA_LINKMODE]		= { .type = NLA_U8 },
1930	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
1931	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
1932	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
1933	/* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
1934	 * allow 0-length string (needed to remove an alias).
1935	 */
1936	[IFLA_IFALIAS]	        = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
1937	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
1938	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
1939	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
1940	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
1941	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
1942	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
1943	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
1944	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
1945	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
1946	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
1947	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1948	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },  /* ignored */
1949	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1950	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
1951	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
1952	[IFLA_XDP]		= { .type = NLA_NESTED },
1953	[IFLA_EVENT]		= { .type = NLA_U32 },
1954	[IFLA_GROUP]		= { .type = NLA_U32 },
1955	[IFLA_TARGET_NETNSID]	= { .type = NLA_S32 },
1956	[IFLA_CARRIER_UP_COUNT]	= { .type = NLA_U32 },
1957	[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
1958	[IFLA_MIN_MTU]		= { .type = NLA_U32 },
1959	[IFLA_MAX_MTU]		= { .type = NLA_U32 },
1960	[IFLA_PROP_LIST]	= { .type = NLA_NESTED },
1961	[IFLA_ALT_IFNAME]	= { .type = NLA_STRING,
1962				    .len = ALTIFNAMSIZ - 1 },
1963	[IFLA_PERM_ADDRESS]	= { .type = NLA_REJECT },
1964	[IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
1965	[IFLA_NEW_IFINDEX]	= NLA_POLICY_MIN(NLA_S32, 1),
1966	[IFLA_PARENT_DEV_NAME]	= { .type = NLA_NUL_STRING },
1967	[IFLA_GRO_MAX_SIZE]	= { .type = NLA_U32 },
1968	[IFLA_TSO_MAX_SIZE]	= { .type = NLA_REJECT },
1969	[IFLA_TSO_MAX_SEGS]	= { .type = NLA_REJECT },
1970	[IFLA_ALLMULTI]		= { .type = NLA_REJECT },
1971};
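
/* A note on the policy above (a summary of generic netlink semantics, not
 * behaviour specific to this file): entries typed NLA_REJECT
 * (IFLA_PERM_ADDRESS, IFLA_TSO_MAX_SIZE, IFLA_TSO_MAX_SEGS, IFLA_ALLMULTI)
 * are attributes the kernel reports in dumps but refuses to accept from
 * user space; parsing fails them with an extack error rather than
 * silently ignoring them.
 */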
1972
1973static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1974	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
1975	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
1976	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
1977	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
1978};
1979
1980static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1981	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
1982	[IFLA_VF_BROADCAST]	= { .type = NLA_REJECT },
1983	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
1984	[IFLA_VF_VLAN_LIST]     = { .type = NLA_NESTED },
1985	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
1986	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
1987	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
1988	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
1989	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
1990	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
1991	[IFLA_VF_TRUST]		= { .len = sizeof(struct ifla_vf_trust) },
1992	[IFLA_VF_IB_NODE_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
1993	[IFLA_VF_IB_PORT_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
1994};
1995
1996static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
1997	[IFLA_PORT_VF]		= { .type = NLA_U32 },
1998	[IFLA_PORT_PROFILE]	= { .type = NLA_STRING,
1999				    .len = PORT_PROFILE_MAX },
2000	[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
2001				      .len = PORT_UUID_MAX },
2002	[IFLA_PORT_HOST_UUID]	= { .type = NLA_STRING,
2003				    .len = PORT_UUID_MAX },
2004	[IFLA_PORT_REQUEST]	= { .type = NLA_U8, },
2005	[IFLA_PORT_RESPONSE]	= { .type = NLA_U16, },
2006
2007	/* Unused, but we need to keep it here since user space could
2008	 * fill it. It's also broken with regard to NLA_BINARY use in
2009	 * combination with structs.
2010	 */
2011	[IFLA_PORT_VSI_TYPE]	= { .type = NLA_BINARY,
2012				    .len = sizeof(struct ifla_port_vsi) },
2013};
2014
2015static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
2016	[IFLA_XDP_UNSPEC]	= { .strict_start_type = IFLA_XDP_EXPECTED_FD },
2017	[IFLA_XDP_FD]		= { .type = NLA_S32 },
2018	[IFLA_XDP_EXPECTED_FD]	= { .type = NLA_S32 },
2019	[IFLA_XDP_ATTACHED]	= { .type = NLA_U8 },
2020	[IFLA_XDP_FLAGS]	= { .type = NLA_U32 },
2021	[IFLA_XDP_PROG_ID]	= { .type = NLA_U32 },
2022};
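
/* The strict_start_type entry above makes attributes from
 * IFLA_XDP_EXPECTED_FD onwards subject to strict validation, while the
 * older IFLA_XDP_* attributes keep the historic lenient parsing for
 * backwards compatibility.
 */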
2023
2024static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
2025{
2026	const struct rtnl_link_ops *ops = NULL;
2027	struct nlattr *linfo[IFLA_INFO_MAX + 1];
2028
2029	if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
2030		return NULL;
2031
2032	if (linfo[IFLA_INFO_KIND]) {
2033		char kind[MODULE_NAME_LEN];
2034
2035		nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
2036		ops = rtnl_link_ops_get(kind);
2037	}
2038
2039	return ops;
2040}
2041
2042static bool link_master_filtered(struct net_device *dev, int master_idx)
2043{
2044	struct net_device *master;
2045
2046	if (!master_idx)
2047		return false;
2048
2049	master = netdev_master_upper_dev_get(dev);
2050
2051	/* 0 is already used to denote that IFLA_MASTER wasn't passed, so we
2052	 * need another invalid ifindex value to denote "no master".
2053	 */
2054	if (master_idx == -1)
2055		return !!master;
2056
2057	if (!master || master->ifindex != master_idx)
2058		return true;
2059
2060	return false;
2061}
2062
2063static bool link_kind_filtered(const struct net_device *dev,
2064			       const struct rtnl_link_ops *kind_ops)
2065{
2066	if (kind_ops && dev->rtnl_link_ops != kind_ops)
2067		return true;
2068
2069	return false;
2070}
2071
2072static bool link_dump_filtered(struct net_device *dev,
2073			       int master_idx,
2074			       const struct rtnl_link_ops *kind_ops)
2075{
2076	if (link_master_filtered(dev, master_idx) ||
2077	    link_kind_filtered(dev, kind_ops))
2078		return true;
2079
2080	return false;
2081}
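
/* Illustration (attribute mapping as generated by iproute2, e.g.
 * "ip link show master br0 type veth"): IFLA_MASTER carries the bridge's
 * ifindex and IFLA_LINKINFO/IFLA_INFO_KIND carries "veth", so the dump
 * below skips every device that is not a veth enslaved to that bridge.
 */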
2082
2083/**
2084 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
2085 * @sk: netlink socket
2086 * @netnsid: network namespace identifier
2087 *
2088 * Returns the network namespace identified by netnsid on success or an error
2089 * pointer on failure.
2090 */
2091struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
2092{
2093	struct net *net;
2094
2095	net = get_net_ns_by_id(sock_net(sk), netnsid);
2096	if (!net)
2097		return ERR_PTR(-EINVAL);
2098
2099	/* For now, the caller is required to have CAP_NET_ADMIN in
2100	 * the user namespace owning the target net ns.
2101	 */
2102	if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
2103		put_net(net);
2104		return ERR_PTR(-EACCES);
2105	}
2106	return net;
2107}
2108EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
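
/* Typical caller pattern, as used by rtnl_dellink() and rtnl_getlink()
 * below: resolve the target namespace, operate on it, then drop the
 * reference:
 *
 *	tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
 *	if (IS_ERR(tgt_net))
 *		return PTR_ERR(tgt_net);
 *	...
 *	if (netnsid >= 0)
 *		put_net(tgt_net);
 */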
2109
2110static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
2111				      bool strict_check, struct nlattr **tb,
2112				      struct netlink_ext_ack *extack)
2113{
2114	int hdrlen;
2115
2116	if (strict_check) {
2117		struct ifinfomsg *ifm;
2118
2119		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
2120			NL_SET_ERR_MSG(extack, "Invalid header for link dump");
2121			return -EINVAL;
2122		}
2123
2124		ifm = nlmsg_data(nlh);
2125		if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
2126		    ifm->ifi_change) {
2127			NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
2128			return -EINVAL;
2129		}
2130		if (ifm->ifi_index) {
2131			NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
2132			return -EINVAL;
2133		}
2134
2135		return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
2136						     IFLA_MAX, ifla_policy,
2137						     extack);
2138	}
2139
2140	/* A hack to preserve kernel<->userspace interface.
2141	 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
2142	 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
2143	 * what iproute2 < v3.9.0 used.
2144	 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
2145	 * attribute, its netlink message is shorter than struct ifinfomsg.
2146	 */
2147	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2148		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2149
2150	return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
2151				      extack);
2152}
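
/* Worked size check behind the heuristic above: struct rtgenmsg is one
 * byte, padded to 4 by netlink alignment, and IFLA_EXT_MASK is an 8-byte
 * attribute, so an old-style request carries 4 + 8 = 12 bytes of payload,
 * still smaller than the 16-byte struct ifinfomsg.
 */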
2153
2154static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
2155{
2156	struct netlink_ext_ack *extack = cb->extack;
2157	const struct nlmsghdr *nlh = cb->nlh;
2158	struct net *net = sock_net(skb->sk);
2159	struct net *tgt_net = net;
2160	int h, s_h;
2161	int idx = 0, s_idx;
2162	struct net_device *dev;
2163	struct hlist_head *head;
2164	struct nlattr *tb[IFLA_MAX+1];
2165	u32 ext_filter_mask = 0;
2166	const struct rtnl_link_ops *kind_ops = NULL;
2167	unsigned int flags = NLM_F_MULTI;
2168	int master_idx = 0;
2169	int netnsid = -1;
2170	int err, i;
2171
2172	s_h = cb->args[0];
2173	s_idx = cb->args[1];
2174
2175	err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
2176	if (err < 0) {
2177		if (cb->strict_check)
2178			return err;
2179
2180		goto walk_entries;
2181	}
2182
2183	for (i = 0; i <= IFLA_MAX; ++i) {
2184		if (!tb[i])
2185			continue;
2186
2187		/* new attributes should only be added with strict checking */
2188		switch (i) {
2189		case IFLA_TARGET_NETNSID:
2190			netnsid = nla_get_s32(tb[i]);
2191			tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
2192			if (IS_ERR(tgt_net)) {
2193				NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
2194				return PTR_ERR(tgt_net);
2195			}
2196			break;
2197		case IFLA_EXT_MASK:
2198			ext_filter_mask = nla_get_u32(tb[i]);
2199			break;
2200		case IFLA_MASTER:
2201			master_idx = nla_get_u32(tb[i]);
2202			break;
2203		case IFLA_LINKINFO:
2204			kind_ops = linkinfo_to_kind_ops(tb[i]);
2205			break;
2206		default:
2207			if (cb->strict_check) {
2208				NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
2209				return -EINVAL;
2210			}
2211		}
2212	}
2213
2214	if (master_idx || kind_ops)
2215		flags |= NLM_F_DUMP_FILTERED;
2216
2217walk_entries:
2218	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
2219		idx = 0;
2220		head = &tgt_net->dev_index_head[h];
2221		hlist_for_each_entry(dev, head, index_hlist) {
2222			if (link_dump_filtered(dev, master_idx, kind_ops))
2223				goto cont;
2224			if (idx < s_idx)
2225				goto cont;
2226			err = rtnl_fill_ifinfo(skb, dev, net,
2227					       RTM_NEWLINK,
2228					       NETLINK_CB(cb->skb).portid,
2229					       nlh->nlmsg_seq, 0, flags,
2230					       ext_filter_mask, 0, NULL, 0,
2231					       netnsid, GFP_KERNEL);
2232
2233			if (err < 0) {
2234				if (likely(skb->len))
2235					goto out;
2236
2237				goto out_err;
2238			}
2239cont:
2240			idx++;
2241		}
2242	}
2243out:
2244	err = skb->len;
2245out_err:
2246	cb->args[1] = idx;
2247	cb->args[0] = h;
2248	cb->seq = tgt_net->dev_base_seq;
2249	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2250	if (netnsid >= 0)
2251		put_net(tgt_net);
2252
2253	return err;
2254}
2255
2256int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
2257			struct netlink_ext_ack *exterr)
2258{
2259	return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
2260				    exterr);
2261}
2262EXPORT_SYMBOL(rtnl_nla_parse_ifla);
2263
2264struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
2265{
2266	struct net *net;
2267	/* Examine the link attributes and figure out which
2268	 * network namespace we are talking about.
2269	 */
2270	if (tb[IFLA_NET_NS_PID])
2271		net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
2272	else if (tb[IFLA_NET_NS_FD])
2273		net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
2274	else
2275		net = get_net(src_net);
2276	return net;
2277}
2278EXPORT_SYMBOL(rtnl_link_get_net);
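
/* For example, "ip link set dev veth0 netns 1234" passes the target pid
 * in IFLA_NET_NS_PID, while "... netns myns" passes an open fd for the
 * named namespace in IFLA_NET_NS_FD; with neither attribute the device
 * stays in src_net. In all three cases a reference is taken, so the
 * caller must balance it with put_net().
 */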
2279
2280/* Figure out which network namespace we are talking about by
2281 * examining the link attributes in the following order:
2282 *
2283 * 1. IFLA_NET_NS_PID
2284 * 2. IFLA_NET_NS_FD
2285 * 3. IFLA_TARGET_NETNSID
2286 */
2287static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
2288					       struct nlattr *tb[])
2289{
2290	struct net *net;
2291
2292	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
2293		return rtnl_link_get_net(src_net, tb);
2294
2295	if (!tb[IFLA_TARGET_NETNSID])
2296		return get_net(src_net);
2297
2298	net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
2299	if (!net)
2300		return ERR_PTR(-EINVAL);
2301
2302	return net;
2303}
2304
2305static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
2306					     struct net *src_net,
2307					     struct nlattr *tb[], int cap)
2308{
2309	struct net *net;
2310
2311	net = rtnl_link_get_net_by_nlattr(src_net, tb);
2312	if (IS_ERR(net))
2313		return net;
2314
2315	if (!netlink_ns_capable(skb, net->user_ns, cap)) {
2316		put_net(net);
2317		return ERR_PTR(-EPERM);
2318	}
2319
2320	return net;
2321}
2322
2323/* Verify that rtnetlink requests do not pass additional properties
2324 * potentially referring to different network namespaces.
2325 */
2326static int rtnl_ensure_unique_netns(struct nlattr *tb[],
2327				    struct netlink_ext_ack *extack,
2328				    bool netns_id_only)
2329{
2330
2331	if (netns_id_only) {
2332		if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
2333			return 0;
2334
2335		NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
2336		return -EOPNOTSUPP;
2337	}
2338
2339	if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
2340		goto invalid_attr;
2341
2342	if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
2343		goto invalid_attr;
2344
2345	if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
2346		goto invalid_attr;
2347
2348	return 0;
2349
2350invalid_attr:
2351	NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
2352	return -EINVAL;
2353}
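
/* So a request carrying, say, both IFLA_NET_NS_PID and IFLA_NET_NS_FD is
 * refused outright rather than one attribute silently winning, and
 * netns_id_only callers such as rtnl_dellink() accept only
 * IFLA_TARGET_NETNSID.
 */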
2354
2355static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2356			     int max_tx_rate)
2357{
2358	const struct net_device_ops *ops = dev->netdev_ops;
2359
2360	if (!ops->ndo_set_vf_rate)
2361		return -EOPNOTSUPP;
2362	if (max_tx_rate && max_tx_rate < min_tx_rate)
2363		return -EINVAL;
2364
2365	return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
2366}
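
/* A max_tx_rate of 0 conventionally means "unlimited" in the VF rate API
 * (cf. "ip link set ... vf N max_tx_rate 0"), which is why min and max
 * are only compared when max_tx_rate is non-zero.
 */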
2367
2368static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
2369			    struct netlink_ext_ack *extack)
2370{
2371	if (dev) {
2372		if (tb[IFLA_ADDRESS] &&
2373		    nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2374			return -EINVAL;
2375
2376		if (tb[IFLA_BROADCAST] &&
2377		    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2378			return -EINVAL;
2379	}
2380
2381	if (tb[IFLA_AF_SPEC]) {
2382		struct nlattr *af;
2383		int rem, err;
2384
2385		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2386			const struct rtnl_af_ops *af_ops;
2387
2388			af_ops = rtnl_af_lookup(nla_type(af));
2389			if (!af_ops)
2390				return -EAFNOSUPPORT;
2391
2392			if (!af_ops->set_link_af)
2393				return -EOPNOTSUPP;
2394
2395			if (af_ops->validate_link_af) {
2396				err = af_ops->validate_link_af(dev, af, extack);
2397				if (err < 0)
2398					return err;
2399			}
2400		}
2401	}
2402
2403	return 0;
2404}
2405
2406static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2407				  int guid_type)
2408{
2409	const struct net_device_ops *ops = dev->netdev_ops;
2410
2411	return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2412}
2413
2414static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2415{
2416	if (dev->type != ARPHRD_INFINIBAND)
2417		return -EOPNOTSUPP;
2418
2419	return handle_infiniband_guid(dev, ivt, guid_type);
2420}
2421
2422static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
2423{
2424	const struct net_device_ops *ops = dev->netdev_ops;
2425	int err = -EINVAL;
2426
2427	if (tb[IFLA_VF_MAC]) {
2428		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
2429
2430		if (ivm->vf >= INT_MAX)
2431			return -EINVAL;
2432		err = -EOPNOTSUPP;
2433		if (ops->ndo_set_vf_mac)
2434			err = ops->ndo_set_vf_mac(dev, ivm->vf,
2435						  ivm->mac);
2436		if (err < 0)
2437			return err;
2438	}
2439
2440	if (tb[IFLA_VF_VLAN]) {
2441		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
2442
2443		if (ivv->vf >= INT_MAX)
2444			return -EINVAL;
2445		err = -EOPNOTSUPP;
2446		if (ops->ndo_set_vf_vlan)
2447			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
2448						   ivv->qos,
2449						   htons(ETH_P_8021Q));
2450		if (err < 0)
2451			return err;
2452	}
2453
2454	if (tb[IFLA_VF_VLAN_LIST]) {
2455		struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
2456		struct nlattr *attr;
2457		int rem, len = 0;
2458
2459		err = -EOPNOTSUPP;
2460		if (!ops->ndo_set_vf_vlan)
2461			return err;
2462
2463		nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
2464			if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
2465			    nla_len(attr) < NLA_HDRLEN) {
2466				return -EINVAL;
2467			}
2468			if (len >= MAX_VLAN_LIST_LEN)
2469				return -EOPNOTSUPP;
2470			ivvl[len] = nla_data(attr);
2471
2472			len++;
2473		}
2474		if (len == 0)
2475			return -EINVAL;
2476
2477		if (ivvl[0]->vf >= INT_MAX)
2478			return -EINVAL;
2479		err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2480					   ivvl[0]->qos, ivvl[0]->vlan_proto);
2481		if (err < 0)
2482			return err;
2483	}
2484
2485	if (tb[IFLA_VF_TX_RATE]) {
2486		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
2487		struct ifla_vf_info ivf;
2488
2489		if (ivt->vf >= INT_MAX)
2490			return -EINVAL;
2491		err = -EOPNOTSUPP;
2492		if (ops->ndo_get_vf_config)
2493			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2494		if (err < 0)
2495			return err;
2496
2497		err = rtnl_set_vf_rate(dev, ivt->vf,
2498				       ivf.min_tx_rate, ivt->rate);
2499		if (err < 0)
2500			return err;
2501	}
2502
2503	if (tb[IFLA_VF_RATE]) {
2504		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
2505
2506		if (ivt->vf >= INT_MAX)
2507			return -EINVAL;
2508
2509		err = rtnl_set_vf_rate(dev, ivt->vf,
2510				       ivt->min_tx_rate, ivt->max_tx_rate);
2511		if (err < 0)
2512			return err;
2513	}
2514
2515	if (tb[IFLA_VF_SPOOFCHK]) {
2516		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
2517
2518		if (ivs->vf >= INT_MAX)
2519			return -EINVAL;
2520		err = -EOPNOTSUPP;
2521		if (ops->ndo_set_vf_spoofchk)
2522			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2523						       ivs->setting);
2524		if (err < 0)
2525			return err;
2526	}
2527
2528	if (tb[IFLA_VF_LINK_STATE]) {
2529		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
2530
2531		if (ivl->vf >= INT_MAX)
2532			return -EINVAL;
2533		err = -EOPNOTSUPP;
2534		if (ops->ndo_set_vf_link_state)
2535			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2536							 ivl->link_state);
2537		if (err < 0)
2538			return err;
2539	}
2540
2541	if (tb[IFLA_VF_RSS_QUERY_EN]) {
2542		struct ifla_vf_rss_query_en *ivrssq_en;
2543
2544		err = -EOPNOTSUPP;
2545		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
2546		if (ivrssq_en->vf >= INT_MAX)
2547			return -EINVAL;
2548		if (ops->ndo_set_vf_rss_query_en)
2549			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2550							   ivrssq_en->setting);
2551		if (err < 0)
2552			return err;
2553	}
2554
2555	if (tb[IFLA_VF_TRUST]) {
2556		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
2557
2558		if (ivt->vf >= INT_MAX)
2559			return -EINVAL;
2560		err = -EOPNOTSUPP;
2561		if (ops->ndo_set_vf_trust)
2562			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2563		if (err < 0)
2564			return err;
2565	}
2566
2567	if (tb[IFLA_VF_IB_NODE_GUID]) {
2568		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
2569
2570		if (ivt->vf >= INT_MAX)
2571			return -EINVAL;
2572		if (!ops->ndo_set_vf_guid)
2573			return -EOPNOTSUPP;
2574		return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2575	}
2576
2577	if (tb[IFLA_VF_IB_PORT_GUID]) {
2578		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
2579
2580		if (ivt->vf >= INT_MAX)
2581			return -EINVAL;
2582		if (!ops->ndo_set_vf_guid)
2583			return -EOPNOTSUPP;
2584
2585		return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
2586	}
2587
2588	return err;
2589}
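
/* Note that the VF attributes above are applied in order with no
 * rollback: a failure part-way through (e.g. -EOPNOTSUPP from a missing
 * ndo) returns immediately and leaves the earlier settings in place.
 */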
2590
2591static int do_set_master(struct net_device *dev, int ifindex,
2592			 struct netlink_ext_ack *extack)
2593{
2594	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2595	const struct net_device_ops *ops;
2596	int err;
2597
2598	if (upper_dev) {
2599		if (upper_dev->ifindex == ifindex)
2600			return 0;
2601		ops = upper_dev->netdev_ops;
2602		if (ops->ndo_del_slave) {
2603			err = ops->ndo_del_slave(upper_dev, dev);
2604			if (err)
2605				return err;
2606		} else {
2607			return -EOPNOTSUPP;
2608		}
2609	}
2610
2611	if (ifindex) {
2612		upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2613		if (!upper_dev)
2614			return -EINVAL;
2615		ops = upper_dev->netdev_ops;
2616		if (ops->ndo_add_slave) {
2617			err = ops->ndo_add_slave(upper_dev, dev, extack);
2618			if (err)
2619				return err;
2620		} else {
2621			return -EOPNOTSUPP;
2622		}
2623	}
2624	return 0;
2625}
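
/* Enslavement sketch: "ip link set dev eth0 master br0" arrives as
 * IFLA_MASTER holding br0's ifindex, detaching any current master via
 * ndo_del_slave() before calling br0's ndo_add_slave(); an ifindex of 0
 * ("ip link set dev eth0 nomaster") runs only the detach step.
 */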
2626
2627static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
2628	[IFLA_PROTO_DOWN_REASON_MASK]	= { .type = NLA_U32 },
2629	[IFLA_PROTO_DOWN_REASON_VALUE]	= { .type = NLA_U32 },
2630};
2631
2632static int do_set_proto_down(struct net_device *dev,
2633			     struct nlattr *nl_proto_down,
2634			     struct nlattr *nl_proto_down_reason,
2635			     struct netlink_ext_ack *extack)
2636{
2637	struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
2638	unsigned long mask = 0;
2639	u32 value;
2640	bool proto_down;
2641	int err;
2642
2643	if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) {
2644		NL_SET_ERR_MSG(extack, "Protodown not supported by device");
2645		return -EOPNOTSUPP;
2646	}
2647
2648	if (nl_proto_down_reason) {
2649		err = nla_parse_nested_deprecated(pdreason,
2650						  IFLA_PROTO_DOWN_REASON_MAX,
2651						  nl_proto_down_reason,
2652						  ifla_proto_down_reason_policy,
2653						  NULL);
2654		if (err < 0)
2655			return err;
2656
2657		if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
2658			NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
2659			return -EINVAL;
2660		}
2661
2662		value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);
2663
2664		if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
2665			mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);
2666
2667		dev_change_proto_down_reason(dev, mask, value);
2668	}
2669
2670	if (nl_proto_down) {
2671		proto_down = nla_get_u8(nl_proto_down);
2672
2673		/* Don't turn off protodown if there are active reasons */
2674		if (!proto_down && dev->proto_down_reason) {
2675			NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
2676			return -EBUSY;
2677		}
2678		err = dev_change_proto_down(dev,
2679					    proto_down);
2680		if (err)
2681			return err;
2682	}
2683
2684	return 0;
2685}
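
/* When IFLA_PROTO_DOWN_REASON_MASK is absent, mask stays 0 and (as
 * implemented by dev_change_proto_down_reason()) the whole reason word is
 * replaced by value; with a mask, only the masked bits change. Either
 * way, protodown itself cannot be cleared while any reason bit is set.
 */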
2686
2687#define DO_SETLINK_MODIFIED	0x01
2688/* notify flag means notify + modified. */
2689#define DO_SETLINK_NOTIFY	0x03
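/* DO_SETLINK_MODIFIED records that at least one change has already been
 * committed, so a later failure triggers the "inconsistent configuration"
 * warning at errout; DO_SETLINK_NOTIFY additionally makes the errout path
 * call netdev_state_change().
 */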
2690static int do_setlink(const struct sk_buff *skb,
2691		      struct net_device *dev, struct ifinfomsg *ifm,
2692		      struct netlink_ext_ack *extack,
2693		      struct nlattr **tb, int status)
2694{
2695	const struct net_device_ops *ops = dev->netdev_ops;
2696	char ifname[IFNAMSIZ];
2697	int err;
2698
2699	err = validate_linkmsg(dev, tb, extack);
2700	if (err < 0)
2701		return err;
2702
2703	if (tb[IFLA_IFNAME])
2704		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2705	else
2706		ifname[0] = '\0';
2707
2708	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
2709		const char *pat = ifname[0] ? ifname : NULL;
2710		struct net *net;
2711		int new_ifindex;
2712
2713		net = rtnl_link_get_net_capable(skb, dev_net(dev),
2714						tb, CAP_NET_ADMIN);
2715		if (IS_ERR(net)) {
2716			err = PTR_ERR(net);
2717			goto errout;
2718		}
2719
2720		if (tb[IFLA_NEW_IFINDEX])
2721			new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]);
2722		else
2723			new_ifindex = 0;
2724
2725		err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
2726		put_net(net);
2727		if (err)
2728			goto errout;
2729		status |= DO_SETLINK_MODIFIED;
2730	}
2731
2732	if (tb[IFLA_MAP]) {
2733		struct rtnl_link_ifmap *u_map;
2734		struct ifmap k_map;
2735
2736		if (!ops->ndo_set_config) {
2737			err = -EOPNOTSUPP;
2738			goto errout;
2739		}
2740
2741		if (!netif_device_present(dev)) {
2742			err = -ENODEV;
2743			goto errout;
2744		}
2745
2746		u_map = nla_data(tb[IFLA_MAP]);
2747		k_map.mem_start = (unsigned long) u_map->mem_start;
2748		k_map.mem_end = (unsigned long) u_map->mem_end;
2749		k_map.base_addr = (unsigned short) u_map->base_addr;
2750		k_map.irq = (unsigned char) u_map->irq;
2751		k_map.dma = (unsigned char) u_map->dma;
2752		k_map.port = (unsigned char) u_map->port;
2753
2754		err = ops->ndo_set_config(dev, &k_map);
2755		if (err < 0)
2756			goto errout;
2757
2758		status |= DO_SETLINK_NOTIFY;
2759	}
2760
2761	if (tb[IFLA_ADDRESS]) {
2762		struct sockaddr *sa;
2763		int len;
2764
2765		len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2766						  sizeof(*sa));
2767		sa = kmalloc(len, GFP_KERNEL);
2768		if (!sa) {
2769			err = -ENOMEM;
2770			goto errout;
2771		}
2772		sa->sa_family = dev->type;
2773		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2774		       dev->addr_len);
2775		err = dev_set_mac_address_user(dev, sa, extack);
2776		kfree(sa);
2777		if (err)
2778			goto errout;
2779		status |= DO_SETLINK_MODIFIED;
2780	}
2781
2782	if (tb[IFLA_MTU]) {
2783		err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
2784		if (err < 0)
2785			goto errout;
2786		status |= DO_SETLINK_MODIFIED;
2787	}
2788
2789	if (tb[IFLA_GROUP]) {
2790		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2791		status |= DO_SETLINK_NOTIFY;
2792	}
2793
2794	/*
2795	 * Interface selected by interface index but interface
2796	 * name provided implies that a name change has been
2797	 * requested.
2798	 */
2799	if (ifm->ifi_index > 0 && ifname[0]) {
2800		err = dev_change_name(dev, ifname);
2801		if (err < 0)
2802			goto errout;
2803		status |= DO_SETLINK_MODIFIED;
2804	}
2805
2806	if (tb[IFLA_IFALIAS]) {
2807		err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2808				    nla_len(tb[IFLA_IFALIAS]));
2809		if (err < 0)
2810			goto errout;
2811		status |= DO_SETLINK_NOTIFY;
2812	}
2813
2814	if (tb[IFLA_BROADCAST]) {
2815		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2816		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2817	}
2818
2819	if (tb[IFLA_MASTER]) {
2820		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2821		if (err)
2822			goto errout;
2823		status |= DO_SETLINK_MODIFIED;
2824	}
2825
2826	if (ifm->ifi_flags || ifm->ifi_change) {
2827		err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2828				       extack);
2829		if (err < 0)
2830			goto errout;
2831	}
2832
2833	if (tb[IFLA_CARRIER]) {
2834		err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2835		if (err)
2836			goto errout;
2837		status |= DO_SETLINK_MODIFIED;
2838	}
2839
2840	if (tb[IFLA_TXQLEN]) {
2841		unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
2842
2843		err = dev_change_tx_queue_len(dev, value);
2844		if (err)
2845			goto errout;
2846		status |= DO_SETLINK_MODIFIED;
2847	}
2848
2849	if (tb[IFLA_GSO_MAX_SIZE]) {
2850		u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
2851
2852		if (max_size > dev->tso_max_size) {
2853			err = -EINVAL;
2854			goto errout;
2855		}
2856
2857		if (dev->gso_max_size ^ max_size) {
2858			netif_set_gso_max_size(dev, max_size);
2859			status |= DO_SETLINK_MODIFIED;
2860		}
2861	}
2862
2863	if (tb[IFLA_GSO_MAX_SEGS]) {
2864		u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2865
2866		if (max_segs > GSO_MAX_SEGS || max_segs > dev->tso_max_segs) {
2867			err = -EINVAL;
2868			goto errout;
2869		}
2870
2871		if (dev->gso_max_segs ^ max_segs) {
2872			netif_set_gso_max_segs(dev, max_segs);
2873			status |= DO_SETLINK_MODIFIED;
2874		}
2875	}
2876
2877	if (tb[IFLA_GRO_MAX_SIZE]) {
2878		u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
2879
2880		if (dev->gro_max_size ^ gro_max_size) {
2881			netif_set_gro_max_size(dev, gro_max_size);
2882			status |= DO_SETLINK_MODIFIED;
2883		}
2884	}
2885
2886	if (tb[IFLA_OPERSTATE])
2887		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2888
2889	if (tb[IFLA_LINKMODE]) {
2890		unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2891
2892		write_lock(&dev_base_lock);
2893		if (dev->link_mode ^ value)
2894			status |= DO_SETLINK_NOTIFY;
2895		dev->link_mode = value;
2896		write_unlock(&dev_base_lock);
2897	}
2898
2899	if (tb[IFLA_VFINFO_LIST]) {
2900		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2901		struct nlattr *attr;
2902		int rem;
2903
2904		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2905			if (nla_type(attr) != IFLA_VF_INFO ||
2906			    nla_len(attr) < NLA_HDRLEN) {
2907				err = -EINVAL;
2908				goto errout;
2909			}
2910			err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
2911							  attr,
2912							  ifla_vf_policy,
2913							  NULL);
2914			if (err < 0)
2915				goto errout;
2916			err = do_setvfinfo(dev, vfinfo);
2917			if (err < 0)
2918				goto errout;
2919			status |= DO_SETLINK_NOTIFY;
2920		}
2921	}
2922	err = 0;
2923
2924	if (tb[IFLA_VF_PORTS]) {
2925		struct nlattr *port[IFLA_PORT_MAX+1];
2926		struct nlattr *attr;
2927		int vf;
2928		int rem;
2929
2930		err = -EOPNOTSUPP;
2931		if (!ops->ndo_set_vf_port)
2932			goto errout;
2933
2934		nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
2935			if (nla_type(attr) != IFLA_VF_PORT ||
2936			    nla_len(attr) < NLA_HDRLEN) {
2937				err = -EINVAL;
2938				goto errout;
2939			}
2940			err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2941							  attr,
2942							  ifla_port_policy,
2943							  NULL);
2944			if (err < 0)
2945				goto errout;
2946			if (!port[IFLA_PORT_VF]) {
2947				err = -EOPNOTSUPP;
2948				goto errout;
2949			}
2950			vf = nla_get_u32(port[IFLA_PORT_VF]);
2951			err = ops->ndo_set_vf_port(dev, vf, port);
2952			if (err < 0)
2953				goto errout;
2954			status |= DO_SETLINK_NOTIFY;
2955		}
2956	}
2957	err = 0;
2958
2959	if (tb[IFLA_PORT_SELF]) {
2960		struct nlattr *port[IFLA_PORT_MAX+1];
2961
2962		err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2963						  tb[IFLA_PORT_SELF],
2964						  ifla_port_policy, NULL);
2965		if (err < 0)
2966			goto errout;
2967
2968		err = -EOPNOTSUPP;
2969		if (ops->ndo_set_vf_port)
2970			err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
2971		if (err < 0)
2972			goto errout;
2973		status |= DO_SETLINK_NOTIFY;
2974	}
2975
2976	if (tb[IFLA_AF_SPEC]) {
2977		struct nlattr *af;
2978		int rem;
2979
2980		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2981			const struct rtnl_af_ops *af_ops;
2982
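			/* validate_linkmsg() at the top of do_setlink() has
			 * already confirmed that every family present in
			 * IFLA_AF_SPEC has registered af_ops, so a failed
			 * lookup here would be a kernel bug.
			 */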
2983			BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
2984
2985			err = af_ops->set_link_af(dev, af, extack);
2986			if (err < 0)
2987				goto errout;
2988
2989			status |= DO_SETLINK_NOTIFY;
2990		}
2991	}
2992	err = 0;
2993
2994	if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
2995		err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
2996					tb[IFLA_PROTO_DOWN_REASON], extack);
2997		if (err)
2998			goto errout;
2999		status |= DO_SETLINK_NOTIFY;
3000	}
3001
3002	if (tb[IFLA_XDP]) {
3003		struct nlattr *xdp[IFLA_XDP_MAX + 1];
3004		u32 xdp_flags = 0;
3005
3006		err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
3007						  tb[IFLA_XDP],
3008						  ifla_xdp_policy, NULL);
3009		if (err < 0)
3010			goto errout;
3011
3012		if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
3013			err = -EINVAL;
3014			goto errout;
3015		}
3016
3017		if (xdp[IFLA_XDP_FLAGS]) {
3018			xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
3019			if (xdp_flags & ~XDP_FLAGS_MASK) {
3020				err = -EINVAL;
3021				goto errout;
3022			}
3023			if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
3024				err = -EINVAL;
3025				goto errout;
3026			}
3027		}
3028
3029		if (xdp[IFLA_XDP_FD]) {
3030			int expected_fd = -1;
3031
3032			if (xdp_flags & XDP_FLAGS_REPLACE) {
3033				if (!xdp[IFLA_XDP_EXPECTED_FD]) {
3034					err = -EINVAL;
3035					goto errout;
3036				}
3037				expected_fd =
3038					nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
3039			}
3040
3041			err = dev_change_xdp_fd(dev, extack,
3042						nla_get_s32(xdp[IFLA_XDP_FD]),
3043						expected_fd,
3044						xdp_flags);
3045			if (err)
3046				goto errout;
3047			status |= DO_SETLINK_NOTIFY;
3048		}
3049	}
3050
3051errout:
3052	if (status & DO_SETLINK_MODIFIED) {
3053		if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
3054			netdev_state_change(dev);
3055
3056		if (err < 0)
3057			net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
3058					     dev->name);
3059	}
3060
3061	return err;
3062}
3063
3064static struct net_device *rtnl_dev_get(struct net *net,
3065				       struct nlattr *tb[])
3066{
3067	char ifname[ALTIFNAMSIZ];
3068
3069	if (tb[IFLA_IFNAME])
3070		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3071	else if (tb[IFLA_ALT_IFNAME])
3072		nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
3073	else
3074		return NULL;
3075
3076	return __dev_get_by_name(net, ifname);
3077}
3078
3079static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3080			struct netlink_ext_ack *extack)
3081{
3082	struct net *net = sock_net(skb->sk);
3083	struct ifinfomsg *ifm;
3084	struct net_device *dev;
3085	int err;
3086	struct nlattr *tb[IFLA_MAX+1];
3087
3088	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3089				     ifla_policy, extack);
3090	if (err < 0)
3091		goto errout;
3092
3093	err = rtnl_ensure_unique_netns(tb, extack, false);
3094	if (err < 0)
3095		goto errout;
3096
3097	err = -EINVAL;
3098	ifm = nlmsg_data(nlh);
3099	if (ifm->ifi_index > 0)
3100		dev = __dev_get_by_index(net, ifm->ifi_index);
3101	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3102		dev = rtnl_dev_get(net, tb);
3103	else
3104		goto errout;
3105
3106	if (dev == NULL) {
3107		err = -ENODEV;
3108		goto errout;
3109	}
3110
3111	err = do_setlink(skb, dev, ifm, extack, tb, 0);
3112errout:
3113	return err;
3114}
3115
3116static int rtnl_group_dellink(const struct net *net, int group)
3117{
3118	struct net_device *dev, *aux;
3119	LIST_HEAD(list_kill);
3120	bool found = false;
3121
3122	if (!group)
3123		return -EPERM;
3124
3125	for_each_netdev(net, dev) {
3126		if (dev->group == group) {
3127			const struct rtnl_link_ops *ops;
3128
3129			found = true;
3130			ops = dev->rtnl_link_ops;
3131			if (!ops || !ops->dellink)
3132				return -EOPNOTSUPP;
3133		}
3134	}
3135
3136	if (!found)
3137		return -ENODEV;
3138
3139	for_each_netdev_safe(net, dev, aux) {
3140		if (dev->group == group) {
3141			const struct rtnl_link_ops *ops;
3142
3143			ops = dev->rtnl_link_ops;
3144			ops->dellink(dev, &list_kill);
3145		}
3146	}
3147	unregister_netdevice_many(&list_kill);
3148
3149	return 0;
3150}
3151
3152int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh)
3153{
3154	const struct rtnl_link_ops *ops;
3155	LIST_HEAD(list_kill);
3156
3157	ops = dev->rtnl_link_ops;
3158	if (!ops || !ops->dellink)
3159		return -EOPNOTSUPP;
3160
3161	ops->dellink(dev, &list_kill);
3162	unregister_netdevice_many_notify(&list_kill, portid, nlh);
3163
3164	return 0;
3165}
3166EXPORT_SYMBOL_GPL(rtnl_delete_link);
3167
3168static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3169			struct netlink_ext_ack *extack)
3170{
3171	struct net *net = sock_net(skb->sk);
3172	u32 portid = NETLINK_CB(skb).portid;
3173	struct net *tgt_net = net;
3174	struct net_device *dev = NULL;
3175	struct ifinfomsg *ifm;
3176	struct nlattr *tb[IFLA_MAX+1];
3177	int err;
3178	int netnsid = -1;
3179
3180	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3181				     ifla_policy, extack);
3182	if (err < 0)
3183		return err;
3184
3185	err = rtnl_ensure_unique_netns(tb, extack, true);
3186	if (err < 0)
3187		return err;
3188
3189	if (tb[IFLA_TARGET_NETNSID]) {
3190		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3191		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3192		if (IS_ERR(tgt_net))
3193			return PTR_ERR(tgt_net);
3194	}
3195
3196	err = -EINVAL;
3197	ifm = nlmsg_data(nlh);
3198	if (ifm->ifi_index > 0)
3199		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3200	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3201		dev = rtnl_dev_get(net, tb);
3202	else if (tb[IFLA_GROUP])
3203		err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
3204	else
3205		goto out;
3206
3207	if (!dev) {
3208		if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
3209			err = -ENODEV;
3210
3211		goto out;
3212	}
3213
3214	err = rtnl_delete_link(dev, portid, nlh);
3215
3216out:
3217	if (netnsid >= 0)
3218		put_net(tgt_net);
3219
3220	return err;
3221}
3222
3223int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
3224			u32 portid, const struct nlmsghdr *nlh)
3225{
3226	unsigned int old_flags;
3227	int err;
3228
3229	old_flags = dev->flags;
3230	if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
3231		err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3232					 NULL);
3233		if (err < 0)
3234			return err;
3235	}
3236
3237	if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
3238		__dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh);
3239	} else {
3240		dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
3241		__dev_notify_flags(dev, old_flags, ~0U, portid, nlh);
3242	}
3243	return 0;
3244}
3245EXPORT_SYMBOL(rtnl_configure_link);
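
/* For a freshly created link (state still RTNL_LINK_INITIALIZING) the
 * notification above uses a ~0U change mask so listeners receive the
 * complete initial state; later calls only advertise the flag bits that
 * actually flipped.
 */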
3246
3247struct net_device *rtnl_create_link(struct net *net, const char *ifname,
3248				    unsigned char name_assign_type,
3249				    const struct rtnl_link_ops *ops,
3250				    struct nlattr *tb[],
3251				    struct netlink_ext_ack *extack)
3252{
3253	struct net_device *dev;
3254	unsigned int num_tx_queues = 1;
3255	unsigned int num_rx_queues = 1;
3256
3257	if (tb[IFLA_NUM_TX_QUEUES])
3258		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
3259	else if (ops->get_num_tx_queues)
3260		num_tx_queues = ops->get_num_tx_queues();
3261
3262	if (tb[IFLA_NUM_RX_QUEUES])
3263		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
3264	else if (ops->get_num_rx_queues)
3265		num_rx_queues = ops->get_num_rx_queues();
3266
3267	if (num_tx_queues < 1 || num_tx_queues > 4096) {
3268		NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
3269		return ERR_PTR(-EINVAL);
3270	}
3271
3272	if (num_rx_queues < 1 || num_rx_queues > 4096) {
3273		NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
3274		return ERR_PTR(-EINVAL);
3275	}
3276
3277	if (ops->alloc) {
3278		dev = ops->alloc(tb, ifname, name_assign_type,
3279				 num_tx_queues, num_rx_queues);
3280		if (IS_ERR(dev))
3281			return dev;
3282	} else {
3283		dev = alloc_netdev_mqs(ops->priv_size, ifname,
3284				       name_assign_type, ops->setup,
3285				       num_tx_queues, num_rx_queues);
3286	}
3287
3288	if (!dev)
3289		return ERR_PTR(-ENOMEM);
3290
3291	dev_net_set(dev, net);
3292	dev->rtnl_link_ops = ops;
3293	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
3294
3295	if (tb[IFLA_MTU]) {
3296		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
3297		int err;
3298
3299		err = dev_validate_mtu(dev, mtu, extack);
3300		if (err) {
3301			free_netdev(dev);
3302			return ERR_PTR(err);
3303		}
3304		dev->mtu = mtu;
3305	}
3306	if (tb[IFLA_ADDRESS]) {
3307		__dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
3308			       nla_len(tb[IFLA_ADDRESS]));
3309		dev->addr_assign_type = NET_ADDR_SET;
3310	}
3311	if (tb[IFLA_BROADCAST])
3312		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3313				nla_len(tb[IFLA_BROADCAST]));
3314	if (tb[IFLA_TXQLEN])
3315		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3316	if (tb[IFLA_OPERSTATE])
3317		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3318	if (tb[IFLA_LINKMODE])
3319		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
3320	if (tb[IFLA_GROUP])
3321		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3322	if (tb[IFLA_GSO_MAX_SIZE])
3323		netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3324	if (tb[IFLA_GSO_MAX_SEGS])
3325		netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
3326	if (tb[IFLA_GRO_MAX_SIZE])
3327		netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
3328
3329	return dev;
3330}
3331EXPORT_SYMBOL(rtnl_create_link);
3332
3333static int rtnl_group_changelink(const struct sk_buff *skb,
3334		struct net *net, int group,
3335		struct ifinfomsg *ifm,
3336		struct netlink_ext_ack *extack,
3337		struct nlattr **tb)
3338{
3339	struct net_device *dev, *aux;
3340	int err;
3341
3342	for_each_netdev_safe(net, dev, aux) {
3343		if (dev->group == group) {
3344			err = do_setlink(skb, dev, ifm, extack, tb, 0);
3345			if (err < 0)
3346				return err;
3347		}
3348	}
3349
3350	return 0;
3351}
3352
3353static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
3354			       const struct rtnl_link_ops *ops,
3355			       const struct nlmsghdr *nlh,
3356			       struct nlattr **tb, struct nlattr **data,
3357			       struct netlink_ext_ack *extack)
3358{
3359	unsigned char name_assign_type = NET_NAME_USER;
3360	struct net *net = sock_net(skb->sk);
3361	u32 portid = NETLINK_CB(skb).portid;
3362	struct net *dest_net, *link_net;
3363	struct net_device *dev;
3364	char ifname[IFNAMSIZ];
3365	int err;
3366
3367	if (!ops->alloc && !ops->setup)
3368		return -EOPNOTSUPP;
3369
3370	if (tb[IFLA_IFNAME]) {
3371		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3372	} else {
3373		snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3374		name_assign_type = NET_NAME_ENUM;
3375	}
3376
3377	dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3378	if (IS_ERR(dest_net))
3379		return PTR_ERR(dest_net);
3380
3381	if (tb[IFLA_LINK_NETNSID]) {
3382		int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
3383
3384		link_net = get_net_ns_by_id(dest_net, id);
3385		if (!link_net) {
3386			NL_SET_ERR_MSG(extack, "Unknown network namespace id");
3387			err = -EINVAL;
3388			goto out;
3389		}
3390		err = -EPERM;
3391		if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
3392			goto out;
3393	} else {
3394		link_net = NULL;
3395	}
3396
3397	dev = rtnl_create_link(link_net ? : dest_net, ifname,
3398			       name_assign_type, ops, tb, extack);
3399	if (IS_ERR(dev)) {
3400		err = PTR_ERR(dev);
3401		goto out;
3402	}
3403
3404	dev->ifindex = ifm->ifi_index;
3405
3406	if (ops->newlink)
3407		err = ops->newlink(link_net ? : net, dev, tb, data, extack);
3408	else
3409		err = register_netdevice(dev);
3410	if (err < 0) {
3411		free_netdev(dev);
3412		goto out;
3413	}
3414
3415	err = rtnl_configure_link(dev, ifm, portid, nlh);
3416	if (err < 0)
3417		goto out_unregister;
3418	if (link_net) {
3419		err = dev_change_net_namespace(dev, dest_net, ifname);
3420		if (err < 0)
3421			goto out_unregister;
3422	}
3423	if (tb[IFLA_MASTER]) {
3424		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3425		if (err)
3426			goto out_unregister;
3427	}
3428out:
3429	if (link_net)
3430		put_net(link_net);
3431	put_net(dest_net);
3432	return err;
3433out_unregister:
3434	if (ops->newlink) {
3435		LIST_HEAD(list_kill);
3436
3437		ops->dellink(dev, &list_kill);
3438		unregister_netdevice_many(&list_kill);
3439	} else {
3440		unregister_netdevice(dev);
3441	}
3442	goto out;
3443}
3444
3445struct rtnl_newlink_tbs {
3446	struct nlattr *tb[IFLA_MAX + 1];
3447	struct nlattr *attr[RTNL_MAX_TYPE + 1];
3448	struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
3449};
3450
3451static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3452			  struct rtnl_newlink_tbs *tbs,
3453			  struct netlink_ext_ack *extack)
3454{
3455	struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
3456	struct nlattr ** const tb = tbs->tb;
3457	const struct rtnl_link_ops *m_ops;
3458	struct net_device *master_dev;
3459	struct net *net = sock_net(skb->sk);
3460	const struct rtnl_link_ops *ops;
3461	struct nlattr **slave_data;
3462	char kind[MODULE_NAME_LEN];
3463	struct net_device *dev;
3464	struct ifinfomsg *ifm;
3465	struct nlattr **data;
3466	bool link_specified;
3467	int err;
3468
3469#ifdef CONFIG_MODULES
3470replay:
3471#endif
3472	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3473				     ifla_policy, extack);
3474	if (err < 0)
3475		return err;
3476
3477	err = rtnl_ensure_unique_netns(tb, extack, false);
3478	if (err < 0)
3479		return err;
3480
3481	ifm = nlmsg_data(nlh);
3482	if (ifm->ifi_index > 0) {
3483		link_specified = true;
3484		dev = __dev_get_by_index(net, ifm->ifi_index);
3485	} else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
3486		link_specified = true;
3487		dev = rtnl_dev_get(net, tb);
3488	} else {
3489		link_specified = false;
3490		dev = NULL;
3491	}
3492
3493	master_dev = NULL;
3494	m_ops = NULL;
3495	if (dev) {
3496		master_dev = netdev_master_upper_dev_get(dev);
3497		if (master_dev)
3498			m_ops = master_dev->rtnl_link_ops;
3499	}
3500
3501	err = validate_linkmsg(dev, tb, extack);
3502	if (err < 0)
3503		return err;
3504
3505	if (tb[IFLA_LINKINFO]) {
3506		err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
3507						  tb[IFLA_LINKINFO],
3508						  ifla_info_policy, NULL);
3509		if (err < 0)
3510			return err;
3511	} else
3512		memset(linkinfo, 0, sizeof(linkinfo));
3513
3514	if (linkinfo[IFLA_INFO_KIND]) {
3515		nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
3516		ops = rtnl_link_ops_get(kind);
3517	} else {
3518		kind[0] = '\0';
3519		ops = NULL;
3520	}
3521
3522	data = NULL;
3523	if (ops) {
3524		if (ops->maxtype > RTNL_MAX_TYPE)
3525			return -EINVAL;
3526
3527		if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
3528			err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
3529							  linkinfo[IFLA_INFO_DATA],
3530							  ops->policy, extack);
3531			if (err < 0)
3532				return err;
3533			data = tbs->attr;
3534		}
3535		if (ops->validate) {
3536			err = ops->validate(tb, data, extack);
3537			if (err < 0)
3538				return err;
3539		}
3540	}
3541
3542	slave_data = NULL;
3543	if (m_ops) {
3544		if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3545			return -EINVAL;
3546
3547		if (m_ops->slave_maxtype &&
3548		    linkinfo[IFLA_INFO_SLAVE_DATA]) {
3549			err = nla_parse_nested_deprecated(tbs->slave_attr,
3550							  m_ops->slave_maxtype,
3551							  linkinfo[IFLA_INFO_SLAVE_DATA],
3552							  m_ops->slave_policy,
3553							  extack);
3554			if (err < 0)
3555				return err;
3556			slave_data = tbs->slave_attr;
3557		}
3558	}
3559
3560	if (dev) {
3561		int status = 0;
3562
3563		if (nlh->nlmsg_flags & NLM_F_EXCL)
3564			return -EEXIST;
3565		if (nlh->nlmsg_flags & NLM_F_REPLACE)
3566			return -EOPNOTSUPP;
3567
3568		if (linkinfo[IFLA_INFO_DATA]) {
3569			if (!ops || ops != dev->rtnl_link_ops ||
3570			    !ops->changelink)
3571				return -EOPNOTSUPP;
3572
3573			err = ops->changelink(dev, tb, data, extack);
3574			if (err < 0)
3575				return err;
3576			status |= DO_SETLINK_NOTIFY;
3577		}
3578
3579		if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3580			if (!m_ops || !m_ops->slave_changelink)
3581				return -EOPNOTSUPP;
3582
3583			err = m_ops->slave_changelink(master_dev, dev, tb,
3584						      slave_data, extack);
3585			if (err < 0)
3586				return err;
3587			status |= DO_SETLINK_NOTIFY;
3588		}
3589
3590		return do_setlink(skb, dev, ifm, extack, tb, status);
3591	}
3592
3593	if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
3594		/* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
3595		 * or it's for a group.
3596		 */
3597		if (link_specified)
3598			return -ENODEV;
3599		if (tb[IFLA_GROUP])
3600			return rtnl_group_changelink(skb, net,
3601						nla_get_u32(tb[IFLA_GROUP]),
3602						ifm, extack, tb);
3603		return -ENODEV;
3604	}
3605
3606	if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3607		return -EOPNOTSUPP;
3608
3609	if (!ops) {
3610#ifdef CONFIG_MODULES
3611		if (kind[0]) {
3612			__rtnl_unlock();
3613			request_module("rtnl-link-%s", kind);
3614			rtnl_lock();
3615			ops = rtnl_link_ops_get(kind);
3616			if (ops)
3617				goto replay;
3618		}
3619#endif
3620		NL_SET_ERR_MSG(extack, "Unknown device type");
3621		return -EOPNOTSUPP;
3622	}
3623
3624	return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack);
3625}
3626
3627static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3628			struct netlink_ext_ack *extack)
3629{
3630	struct rtnl_newlink_tbs *tbs;
3631	int ret;
3632
3633	tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
3634	if (!tbs)
3635		return -ENOMEM;
3636
3637	ret = __rtnl_newlink(skb, nlh, tbs, extack);
3638	kfree(tbs);
3639	return ret;
3640}
3641
3642static int rtnl_valid_getlink_req(struct sk_buff *skb,
3643				  const struct nlmsghdr *nlh,
3644				  struct nlattr **tb,
3645				  struct netlink_ext_ack *extack)
3646{
3647	struct ifinfomsg *ifm;
3648	int i, err;
3649
3650	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
3651		NL_SET_ERR_MSG(extack, "Invalid header for get link");
3652		return -EINVAL;
3653	}
3654
3655	if (!netlink_strict_get_check(skb))
3656		return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3657					      ifla_policy, extack);
3658
3659	ifm = nlmsg_data(nlh);
3660	if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
3661	    ifm->ifi_change) {
3662		NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
3663		return -EINVAL;
3664	}
3665
3666	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
3667					    ifla_policy, extack);
3668	if (err)
3669		return err;
3670
3671	for (i = 0; i <= IFLA_MAX; i++) {
3672		if (!tb[i])
3673			continue;
3674
3675		switch (i) {
3676		case IFLA_IFNAME:
3677		case IFLA_ALT_IFNAME:
3678		case IFLA_EXT_MASK:
3679		case IFLA_TARGET_NETNSID:
3680			break;
3681		default:
3682			NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
3683			return -EINVAL;
3684		}
3685	}
3686
3687	return 0;
3688}
3689
3690static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3691			struct netlink_ext_ack *extack)
3692{
3693	struct net *net = sock_net(skb->sk);
3694	struct net *tgt_net = net;
3695	struct ifinfomsg *ifm;
3696	struct nlattr *tb[IFLA_MAX+1];
3697	struct net_device *dev = NULL;
3698	struct sk_buff *nskb;
3699	int netnsid = -1;
3700	int err;
3701	u32 ext_filter_mask = 0;
3702
3703	err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
3704	if (err < 0)
3705		return err;
3706
3707	err = rtnl_ensure_unique_netns(tb, extack, true);
3708	if (err < 0)
3709		return err;
3710
3711	if (tb[IFLA_TARGET_NETNSID]) {
3712		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3713		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3714		if (IS_ERR(tgt_net))
3715			return PTR_ERR(tgt_net);
3716	}
3717
3718	if (tb[IFLA_EXT_MASK])
3719		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3720
3721	err = -EINVAL;
3722	ifm = nlmsg_data(nlh);
3723	if (ifm->ifi_index > 0)
3724		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3725	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3726		dev = rtnl_dev_get(tgt_net, tb);
3727	else
3728		goto out;
3729
3730	err = -ENODEV;
3731	if (dev == NULL)
3732		goto out;
3733
3734	err = -ENOBUFS;
3735	nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
3736	if (nskb == NULL)
3737		goto out;
3738
3739	err = rtnl_fill_ifinfo(nskb, dev, net,
3740			       RTM_NEWLINK, NETLINK_CB(skb).portid,
3741			       nlh->nlmsg_seq, 0, 0, ext_filter_mask,
3742			       0, NULL, 0, netnsid, GFP_KERNEL);
3743	if (err < 0) {
3744		/* -EMSGSIZE implies BUG in if_nlmsg_size */
3745		WARN_ON(err == -EMSGSIZE);
3746		kfree_skb(nskb);
3747	} else
3748		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
3749out:
3750	if (netnsid >= 0)
3751		put_net(tgt_net);
3752
3753	return err;
3754}
3755
3756static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
3757			   bool *changed, struct netlink_ext_ack *extack)
3758{
3759	char *alt_ifname;
3760	size_t size;
3761	int err;
3762
3763	err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
3764	if (err)
3765		return err;
3766
3767	if (cmd == RTM_NEWLINKPROP) {
3768		size = rtnl_prop_list_size(dev);
3769		size += nla_total_size(ALTIFNAMSIZ);
3770		if (size >= U16_MAX) {
3771			NL_SET_ERR_MSG(extack,
3772				       "effective property list too long");
3773			return -EINVAL;
3774		}
3775	}
3776
3777	alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
3778	if (!alt_ifname)
3779		return -ENOMEM;
3780
3781	if (cmd == RTM_NEWLINKPROP) {
3782		err = netdev_name_node_alt_create(dev, alt_ifname);
3783		if (!err)
3784			alt_ifname = NULL;
3785	} else if (cmd == RTM_DELLINKPROP) {
3786		err = netdev_name_node_alt_destroy(dev, alt_ifname);
3787	} else {
3788		WARN_ON_ONCE(1);
3789		err = -EINVAL;
3790	}
3791
3792	kfree(alt_ifname);
3793	if (!err)
3794		*changed = true;
3795	return err;
3796}
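
/* For example, "ip link property add dev eth0 altname my-long-name" lands
 * here as RTM_NEWLINKPROP with IFLA_ALT_IFNAME nested in IFLA_PROP_LIST.
 * The U16_MAX bound exists because an attribute's length field is 16 bits,
 * so the whole property list must stay dumpable as one nested attribute.
 */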
3797
3798static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
3799			 struct netlink_ext_ack *extack)
3800{
3801	struct net *net = sock_net(skb->sk);
3802	struct nlattr *tb[IFLA_MAX + 1];
3803	struct net_device *dev;
3804	struct ifinfomsg *ifm;
3805	bool changed = false;
3806	struct nlattr *attr;
3807	int err, rem;
3808
3809	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
3810	if (err)
3811		return err;
3812
3813	err = rtnl_ensure_unique_netns(tb, extack, true);
3814	if (err)
3815		return err;
3816
3817	ifm = nlmsg_data(nlh);
3818	if (ifm->ifi_index > 0)
3819		dev = __dev_get_by_index(net, ifm->ifi_index);
3820	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3821		dev = rtnl_dev_get(net, tb);
3822	else
3823		return -EINVAL;
3824
3825	if (!dev)
3826		return -ENODEV;
3827
3828	if (!tb[IFLA_PROP_LIST])
3829		return 0;
3830
3831	nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
3832		switch (nla_type(attr)) {
3833		case IFLA_ALT_IFNAME:
3834			err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
3835			if (err)
3836				return err;
3837			break;
3838		}
3839	}
3840
3841	if (changed)
3842		netdev_state_change(dev);
3843	return 0;
3844}
3845
3846static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3847			    struct netlink_ext_ack *extack)
3848{
3849	return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
3850}
3851
3852static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3853			    struct netlink_ext_ack *extack)
3854{
3855	return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
3856}
3857
3858static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
3859{
3860	struct net *net = sock_net(skb->sk);
3861	size_t min_ifinfo_dump_size = 0;
3862	struct nlattr *tb[IFLA_MAX+1];
3863	u32 ext_filter_mask = 0;
3864	struct net_device *dev;
3865	int hdrlen;
3866
3867	/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
3868	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
3869		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
3870
3871	if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
3872		if (tb[IFLA_EXT_MASK])
3873			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3874	}
3875
3876	if (!ext_filter_mask)
3877		return NLMSG_GOODSIZE;
3878	/*
3879	 * traverse the list of net devices and compute the minimum
3880	 * buffer size based upon the filter mask.
3881	 */
3882	rcu_read_lock();
3883	for_each_netdev_rcu(net, dev) {
3884		min_ifinfo_dump_size = max(min_ifinfo_dump_size,
3885					   if_nlmsg_size(dev, ext_filter_mask));
3886	}
3887	rcu_read_unlock();
3888
3889	return nlmsg_total_size(min_ifinfo_dump_size);
3890}
3891
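/*
 * Editorial sketch, not part of rtnetlink.c: a link dump request whose
 * buffer sizing goes through rtnl_calcit() above.  IFLA_EXT_MASK widens
 * each per-link reply (here with RTEXT_FILTER_VF).  Assumes libmnl;
 * error handling is abbreviated.
 */
#include <libmnl/libmnl.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>
#include <stdio.h>
#include <time.h>

static int link_cb(const struct nlmsghdr *nlh, void *data)
{
	const struct ifinfomsg *ifm = mnl_nlmsg_get_payload(nlh);

	printf("ifindex %d, flags 0x%x\n", ifm->ifi_index, ifm->ifi_flags);
	return MNL_CB_OK;
}

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct ifinfomsg *ifm;
	unsigned int seq, portid;
	ssize_t ret;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type	 = RTM_GETLINK;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	nlh->nlmsg_seq	 = seq = time(NULL);

	ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
	ifm->ifi_family = AF_UNSPEC;

	/* ask the kernel to include VF details in each reply */
	mnl_attr_put_u32(nlh, IFLA_EXT_MASK, RTEXT_FILTER_VF);

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
		return 1;
	portid = mnl_socket_get_portid(nl);

	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);

	/* standard multipart dump loop */
	while ((ret = mnl_socket_recvfrom(nl, buf, sizeof(buf))) > 0) {
		ret = mnl_cb_run(buf, ret, seq, portid, link_cb, NULL);
		if (ret <= MNL_CB_STOP)
			break;
	}

	mnl_socket_close(nl);
	return 0;
}
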
3892static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
3893{
3894	int idx;
3895	int s_idx = cb->family;
3896	int type = cb->nlh->nlmsg_type - RTM_BASE;
3897	int ret = 0;
3898
3899	if (s_idx == 0)
3900		s_idx = 1;
3901
3902	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
3903		struct rtnl_link __rcu **tab;
3904		struct rtnl_link *link;
3905		rtnl_dumpit_func dumpit;
3906
3907		if (idx < s_idx || idx == PF_PACKET)
3908			continue;
3909
3910		if (type < 0 || type >= RTM_NR_MSGTYPES)
3911			continue;
3912
3913		tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
3914		if (!tab)
3915			continue;
3916
3917		link = rcu_dereference_rtnl(tab[type]);
3918		if (!link)
3919			continue;
3920
3921		dumpit = link->dumpit;
3922		if (!dumpit)
3923			continue;
3924
3925		if (idx > s_idx) {
3926			memset(&cb->args[0], 0, sizeof(cb->args));
3927			cb->prev_seq = 0;
3928			cb->seq = 0;
3929		}
3930		ret = dumpit(skb, cb);
3931		if (ret)
3932			break;
3933	}
3934	cb->family = idx;
3935
3936	return skb->len ? : ret;
3937}
3938
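/*
 * Editorial sketch, not part of rtnetlink.c: an AF_UNSPEC address dump,
 * which rtnl_dump_all() above satisfies by walking every registered
 * family's dumpit.  Assumes libmnl; error handling is abbreviated.
 */
#include <libmnl/libmnl.h>
#include <linux/rtnetlink.h>
#include <linux/if_addr.h>
#include <stdio.h>
#include <time.h>

static int addr_cb(const struct nlmsghdr *nlh, void *data)
{
	const struct ifaddrmsg *ifa = mnl_nlmsg_get_payload(nlh);

	printf("family %u ifindex %u prefixlen %u\n",
	       ifa->ifa_family, ifa->ifa_index, ifa->ifa_prefixlen);
	return MNL_CB_OK;
}

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct rtgenmsg *rt;
	unsigned int seq, portid;
	ssize_t ret;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type	 = RTM_GETADDR;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	nlh->nlmsg_seq	 = seq = time(NULL);

	/* rtgenmsg with AF_UNSPEC: "all families", see rtnl_dump_all() */
	rt = mnl_nlmsg_put_extra_header(nlh, sizeof(*rt));
	rt->rtgen_family = AF_UNSPEC;

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
		return 1;
	portid = mnl_socket_get_portid(nl);

	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);

	while ((ret = mnl_socket_recvfrom(nl, buf, sizeof(buf))) > 0) {
		ret = mnl_cb_run(buf, ret, seq, portid, addr_cb, NULL);
		if (ret <= MNL_CB_STOP)
			break;
	}

	mnl_socket_close(nl);
	return 0;
}
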
3939struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3940				       unsigned int change,
3941				       u32 event, gfp_t flags, int *new_nsid,
3942				       int new_ifindex, u32 portid, u32 seq)
3943{
3944	struct net *net = dev_net(dev);
3945	struct sk_buff *skb;
3946	int err = -ENOBUFS;
3947
3948	skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
3949	if (skb == NULL)
3950		goto errout;
3951
3952	err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
3953			       type, portid, seq, change, 0, 0, event,
3954			       new_nsid, new_ifindex, -1, flags);
3955	if (err < 0) {
3956		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
3957		WARN_ON(err == -EMSGSIZE);
3958		kfree_skb(skb);
3959		goto errout;
3960	}
3961	return skb;
3962errout:
3963	if (err < 0)
3964		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
3965	return NULL;
3966}
3967
3968void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags,
3969		       u32 portid, const struct nlmsghdr *nlh)
3970{
3971	struct net *net = dev_net(dev);
3972
3973	rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags);
3974}
3975
3976static void rtmsg_ifinfo_event(int type, struct net_device *dev,
3977			       unsigned int change, u32 event,
3978			       gfp_t flags, int *new_nsid, int new_ifindex,
3979			       u32 portid, const struct nlmsghdr *nlh)
3980{
3981	struct sk_buff *skb;
3982
3983	if (dev->reg_state != NETREG_REGISTERED)
3984		return;
3985
3986	skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
3987				     new_ifindex, portid, nlmsg_seq(nlh));
3988	if (skb)
3989		rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
3990}
3991
3992void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
3993		  gfp_t flags, u32 portid, const struct nlmsghdr *nlh)
3994{
3995	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3996			   NULL, 0, portid, nlh);
3997}
3998
3999void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
4000			 gfp_t flags, int *new_nsid, int new_ifindex)
4001{
4002	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
4003			   new_nsid, new_ifindex, 0, NULL);
4004}
4005
4006static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
4007				   struct net_device *dev,
4008				   u8 *addr, u16 vid, u32 pid, u32 seq,
4009				   int type, unsigned int flags,
4010				   int nlflags, u16 ndm_state)
4011{
4012	struct nlmsghdr *nlh;
4013	struct ndmsg *ndm;
4014
4015	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
4016	if (!nlh)
4017		return -EMSGSIZE;
4018
4019	ndm = nlmsg_data(nlh);
4020	ndm->ndm_family  = AF_BRIDGE;
4021	ndm->ndm_pad1	 = 0;
4022	ndm->ndm_pad2    = 0;
4023	ndm->ndm_flags	 = flags;
4024	ndm->ndm_type	 = 0;
4025	ndm->ndm_ifindex = dev->ifindex;
4026	ndm->ndm_state   = ndm_state;
4027
4028	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
4029		goto nla_put_failure;
4030	if (vid)
4031		if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
4032			goto nla_put_failure;
4033
4034	nlmsg_end(skb, nlh);
4035	return 0;
4036
4037nla_put_failure:
4038	nlmsg_cancel(skb, nlh);
4039	return -EMSGSIZE;
4040}
4041
4042static inline size_t rtnl_fdb_nlmsg_size(void)
4043{
4044	return NLMSG_ALIGN(sizeof(struct ndmsg)) +
4045	       nla_total_size(ETH_ALEN) +	/* NDA_LLADDR */
4046	       nla_total_size(sizeof(u16)) +	/* NDA_VLAN */
4047	       0;
4048}
4049
4050static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
4051			    u16 ndm_state)
4052{
4053	struct net *net = dev_net(dev);
4054	struct sk_buff *skb;
4055	int err = -ENOBUFS;
4056
4057	skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
4058	if (!skb)
4059		goto errout;
4060
4061	err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
4062				      0, 0, type, NTF_SELF, 0, ndm_state);
4063	if (err < 0) {
4064		kfree_skb(skb);
4065		goto errout;
4066	}
4067
4068	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
4069	return;
4070errout:
4071	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
4072}
4073
4074/*
4075 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
4076 */
4077int ndo_dflt_fdb_add(struct ndmsg *ndm,
4078		     struct nlattr *tb[],
4079		     struct net_device *dev,
4080		     const unsigned char *addr, u16 vid,
4081		     u16 flags)
4082{
4083	int err = -EINVAL;
4084
4085	/* If aging addresses are supported, the device will need to
4086	 * implement its own handler for this.
4087	 */
4088	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
4089		netdev_info(dev, "default FDB implementation only supports local addresses\n");
4090		return err;
4091	}
4092
4093	if (tb[NDA_FLAGS_EXT]) {
4094		netdev_info(dev, "invalid flags given to default FDB implementation\n");
4095		return err;
4096	}
4097
4098	if (vid) {
4099		netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
4100		return err;
4101	}
4102
4103	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4104		err = dev_uc_add_excl(dev, addr);
4105	else if (is_multicast_ether_addr(addr))
4106		err = dev_mc_add_excl(dev, addr);
4107
4108	/* Only return duplicate errors if NLM_F_EXCL is set */
4109	if (err == -EEXIST && !(flags & NLM_F_EXCL))
4110		err = 0;
4111
4112	return err;
4113}
4114EXPORT_SYMBOL(ndo_dflt_fdb_add);
4115
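/*
 * Editorial sketch, not part of rtnetlink.c: the message that reaches
 * rtnl_fdb_add()/ndo_dflt_fdb_add() above, roughly what
 * "bridge fdb add <mac> dev <ifname> self permanent" sends.  Needs
 * CAP_NET_ADMIN.  Assumes libmnl; error handling is abbreviated.
 */
#include <libmnl/libmnl.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <stdio.h>
#include <time.h>

int main(int argc, char *argv[])
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	unsigned char mac[ETH_ALEN];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	unsigned int seq, portid;
	ssize_t ret;

	if (argc != 3 ||
	    sscanf(argv[2], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
		   &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]) != 6) {
		fprintf(stderr, "usage: %s <ifname> <xx:xx:xx:xx:xx:xx>\n",
			argv[0]);
		return 1;
	}

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type	 = RTM_NEWNEIGH;
	/* NLM_F_EXCL makes duplicates visible, see ndo_dflt_fdb_add() */
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK |
			   NLM_F_CREATE | NLM_F_EXCL;
	nlh->nlmsg_seq	 = seq = time(NULL);

	ndm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ndm));
	ndm->ndm_family	 = PF_BRIDGE;
	ndm->ndm_ifindex = if_nametoindex(argv[1]);
	ndm->ndm_state	 = NUD_PERMANENT; /* only state the default handler takes */
	ndm->ndm_flags	 = NTF_SELF;	  /* program the device itself */

	mnl_attr_put(nlh, NDA_LLADDR, ETH_ALEN, mac);

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
		return 1;
	portid = mnl_socket_get_portid(nl);

	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);

	ret = mnl_socket_recvfrom(nl, buf, sizeof(buf));
	if (ret < 0 || mnl_cb_run(buf, ret, seq, portid, NULL, NULL) < 0)
		perror("RTM_NEWNEIGH");

	mnl_socket_close(nl);
	return 0;
}
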
4116static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
4117			 struct netlink_ext_ack *extack)
4118{
4119	u16 vid = 0;
4120
4121	if (vlan_attr) {
4122		if (nla_len(vlan_attr) != sizeof(u16)) {
4123			NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
4124			return -EINVAL;
4125		}
4126
4127		vid = nla_get_u16(vlan_attr);
4128
4129		if (!vid || vid >= VLAN_VID_MASK) {
4130			NL_SET_ERR_MSG(extack, "invalid vlan id");
4131			return -EINVAL;
4132		}
4133	}
4134	*p_vid = vid;
4135	return 0;
4136}
4137
4138static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
4139			struct netlink_ext_ack *extack)
4140{
4141	struct net *net = sock_net(skb->sk);
4142	struct ndmsg *ndm;
4143	struct nlattr *tb[NDA_MAX+1];
4144	struct net_device *dev;
4145	u8 *addr;
4146	u16 vid;
4147	int err;
4148
4149	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4150				     extack);
4151	if (err < 0)
4152		return err;
4153
4154	ndm = nlmsg_data(nlh);
4155	if (ndm->ndm_ifindex == 0) {
4156		NL_SET_ERR_MSG(extack, "invalid ifindex");
4157		return -EINVAL;
4158	}
4159
4160	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4161	if (dev == NULL) {
4162		NL_SET_ERR_MSG(extack, "unknown ifindex");
4163		return -ENODEV;
4164	}
4165
4166	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4167		NL_SET_ERR_MSG(extack, "invalid address");
4168		return -EINVAL;
4169	}
4170
4171	if (dev->type != ARPHRD_ETHER) {
4172		NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
4173		return -EINVAL;
4174	}
4175
4176	addr = nla_data(tb[NDA_LLADDR]);
4177
4178	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4179	if (err)
4180		return err;
4181
4182	err = -EOPNOTSUPP;
4183
4184	/* Support fdb on the master device: the net/bridge default case */
4185	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4186	    netif_is_bridge_port(dev)) {
4187		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4188		const struct net_device_ops *ops = br_dev->netdev_ops;
4189
4190		err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
4191				       nlh->nlmsg_flags, extack);
4192		if (err)
4193			goto out;
4194		else
4195			ndm->ndm_flags &= ~NTF_MASTER;
4196	}
4197
4198	/* Embedded bridge, macvlan, and any other device support */
4199	if ((ndm->ndm_flags & NTF_SELF)) {
4200		if (dev->netdev_ops->ndo_fdb_add)
4201			err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
4202							   vid,
4203							   nlh->nlmsg_flags,
4204							   extack);
4205		else
4206			err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
4207					       nlh->nlmsg_flags);
4208
4209		if (!err) {
4210			rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4211					ndm->ndm_state);
4212			ndm->ndm_flags &= ~NTF_SELF;
4213		}
4214	}
4215out:
4216	return err;
4217}
4218
4219/*
4220 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
4221 */
4222int ndo_dflt_fdb_del(struct ndmsg *ndm,
4223		     struct nlattr *tb[],
4224		     struct net_device *dev,
4225		     const unsigned char *addr, u16 vid)
4226{
4227	int err = -EINVAL;
4228
4229	/* If aging addresses are supported, the device will need to
4230	 * implement its own handler for this.
4231	 */
4232	if (!(ndm->ndm_state & NUD_PERMANENT)) {
4233		netdev_info(dev, "default FDB implementation only supports local addresses\n");
4234		return err;
4235	}
4236
4237	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4238		err = dev_uc_del(dev, addr);
4239	else if (is_multicast_ether_addr(addr))
4240		err = dev_mc_del(dev, addr);
4241
4242	return err;
4243}
4244EXPORT_SYMBOL(ndo_dflt_fdb_del);
4245
4246static const struct nla_policy fdb_del_bulk_policy[NDA_MAX + 1] = {
4247	[NDA_VLAN]	= { .type = NLA_U16 },
4248	[NDA_IFINDEX]	= NLA_POLICY_MIN(NLA_S32, 1),
4249	[NDA_NDM_STATE_MASK]	= { .type = NLA_U16  },
4250	[NDA_NDM_FLAGS_MASK]	= { .type = NLA_U8 },
4251};
4252
4253static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
4254			struct netlink_ext_ack *extack)
4255{
4256	bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
4257	struct net *net = sock_net(skb->sk);
4258	const struct net_device_ops *ops;
4259	struct ndmsg *ndm;
4260	struct nlattr *tb[NDA_MAX+1];
4261	struct net_device *dev;
4262	__u8 *addr = NULL;
4263	int err;
4264	u16 vid;
4265
4266	if (!netlink_capable(skb, CAP_NET_ADMIN))
4267		return -EPERM;
4268
4269	if (!del_bulk) {
4270		err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
4271					     NULL, extack);
4272	} else {
4273		err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX,
4274				  fdb_del_bulk_policy, extack);
4275	}
4276	if (err < 0)
4277		return err;
4278
4279	ndm = nlmsg_data(nlh);
4280	if (ndm->ndm_ifindex == 0) {
4281		NL_SET_ERR_MSG(extack, "invalid ifindex");
4282		return -EINVAL;
4283	}
4284
4285	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4286	if (dev == NULL) {
4287		NL_SET_ERR_MSG(extack, "unknown ifindex");
4288		return -ENODEV;
4289	}
4290
4291	if (!del_bulk) {
4292		if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4293			NL_SET_ERR_MSG(extack, "invalid address");
4294			return -EINVAL;
4295		}
4296		addr = nla_data(tb[NDA_LLADDR]);
4297	}
4298
4299	if (dev->type != ARPHRD_ETHER) {
4300		NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
4301		return -EINVAL;
4302	}
4303
4304	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4305	if (err)
4306		return err;
4307
4308	err = -EOPNOTSUPP;
4309
4310	/* Support fdb on the master device: the net/bridge default case */
4311	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4312	    netif_is_bridge_port(dev)) {
4313		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4314
4315		ops = br_dev->netdev_ops;
4316		if (!del_bulk) {
4317			if (ops->ndo_fdb_del)
4318				err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4319		} else {
4320			if (ops->ndo_fdb_del_bulk)
4321				err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
4322							    extack);
4323		}
4324
4325		if (err)
4326			goto out;
4327		else
4328			ndm->ndm_flags &= ~NTF_MASTER;
4329	}
4330
4331	/* Embedded bridge, macvlan, and any other device support */
4332	if (ndm->ndm_flags & NTF_SELF) {
4333		ops = dev->netdev_ops;
4334		if (!del_bulk) {
4335			if (ops->ndo_fdb_del)
4336				err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4337			else
4338				err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4339		} else {
4340			/* in case err was cleared by NTF_MASTER call */
4341			err = -EOPNOTSUPP;
4342			if (ops->ndo_fdb_del_bulk)
4343				err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
4344							    extack);
4345		}
4346
4347		if (!err) {
4348			if (!del_bulk)
4349				rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4350						ndm->ndm_state);
4351			ndm->ndm_flags &= ~NTF_SELF;
4352		}
4353	}
4354out:
4355	return err;
4356}
4357
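/*
 * Editorial sketch, not part of rtnetlink.c: a bulk delete as taken by
 * the del_bulk branch of rtnl_fdb_del() above, close to what
 * "bridge fdb flush dev <dev> self" sends.  The target must implement
 * ndo_fdb_del_bulk (the bridge device does); NLM_F_BULK requests are
 * validated against fdb_del_bulk_policy and need CAP_NET_ADMIN.
 * Assumes libmnl; error handling is abbreviated.
 */
#include <libmnl/libmnl.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>
#include <net/if.h>
#include <stdio.h>
#include <time.h>

int main(int argc, char *argv[])
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	unsigned int seq, portid;
	ssize_t ret;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <bridge>\n", argv[0]);
		return 1;
	}

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type	 = RTM_DELNEIGH;
	/* NLM_F_BULK selects the bulk path; note: no NDA_LLADDR at all */
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_BULK;
	nlh->nlmsg_seq	 = seq = time(NULL);

	ndm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ndm));
	ndm->ndm_family	 = PF_BRIDGE;
	ndm->ndm_ifindex = if_nametoindex(argv[1]);
	ndm->ndm_flags	 = NTF_SELF;

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
		return 1;
	portid = mnl_socket_get_portid(nl);

	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);

	ret = mnl_socket_recvfrom(nl, buf, sizeof(buf));
	if (ret < 0 || mnl_cb_run(buf, ret, seq, portid, NULL, NULL) < 0)
		perror("RTM_DELNEIGH (bulk)");

	mnl_socket_close(nl);
	return 0;
}
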
4358static int nlmsg_populate_fdb(struct sk_buff *skb,
4359			      struct netlink_callback *cb,
4360			      struct net_device *dev,
4361			      int *idx,
4362			      struct netdev_hw_addr_list *list)
4363{
4364	struct netdev_hw_addr *ha;
4365	int err;
4366	u32 portid, seq;
4367
4368	portid = NETLINK_CB(cb->skb).portid;
4369	seq = cb->nlh->nlmsg_seq;
4370
4371	list_for_each_entry(ha, &list->list, list) {
4372		if (*idx < cb->args[2])
4373			goto skip;
4374
4375		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
4376					      portid, seq,
4377					      RTM_NEWNEIGH, NTF_SELF,
4378					      NLM_F_MULTI, NUD_PERMANENT);
4379		if (err < 0)
4380			return err;
4381skip:
4382		*idx += 1;
4383	}
4384	return 0;
4385}
4386
4387/**
4388 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
4389 * @skb: socket buffer to store message in
4390 * @cb: netlink callback
4391 * @dev: netdevice
4392 * @filter_dev: ignored
4393 * @idx: the number of FDB table entries dumped is added to *@idx
4394 *
4395 * Default netdevice operation to dump the existing unicast address list.
4396 * Returns 0 on success, or a negative error code (e.g. -EMSGSIZE).
4397 */
4398int ndo_dflt_fdb_dump(struct sk_buff *skb,
4399		      struct netlink_callback *cb,
4400		      struct net_device *dev,
4401		      struct net_device *filter_dev,
4402		      int *idx)
4403{
4404	int err;
4405
4406	if (dev->type != ARPHRD_ETHER)
4407		return -EINVAL;
4408
4409	netif_addr_lock_bh(dev);
4410	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
4411	if (err)
4412		goto out;
4413	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
4414out:
4415	netif_addr_unlock_bh(dev);
4416	return err;
4417}
4418EXPORT_SYMBOL(ndo_dflt_fdb_dump);
4419
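/*
 * Editorial sketch, not part of rtnetlink.c: an FDB dump as served by
 * rtnl_fdb_dump()/ndo_dflt_fdb_dump() above, the transport behind
 * "bridge fdb show".  Assumes libmnl; error handling is abbreviated.
 */
#include <libmnl/libmnl.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>
#include <stdio.h>
#include <time.h>

static int fdb_attr_cb(const struct nlattr *attr, void *data)
{
	const struct nlattr **tb = data;

	if (mnl_attr_type_valid(attr, NDA_MAX) >= 0)
		tb[mnl_attr_get_type(attr)] = attr;
	return MNL_CB_OK;
}

static int fdb_cb(const struct nlmsghdr *nlh, void *data)
{
	const struct nlattr *tb[NDA_MAX + 1] = {};
	const struct ndmsg *ndm = mnl_nlmsg_get_payload(nlh);

	mnl_attr_parse(nlh, sizeof(*ndm), fdb_attr_cb, tb);
	if (tb[NDA_LLADDR]) {
		const unsigned char *mac = mnl_attr_get_payload(tb[NDA_LLADDR]);

		printf("%02x:%02x:%02x:%02x:%02x:%02x dev %d\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
		       ndm->ndm_ifindex);
	}
	return MNL_CB_OK;
}

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	unsigned int seq, portid;
	ssize_t ret;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type	 = RTM_GETNEIGH;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	nlh->nlmsg_seq	 = seq = time(NULL);

	/* PF_BRIDGE routes the dump to rtnl_fdb_dump(); all fields that
	 * valid_fdb_dump_strict() checks are left at zero.
	 */
	ndm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ndm));
	ndm->ndm_family = PF_BRIDGE;

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
		return 1;
	portid = mnl_socket_get_portid(nl);

	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);

	while ((ret = mnl_socket_recvfrom(nl, buf, sizeof(buf))) > 0) {
		ret = mnl_cb_run(buf, ret, seq, portid, fdb_cb, NULL);
		if (ret <= MNL_CB_STOP)
			break;
	}

	mnl_socket_close(nl);
	return 0;
}
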
4420static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
4421				 int *br_idx, int *brport_idx,
4422				 struct netlink_ext_ack *extack)
4423{
4424	struct nlattr *tb[NDA_MAX + 1];
4425	struct ndmsg *ndm;
4426	int err, i;
4427
4428	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4429		NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
4430		return -EINVAL;
4431	}
4432
4433	ndm = nlmsg_data(nlh);
4434	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
4435	    ndm->ndm_flags || ndm->ndm_type) {
4436		NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
4437		return -EINVAL;
4438	}
4439
4440	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4441					    NDA_MAX, NULL, extack);
4442	if (err < 0)
4443		return err;
4444
4445	*brport_idx = ndm->ndm_ifindex;
4446	for (i = 0; i <= NDA_MAX; ++i) {
4447		if (!tb[i])
4448			continue;
4449
4450		switch (i) {
4451		case NDA_IFINDEX:
4452			if (nla_len(tb[i]) != sizeof(u32)) {
4453				NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
4454				return -EINVAL;
4455			}
4456			*brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
4457			break;
4458		case NDA_MASTER:
4459			if (nla_len(tb[i]) != sizeof(u32)) {
4460				NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
4461				return -EINVAL;
4462			}
4463			*br_idx = nla_get_u32(tb[NDA_MASTER]);
4464			break;
4465		default:
4466			NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
4467			return -EINVAL;
4468		}
4469	}
4470
4471	return 0;
4472}
4473
4474static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
4475				 int *br_idx, int *brport_idx,
4476				 struct netlink_ext_ack *extack)
4477{
4478	struct nlattr *tb[IFLA_MAX+1];
4479	int err;
4480
4481	/* A hack to preserve kernel<->userspace interface.
4482	 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
4483	 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
4484	 * So, check for ndmsg with an optional u32 attribute (not used here).
4485	 * Fortunately these sizes don't conflict with the size of ifinfomsg
4486	 * with an optional attribute.
4487	 */
4488	if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
4489	    (nlmsg_len(nlh) != sizeof(struct ndmsg) +
4490	     nla_attr_size(sizeof(u32)))) {
4491		struct ifinfomsg *ifm;
4492
4493		err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4494					     tb, IFLA_MAX, ifla_policy,
4495					     extack);
4496		if (err < 0) {
4497			return -EINVAL;
4498		} else if (err == 0) {
4499			if (tb[IFLA_MASTER])
4500				*br_idx = nla_get_u32(tb[IFLA_MASTER]);
4501		}
4502
4503		ifm = nlmsg_data(nlh);
4504		*brport_idx = ifm->ifi_index;
4505	}
4506	return 0;
4507}
4508
4509static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
4510{
4511	struct net_device *dev;
4512	struct net_device *br_dev = NULL;
4513	const struct net_device_ops *ops = NULL;
4514	const struct net_device_ops *cops = NULL;
4515	struct net *net = sock_net(skb->sk);
4516	struct hlist_head *head;
4517	int brport_idx = 0;
4518	int br_idx = 0;
4519	int h, s_h;
4520	int idx = 0, s_idx;
4521	int err = 0;
4522	int fidx = 0;
4523
4524	if (cb->strict_check)
4525		err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
4526					    cb->extack);
4527	else
4528		err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
4529					    cb->extack);
4530	if (err < 0)
4531		return err;
4532
4533	if (br_idx) {
4534		br_dev = __dev_get_by_index(net, br_idx);
4535		if (!br_dev)
4536			return -ENODEV;
4537
4538		ops = br_dev->netdev_ops;
4539	}
4540
4541	s_h = cb->args[0];
4542	s_idx = cb->args[1];
4543
4544	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4545		idx = 0;
4546		head = &net->dev_index_head[h];
4547		hlist_for_each_entry(dev, head, index_hlist) {
4548
4549			if (brport_idx && (dev->ifindex != brport_idx))
4550				continue;
4551
4552			if (!br_idx) { /* user did not specify a particular bridge */
4553				if (netif_is_bridge_port(dev)) {
4554					br_dev = netdev_master_upper_dev_get(dev);
4555					cops = br_dev->netdev_ops;
4556				}
4557			} else {
4558				if (dev != br_dev &&
4559				    !netif_is_bridge_port(dev))
4560					continue;
4561
4562				if (br_dev != netdev_master_upper_dev_get(dev) &&
4563				    !netif_is_bridge_master(dev))
4564					continue;
4565				cops = ops;
4566			}
4567
4568			if (idx < s_idx)
4569				goto cont;
4570
4571			if (netif_is_bridge_port(dev)) {
4572				if (cops && cops->ndo_fdb_dump) {
4573					err = cops->ndo_fdb_dump(skb, cb,
4574								br_dev, dev,
4575								&fidx);
4576					if (err == -EMSGSIZE)
4577						goto out;
4578				}
4579			}
4580
4581			if (dev->netdev_ops->ndo_fdb_dump)
4582				err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4583								    dev, NULL,
4584								    &fidx);
4585			else
4586				err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4587							&fidx);
4588			if (err == -EMSGSIZE)
4589				goto out;
4590
4591			cops = NULL;
4592
4593			/* reset fdb offset to 0 for the rest of the interfaces */
4594			cb->args[2] = 0;
4595			fidx = 0;
4596cont:
4597			idx++;
4598		}
4599	}
4600
4601out:
4602	cb->args[0] = h;
4603	cb->args[1] = idx;
4604	cb->args[2] = fidx;
4605
4606	return skb->len;
4607}
4608
4609static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
4610				struct nlattr **tb, u8 *ndm_flags,
4611				int *br_idx, int *brport_idx, u8 **addr,
4612				u16 *vid, struct netlink_ext_ack *extack)
4613{
4614	struct ndmsg *ndm;
4615	int err, i;
4616
4617	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4618		NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
4619		return -EINVAL;
4620	}
4621
4622	ndm = nlmsg_data(nlh);
4623	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
4624	    ndm->ndm_type) {
4625		NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
4626		return -EINVAL;
4627	}
4628
4629	if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
4630		NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
4631		return -EINVAL;
4632	}
4633
4634	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4635					    NDA_MAX, nda_policy, extack);
4636	if (err < 0)
4637		return err;
4638
4639	*ndm_flags = ndm->ndm_flags;
4640	*brport_idx = ndm->ndm_ifindex;
4641	for (i = 0; i <= NDA_MAX; ++i) {
4642		if (!tb[i])
4643			continue;
4644
4645		switch (i) {
4646		case NDA_MASTER:
4647			*br_idx = nla_get_u32(tb[i]);
4648			break;
4649		case NDA_LLADDR:
4650			if (nla_len(tb[i]) != ETH_ALEN) {
4651				NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
4652				return -EINVAL;
4653			}
4654			*addr = nla_data(tb[i]);
4655			break;
4656		case NDA_VLAN:
4657			err = fdb_vid_parse(tb[i], vid, extack);
4658			if (err)
4659				return err;
4660			break;
4661		case NDA_VNI:
4662			break;
4663		default:
4664			NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
4665			return -EINVAL;
4666		}
4667	}
4668
4669	return 0;
4670}
4671
4672static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4673			struct netlink_ext_ack *extack)
4674{
4675	struct net_device *dev = NULL, *br_dev = NULL;
4676	const struct net_device_ops *ops = NULL;
4677	struct net *net = sock_net(in_skb->sk);
4678	struct nlattr *tb[NDA_MAX + 1];
4679	struct sk_buff *skb;
4680	int brport_idx = 0;
4681	u8 ndm_flags = 0;
4682	int br_idx = 0;
4683	u8 *addr = NULL;
4684	u16 vid = 0;
4685	int err;
4686
4687	err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
4688				   &brport_idx, &addr, &vid, extack);
4689	if (err < 0)
4690		return err;
4691
4692	if (!addr) {
4693		NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
4694		return -EINVAL;
4695	}
4696
4697	if (brport_idx) {
4698		dev = __dev_get_by_index(net, brport_idx);
4699		if (!dev) {
4700			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
4701			return -ENODEV;
4702		}
4703	}
4704
4705	if (br_idx) {
4706		if (dev) {
4707			NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
4708			return -EINVAL;
4709		}
4710
4711		br_dev = __dev_get_by_index(net, br_idx);
4712		if (!br_dev) {
4713			NL_SET_ERR_MSG(extack, "Invalid master ifindex");
4714			return -EINVAL;
4715		}
4716		ops = br_dev->netdev_ops;
4717	}
4718
4719	if (dev) {
4720		if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
4721			if (!netif_is_bridge_port(dev)) {
4722				NL_SET_ERR_MSG(extack, "Device is not a bridge port");
4723				return -EINVAL;
4724			}
4725			br_dev = netdev_master_upper_dev_get(dev);
4726			if (!br_dev) {
4727				NL_SET_ERR_MSG(extack, "Master of device not found");
4728				return -EINVAL;
4729			}
4730			ops = br_dev->netdev_ops;
4731		} else {
4732			if (!(ndm_flags & NTF_SELF)) {
4733				NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
4734				return -EINVAL;
4735			}
4736			ops = dev->netdev_ops;
4737		}
4738	}
4739
4740	if (!br_dev && !dev) {
4741		NL_SET_ERR_MSG(extack, "No device specified");
4742		return -ENODEV;
4743	}
4744
4745	if (!ops || !ops->ndo_fdb_get) {
4746		NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
4747		return -EOPNOTSUPP;
4748	}
4749
4750	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4751	if (!skb)
4752		return -ENOBUFS;
4753
4754	if (br_dev)
4755		dev = br_dev;
4756	err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
4757			       NETLINK_CB(in_skb).portid,
4758			       nlh->nlmsg_seq, extack);
4759	if (err)
4760		goto out;
4761
4762	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4763out:
4764	kfree_skb(skb);
4765	return err;
4766}
4767
4768static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
4769			       unsigned int attrnum, unsigned int flag)
4770{
4771	if (mask & flag)
4772		return nla_put_u8(skb, attrnum, !!(flags & flag));
4773	return 0;
4774}
4775
4776int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4777			    struct net_device *dev, u16 mode,
4778			    u32 flags, u32 mask, int nlflags,
4779			    u32 filter_mask,
4780			    int (*vlan_fill)(struct sk_buff *skb,
4781					     struct net_device *dev,
4782					     u32 filter_mask))
4783{
4784	struct nlmsghdr *nlh;
4785	struct ifinfomsg *ifm;
4786	struct nlattr *br_afspec;
4787	struct nlattr *protinfo;
4788	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
4789	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4790	int err = 0;
4791
4792	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
4793	if (nlh == NULL)
4794		return -EMSGSIZE;
4795
4796	ifm = nlmsg_data(nlh);
4797	ifm->ifi_family = AF_BRIDGE;
4798	ifm->__ifi_pad = 0;
4799	ifm->ifi_type = dev->type;
4800	ifm->ifi_index = dev->ifindex;
4801	ifm->ifi_flags = dev_get_flags(dev);
4802	ifm->ifi_change = 0;
4803
4804
4805	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4806	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4807	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
4808	    (br_dev &&
4809	     nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
4810	    (dev->addr_len &&
4811	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4812	    (dev->ifindex != dev_get_iflink(dev) &&
4813	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
4814		goto nla_put_failure;
4815
4816	br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
4817	if (!br_afspec)
4818		goto nla_put_failure;
4819
4820	if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
4821		nla_nest_cancel(skb, br_afspec);
4822		goto nla_put_failure;
4823	}
4824
4825	if (mode != BRIDGE_MODE_UNDEF) {
4826		if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
4827			nla_nest_cancel(skb, br_afspec);
4828			goto nla_put_failure;
4829		}
4830	}
4831	if (vlan_fill) {
4832		err = vlan_fill(skb, dev, filter_mask);
4833		if (err) {
4834			nla_nest_cancel(skb, br_afspec);
4835			goto nla_put_failure;
4836		}
4837	}
4838	nla_nest_end(skb, br_afspec);
4839
4840	protinfo = nla_nest_start(skb, IFLA_PROTINFO);
4841	if (!protinfo)
4842		goto nla_put_failure;
4843
4844	if (brport_nla_put_flag(skb, flags, mask,
4845				IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
4846	    brport_nla_put_flag(skb, flags, mask,
4847				IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
4848	    brport_nla_put_flag(skb, flags, mask,
4849				IFLA_BRPORT_FAST_LEAVE,
4850				BR_MULTICAST_FAST_LEAVE) ||
4851	    brport_nla_put_flag(skb, flags, mask,
4852				IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
4853	    brport_nla_put_flag(skb, flags, mask,
4854				IFLA_BRPORT_LEARNING, BR_LEARNING) ||
4855	    brport_nla_put_flag(skb, flags, mask,
4856				IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
4857	    brport_nla_put_flag(skb, flags, mask,
4858				IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
4859	    brport_nla_put_flag(skb, flags, mask,
4860				IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
4861	    brport_nla_put_flag(skb, flags, mask,
4862				IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
4863	    brport_nla_put_flag(skb, flags, mask,
4864				IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
4865		nla_nest_cancel(skb, protinfo);
4866		goto nla_put_failure;
4867	}
4868
4869	nla_nest_end(skb, protinfo);
4870
4871	nlmsg_end(skb, nlh);
4872	return 0;
4873nla_put_failure:
4874	nlmsg_cancel(skb, nlh);
4875	return err ? err : -EMSGSIZE;
4876}
4877EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
4878
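/*
 * Editorial sketch, not part of rtnetlink.c: an AF_BRIDGE link dump as
 * answered by ndo_dflt_bridge_getlink() above (the transport behind
 * "bridge link show"); RTEXT_FILTER_BRVLAN additionally requests the
 * per-port vlans.  Assumes libmnl; error handling is abbreviated.
 */
#include <libmnl/libmnl.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>
#include <stdio.h>
#include <time.h>

static int brlink_cb(const struct nlmsghdr *nlh, void *data)
{
	const struct ifinfomsg *ifm = mnl_nlmsg_get_payload(nlh);

	printf("bridge port ifindex %d\n", ifm->ifi_index);
	return MNL_CB_OK;
}

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct ifinfomsg *ifm;
	unsigned int seq, portid;
	ssize_t ret;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type	 = RTM_GETLINK;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	nlh->nlmsg_seq	 = seq = time(NULL);

	ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
	ifm->ifi_family = AF_BRIDGE;	/* dispatches to rtnl_bridge_getlink() */

	mnl_attr_put_u32(nlh, IFLA_EXT_MASK, RTEXT_FILTER_BRVLAN);

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
		return 1;
	portid = mnl_socket_get_portid(nl);

	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);

	while ((ret = mnl_socket_recvfrom(nl, buf, sizeof(buf))) > 0) {
		ret = mnl_cb_run(buf, ret, seq, portid, brlink_cb, NULL);
		if (ret <= MNL_CB_STOP)
			break;
	}

	mnl_socket_close(nl);
	return 0;
}
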
4879static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
4880				    bool strict_check, u32 *filter_mask,
4881				    struct netlink_ext_ack *extack)
4882{
4883	struct nlattr *tb[IFLA_MAX+1];
4884	int err, i;
4885
4886	if (strict_check) {
4887		struct ifinfomsg *ifm;
4888
4889		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
4890			NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
4891			return -EINVAL;
4892		}
4893
4894		ifm = nlmsg_data(nlh);
4895		if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
4896		    ifm->ifi_change || ifm->ifi_index) {
4897			NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
4898			return -EINVAL;
4899		}
4900
4901		err = nlmsg_parse_deprecated_strict(nlh,
4902						    sizeof(struct ifinfomsg),
4903						    tb, IFLA_MAX, ifla_policy,
4904						    extack);
4905	} else {
4906		err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4907					     tb, IFLA_MAX, ifla_policy,
4908					     extack);
4909	}
4910	if (err < 0)
4911		return err;
4912
4913	/* new attributes should only be added with strict checking */
4914	for (i = 0; i <= IFLA_MAX; ++i) {
4915		if (!tb[i])
4916			continue;
4917
4918		switch (i) {
4919		case IFLA_EXT_MASK:
4920			*filter_mask = nla_get_u32(tb[i]);
4921			break;
4922		default:
4923			if (strict_check) {
4924				NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
4925				return -EINVAL;
4926			}
4927		}
4928	}
4929
4930	return 0;
4931}
4932
4933static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
4934{
4935	const struct nlmsghdr *nlh = cb->nlh;
4936	struct net *net = sock_net(skb->sk);
4937	struct net_device *dev;
4938	int idx = 0;
4939	u32 portid = NETLINK_CB(cb->skb).portid;
4940	u32 seq = nlh->nlmsg_seq;
4941	u32 filter_mask = 0;
4942	int err;
4943
4944	err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
4945				       cb->extack);
4946	if (err < 0 && cb->strict_check)
4947		return err;
4948
4949	rcu_read_lock();
4950	for_each_netdev_rcu(net, dev) {
4951		const struct net_device_ops *ops = dev->netdev_ops;
4952		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4953
4954		if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
4955			if (idx >= cb->args[0]) {
4956				err = br_dev->netdev_ops->ndo_bridge_getlink(
4957						skb, portid, seq, dev,
4958						filter_mask, NLM_F_MULTI);
4959				if (err < 0 && err != -EOPNOTSUPP) {
4960					if (likely(skb->len))
4961						break;
4962
4963					goto out_err;
4964				}
4965			}
4966			idx++;
4967		}
4968
4969		if (ops->ndo_bridge_getlink) {
4970			if (idx >= cb->args[0]) {
4971				err = ops->ndo_bridge_getlink(skb, portid,
4972							      seq, dev,
4973							      filter_mask,
4974							      NLM_F_MULTI);
4975				if (err < 0 && err != -EOPNOTSUPP) {
4976					if (likely(skb->len))
4977						break;
4978
4979					goto out_err;
4980				}
4981			}
4982			idx++;
4983		}
4984	}
4985	err = skb->len;
4986out_err:
4987	rcu_read_unlock();
4988	cb->args[0] = idx;
4989
4990	return err;
4991}
4992
4993static inline size_t bridge_nlmsg_size(void)
4994{
4995	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
4996		+ nla_total_size(IFNAMSIZ)	/* IFLA_IFNAME */
4997		+ nla_total_size(MAX_ADDR_LEN)	/* IFLA_ADDRESS */
4998		+ nla_total_size(sizeof(u32))	/* IFLA_MASTER */
4999		+ nla_total_size(sizeof(u32))	/* IFLA_MTU */
5000		+ nla_total_size(sizeof(u32))	/* IFLA_LINK */
5001		+ nla_total_size(sizeof(u32))	/* IFLA_OPERSTATE */
5002		+ nla_total_size(sizeof(u8))	/* IFLA_PROTINFO */
5003		+ nla_total_size(sizeof(struct nlattr))	/* IFLA_AF_SPEC */
5004		+ nla_total_size(sizeof(u16))	/* IFLA_BRIDGE_FLAGS */
5005		+ nla_total_size(sizeof(u16));	/* IFLA_BRIDGE_MODE */
5006}
5007
5008static int rtnl_bridge_notify(struct net_device *dev)
5009{
5010	struct net *net = dev_net(dev);
5011	struct sk_buff *skb;
5012	int err = -EOPNOTSUPP;
5013
5014	if (!dev->netdev_ops->ndo_bridge_getlink)
5015		return 0;
5016
5017	skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
5018	if (!skb) {
5019		err = -ENOMEM;
5020		goto errout;
5021	}
5022
5023	err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
5024	if (err < 0)
5025		goto errout;
5026
5027	/* Notification info is only filled for bridge ports, not the bridge
5028	 * device itself. Therefore, a zero notification length is valid and
5029	 * should not result in an error.
5030	 */
5031	if (!skb->len)
5032		goto errout;
5033
5034	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
5035	return 0;
5036errout:
5037	WARN_ON(err == -EMSGSIZE);
5038	kfree_skb(skb);
5039	if (err)
5040		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
5041	return err;
5042}
5043
5044static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
5045			       struct netlink_ext_ack *extack)
5046{
5047	struct net *net = sock_net(skb->sk);
5048	struct ifinfomsg *ifm;
5049	struct net_device *dev;
5050	struct nlattr *br_spec, *attr = NULL;
5051	int rem, err = -EOPNOTSUPP;
5052	u16 flags = 0;
5053	bool have_flags = false;
5054
5055	if (nlmsg_len(nlh) < sizeof(*ifm))
5056		return -EINVAL;
5057
5058	ifm = nlmsg_data(nlh);
5059	if (ifm->ifi_family != AF_BRIDGE)
5060		return -EPFNOSUPPORT;
5061
5062	dev = __dev_get_by_index(net, ifm->ifi_index);
5063	if (!dev) {
5064		NL_SET_ERR_MSG(extack, "unknown ifindex");
5065		return -ENODEV;
5066	}
5067
5068	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5069	if (br_spec) {
5070		nla_for_each_nested(attr, br_spec, rem) {
5071			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
5072				if (nla_len(attr) < sizeof(flags))
5073					return -EINVAL;
5074
5075				have_flags = true;
5076				flags = nla_get_u16(attr);
5077				break;
5078			}
5079		}
5080	}
5081
5082	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5083		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5084
5085		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
5086			err = -EOPNOTSUPP;
5087			goto out;
5088		}
5089
5090		err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
5091							     extack);
5092		if (err)
5093			goto out;
5094
5095		flags &= ~BRIDGE_FLAGS_MASTER;
5096	}
5097
5098	if ((flags & BRIDGE_FLAGS_SELF)) {
5099		if (!dev->netdev_ops->ndo_bridge_setlink)
5100			err = -EOPNOTSUPP;
5101		else
5102			err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
5103								  flags,
5104								  extack);
5105		if (!err) {
5106			flags &= ~BRIDGE_FLAGS_SELF;
5107
5108			/* Generate event to notify upper layer of bridge
5109			 * change
5110			 */
5111			err = rtnl_bridge_notify(dev);
5112		}
5113	}
5114
5115	if (have_flags)
5116		memcpy(nla_data(attr), &flags, sizeof(flags));
5117out:
5118	return err;
5119}
5120
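/*
 * Editorial sketch, not part of rtnetlink.c: an RTM_SETLINK that
 * rtnl_bridge_setlink() above forwards to the port's master, roughly
 * "bridge link set dev <bridge-port> learning on".  With no
 * IFLA_BRIDGE_FLAGS attribute, flags stay 0 and the master path is
 * taken; the IFLA_PROTINFO nest is then consumed by the bridge's
 * ndo_bridge_setlink.  Assumes libmnl; error handling is abbreviated.
 */
#include <libmnl/libmnl.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>
#include <net/if.h>
#include <stdio.h>
#include <time.h>

int main(int argc, char *argv[])
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct ifinfomsg *ifm;
	struct nlattr *protinfo;
	unsigned int seq, portid;
	ssize_t ret;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <bridge-port>\n", argv[0]);
		return 1;
	}

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type	 = RTM_SETLINK;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	nlh->nlmsg_seq	 = seq = time(NULL);

	ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
	ifm->ifi_family = AF_BRIDGE;	/* required by rtnl_bridge_setlink() */
	ifm->ifi_index	= if_nametoindex(argv[1]);

	/* turn address learning on for this port */
	protinfo = mnl_attr_nest_start(nlh, IFLA_PROTINFO);
	mnl_attr_put_u8(nlh, IFLA_BRPORT_LEARNING, 1);
	mnl_attr_nest_end(nlh, protinfo);

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
		return 1;
	portid = mnl_socket_get_portid(nl);

	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);

	ret = mnl_socket_recvfrom(nl, buf, sizeof(buf));
	if (ret < 0 || mnl_cb_run(buf, ret, seq, portid, NULL, NULL) < 0)
		perror("RTM_SETLINK");

	mnl_socket_close(nl);
	return 0;
}
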
5121static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
5122			       struct netlink_ext_ack *extack)
5123{
5124	struct net *net = sock_net(skb->sk);
5125	struct ifinfomsg *ifm;
5126	struct net_device *dev;
5127	struct nlattr *br_spec, *attr = NULL;
5128	int rem, err = -EOPNOTSUPP;
5129	u16 flags = 0;
5130	bool have_flags = false;
5131
5132	if (nlmsg_len(nlh) < sizeof(*ifm))
5133		return -EINVAL;
5134
5135	ifm = nlmsg_data(nlh);
5136	if (ifm->ifi_family != AF_BRIDGE)
5137		return -EPFNOSUPPORT;
5138
5139	dev = __dev_get_by_index(net, ifm->ifi_index);
5140	if (!dev) {
5141		NL_SET_ERR_MSG(extack, "unknown ifindex");
5142		return -ENODEV;
5143	}
5144
5145	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5146	if (br_spec) {
5147		nla_for_each_nested(attr, br_spec, rem) {
5148			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
5149				if (nla_len(attr) < sizeof(flags))
5150					return -EINVAL;
5151
5152				have_flags = true;
5153				flags = nla_get_u16(attr);
5154				break;
5155			}
5156		}
5157	}
5158
5159	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5160		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5161
5162		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
5163			err = -EOPNOTSUPP;
5164			goto out;
5165		}
5166
5167		err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
5168		if (err)
5169			goto out;
5170
5171		flags &= ~BRIDGE_FLAGS_MASTER;
5172	}
5173
5174	if ((flags & BRIDGE_FLAGS_SELF)) {
5175		if (!dev->netdev_ops->ndo_bridge_dellink)
5176			err = -EOPNOTSUPP;
5177		else
5178			err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5179								  flags);
5180
5181		if (!err) {
5182			flags &= ~BRIDGE_FLAGS_SELF;
5183
5184			/* Generate event to notify upper layer of bridge
5185			 * change
5186			 */
5187			err = rtnl_bridge_notify(dev);
5188		}
5189	}
5190
5191	if (have_flags)
5192		memcpy(nla_data(attr), &flags, sizeof(flags));
5193out:
5194	return err;
5195}
5196
5197static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
5198{
5199	return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
5200	       (!idxattr || idxattr == attrid);
5201}
5202
5203static bool
5204rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id)
5205{
5206	return dev->netdev_ops &&
5207	       dev->netdev_ops->ndo_has_offload_stats &&
5208	       dev->netdev_ops->ndo_get_offload_stats &&
5209	       dev->netdev_ops->ndo_has_offload_stats(dev, attr_id);
5210}
5211
5212static unsigned int
5213rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id)
5214{
5215	return rtnl_offload_xstats_have_ndo(dev, attr_id) ?
5216	       sizeof(struct rtnl_link_stats64) : 0;
5217}
5218
5219static int
5220rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id,
5221			     struct sk_buff *skb)
5222{
5223	unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id);
5224	struct nlattr *attr = NULL;
5225	void *attr_data;
5226	int err;
5227
5228	if (!size)
5229		return -ENODATA;
5230
5231	attr = nla_reserve_64bit(skb, attr_id, size,
5232				 IFLA_OFFLOAD_XSTATS_UNSPEC);
5233	if (!attr)
5234		return -EMSGSIZE;
5235
5236	attr_data = nla_data(attr);
5237	memset(attr_data, 0, size);
5238
5239	err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data);
5240	if (err)
5241		return err;
5242
5243	return 0;
5244}
5245
5246static unsigned int
5247rtnl_offload_xstats_get_size_stats(const struct net_device *dev,
5248				   enum netdev_offload_xstats_type type)
5249{
5250	bool enabled = netdev_offload_xstats_enabled(dev, type);
5251
5252	return enabled ? sizeof(struct rtnl_hw_stats64) : 0;
5253}
5254
5255struct rtnl_offload_xstats_request_used {
5256	bool request;
5257	bool used;
5258};
5259
5260static int
5261rtnl_offload_xstats_get_stats(struct net_device *dev,
5262			      enum netdev_offload_xstats_type type,
5263			      struct rtnl_offload_xstats_request_used *ru,
5264			      struct rtnl_hw_stats64 *stats,
5265			      struct netlink_ext_ack *extack)
5266{
5267	bool request;
5268	bool used;
5269	int err;
5270
5271	request = netdev_offload_xstats_enabled(dev, type);
5272	if (!request) {
5273		used = false;
5274		goto out;
5275	}
5276
5277	err = netdev_offload_xstats_get(dev, type, stats, &used, extack);
5278	if (err)
5279		return err;
5280
5281out:
5282	if (ru) {
5283		ru->request = request;
5284		ru->used = used;
5285	}
5286	return 0;
5287}
5288
5289static int
5290rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id,
5291				       struct rtnl_offload_xstats_request_used *ru)
5292{
5293	struct nlattr *nest;
5294
5295	nest = nla_nest_start(skb, attr_id);
5296	if (!nest)
5297		return -EMSGSIZE;
5298
5299	if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request))
5300		goto nla_put_failure;
5301
5302	if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used))
5303		goto nla_put_failure;
5304
5305	nla_nest_end(skb, nest);
5306	return 0;
5307
5308nla_put_failure:
5309	nla_nest_cancel(skb, nest);
5310	return -EMSGSIZE;
5311}
5312
5313static int
5314rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev,
5315				   struct netlink_ext_ack *extack)
5316{
5317	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5318	struct rtnl_offload_xstats_request_used ru_l3;
5319	struct nlattr *nest;
5320	int err;
5321
5322	err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack);
5323	if (err)
5324		return err;
5325
5326	nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5327	if (!nest)
5328		return -EMSGSIZE;
5329
5330	if (rtnl_offload_xstats_fill_hw_s_info_one(skb,
5331						   IFLA_OFFLOAD_XSTATS_L3_STATS,
5332						   &ru_l3))
5333		goto nla_put_failure;
5334
5335	nla_nest_end(skb, nest);
5336	return 0;
5337
5338nla_put_failure:
5339	nla_nest_cancel(skb, nest);
5340	return -EMSGSIZE;
5341}
5342
5343static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
5344				    int *prividx, u32 off_filter_mask,
5345				    struct netlink_ext_ack *extack)
5346{
5347	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5348	int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO;
5349	int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS;
5350	int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5351	bool have_data = false;
5352	int err;
5353
5354	if (*prividx <= attr_id_cpu_hit &&
5355	    (off_filter_mask &
5356	     IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) {
5357		err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb);
5358		if (!err) {
5359			have_data = true;
5360		} else if (err != -ENODATA) {
5361			*prividx = attr_id_cpu_hit;
5362			return err;
5363		}
5364	}
5365
5366	if (*prividx <= attr_id_hw_s_info &&
5367	    (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) {
5368		*prividx = attr_id_hw_s_info;
5369
5370		err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack);
5371		if (err)
5372			return err;
5373
5374		have_data = true;
5375		*prividx = 0;
5376	}
5377
5378	if (*prividx <= attr_id_l3_stats &&
5379	    (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) {
5380		unsigned int size_l3;
5381		struct nlattr *attr;
5382
5383		*prividx = attr_id_l3_stats;
5384
5385		size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5386		if (!size_l3)
5387			goto skip_l3_stats;
5388		attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
5389					 IFLA_OFFLOAD_XSTATS_UNSPEC);
5390		if (!attr)
5391			return -EMSGSIZE;
5392
5393		err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL,
5394						    nla_data(attr), extack);
5395		if (err)
5396			return err;
5397
5398		have_data = true;
5399skip_l3_stats:
5400		*prividx = 0;
5401	}
5402
5403	if (!have_data)
5404		return -ENODATA;
5405
5406	*prividx = 0;
5407	return 0;
5408}
5409
5410static unsigned int
5411rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
5412					   enum netdev_offload_xstats_type type)
5413{
5414	bool enabled = netdev_offload_xstats_enabled(dev, type);
5415
5416	return nla_total_size(0) +
5417		/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
5418		nla_total_size(sizeof(u8)) +
5419		/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
5420		(enabled ? nla_total_size(sizeof(u8)) : 0) +
5421		0;
5422}
5423
5424static unsigned int
5425rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev)
5426{
5427	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5428
5429	return nla_total_size(0) +
5430		/* IFLA_OFFLOAD_XSTATS_L3_STATS */
5431		rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) +
5432		0;
5433}
5434
5435static int rtnl_offload_xstats_get_size(const struct net_device *dev,
5436					u32 off_filter_mask)
5437{
5438	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5439	int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5440	int nla_size = 0;
5441	int size;
5442
5443	if (off_filter_mask &
5444	    IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) {
5445		size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit);
5446		nla_size += nla_total_size_64bit(size);
5447	}
5448
5449	if (off_filter_mask &
5450	    IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO))
5451		nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev);
5452
5453	if (off_filter_mask &
5454	    IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) {
5455		size = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5456		nla_size += nla_total_size_64bit(size);
5457	}
5458
5459	if (nla_size != 0)
5460		nla_size += nla_total_size(0);
5461
5462	return nla_size;
5463}
5464
5465struct rtnl_stats_dump_filters {
5466	/* mask[0] filters outer attributes. Then individual nests have their
5467	 * filtering mask at the index of the nested attribute.
5468	 */
5469	u32 mask[IFLA_STATS_MAX + 1];
5470};
5471
5472static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
5473			       int type, u32 pid, u32 seq, u32 change,
5474			       unsigned int flags,
5475			       const struct rtnl_stats_dump_filters *filters,
5476			       int *idxattr, int *prividx,
5477			       struct netlink_ext_ack *extack)
5478{
5479	unsigned int filter_mask = filters->mask[0];
5480	struct if_stats_msg *ifsm;
5481	struct nlmsghdr *nlh;
5482	struct nlattr *attr;
5483	int s_prividx = *prividx;
5484	int err;
5485
5486	ASSERT_RTNL();
5487
5488	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
5489	if (!nlh)
5490		return -EMSGSIZE;
5491
5492	ifsm = nlmsg_data(nlh);
5493	ifsm->family = PF_UNSPEC;
5494	ifsm->pad1 = 0;
5495	ifsm->pad2 = 0;
5496	ifsm->ifindex = dev->ifindex;
5497	ifsm->filter_mask = filter_mask;
5498
5499	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
5500		struct rtnl_link_stats64 *sp;
5501
5502		attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
5503					 sizeof(struct rtnl_link_stats64),
5504					 IFLA_STATS_UNSPEC);
5505		if (!attr) {
5506			err = -EMSGSIZE;
5507			goto nla_put_failure;
5508		}
5509
5510		sp = nla_data(attr);
5511		dev_get_stats(dev, sp);
5512	}
5513
5514	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
5515		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5516
5517		if (ops && ops->fill_linkxstats) {
5518			*idxattr = IFLA_STATS_LINK_XSTATS;
5519			attr = nla_nest_start_noflag(skb,
5520						     IFLA_STATS_LINK_XSTATS);
5521			if (!attr) {
5522				err = -EMSGSIZE;
5523				goto nla_put_failure;
5524			}
5525
5526			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5527			nla_nest_end(skb, attr);
5528			if (err)
5529				goto nla_put_failure;
5530			*idxattr = 0;
5531		}
5532	}
5533
5534	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
5535			     *idxattr)) {
5536		const struct rtnl_link_ops *ops = NULL;
5537		const struct net_device *master;
5538
5539		master = netdev_master_upper_dev_get(dev);
5540		if (master)
5541			ops = master->rtnl_link_ops;
5542		if (ops && ops->fill_linkxstats) {
5543			*idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
5544			attr = nla_nest_start_noflag(skb,
5545						     IFLA_STATS_LINK_XSTATS_SLAVE);
5546			if (!attr) {
5547				err = -EMSGSIZE;
5548				goto nla_put_failure;
5549			}
5550
5551			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5552			nla_nest_end(skb, attr);
5553			if (err)
5554				goto nla_put_failure;
5555			*idxattr = 0;
5556		}
5557	}
5558
5559	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
5560			     *idxattr)) {
5561		u32 off_filter_mask;
5562
5563		off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5564		*idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
5565		attr = nla_nest_start_noflag(skb,
5566					     IFLA_STATS_LINK_OFFLOAD_XSTATS);
5567		if (!attr) {
5568			err = -EMSGSIZE;
5569			goto nla_put_failure;
5570		}
5571
5572		err = rtnl_offload_xstats_fill(skb, dev, prividx,
5573					       off_filter_mask, extack);
5574		if (err == -ENODATA)
5575			nla_nest_cancel(skb, attr);
5576		else
5577			nla_nest_end(skb, attr);
5578
5579		if (err && err != -ENODATA)
5580			goto nla_put_failure;
5581		*idxattr = 0;
5582	}
5583
5584	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
5585		struct rtnl_af_ops *af_ops;
5586
5587		*idxattr = IFLA_STATS_AF_SPEC;
5588		attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
5589		if (!attr) {
5590			err = -EMSGSIZE;
5591			goto nla_put_failure;
5592		}
5593
5594		rcu_read_lock();
5595		list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5596			if (af_ops->fill_stats_af) {
5597				struct nlattr *af;
5598
5599				af = nla_nest_start_noflag(skb,
5600							   af_ops->family);
5601				if (!af) {
5602					rcu_read_unlock();
5603					err = -EMSGSIZE;
5604					goto nla_put_failure;
5605				}
5606				err = af_ops->fill_stats_af(skb, dev);
5607
5608				if (err == -ENODATA) {
5609					nla_nest_cancel(skb, af);
5610				} else if (err < 0) {
5611					rcu_read_unlock();
5612					goto nla_put_failure;
5613				}
5614
5615				nla_nest_end(skb, af);
5616			}
5617		}
5618		rcu_read_unlock();
5619
5620		nla_nest_end(skb, attr);
5621
5622		*idxattr = 0;
5623	}
5624
5625	nlmsg_end(skb, nlh);
5626
5627	return 0;
5628
5629nla_put_failure:
5630	/* not a multi message or no progress means a real error */
5631	if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
5632		nlmsg_cancel(skb, nlh);
5633	else
5634		nlmsg_end(skb, nlh);
5635
5636	return err;
5637}
5638
5639static size_t if_nlmsg_stats_size(const struct net_device *dev,
5640				  const struct rtnl_stats_dump_filters *filters)
5641{
5642	size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
5643	unsigned int filter_mask = filters->mask[0];
5644
5645	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
5646		size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
5647
5648	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
5649		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5650		int attr = IFLA_STATS_LINK_XSTATS;
5651
5652		if (ops && ops->get_linkxstats_size) {
5653			size += nla_total_size(ops->get_linkxstats_size(dev,
5654									attr));
5655			/* for IFLA_STATS_LINK_XSTATS */
5656			size += nla_total_size(0);
5657		}
5658	}
5659
5660	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
5661		struct net_device *_dev = (struct net_device *)dev;
5662		const struct rtnl_link_ops *ops = NULL;
5663		const struct net_device *master;
5664
5665		/* netdev_master_upper_dev_get can't take const */
5666		master = netdev_master_upper_dev_get(_dev);
5667		if (master)
5668			ops = master->rtnl_link_ops;
5669		if (ops && ops->get_linkxstats_size) {
5670			int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
5671
5672			size += nla_total_size(ops->get_linkxstats_size(dev,
5673									attr));
5674			/* for IFLA_STATS_LINK_XSTATS_SLAVE */
5675			size += nla_total_size(0);
5676		}
5677	}
5678
5679	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) {
5680		u32 off_filter_mask;
5681
5682		off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5683		size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
5684	}
5685
5686	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
5687		struct rtnl_af_ops *af_ops;
5688
5689		/* for IFLA_STATS_AF_SPEC */
5690		size += nla_total_size(0);
5691
5692		rcu_read_lock();
5693		list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5694			if (af_ops->get_stats_af_size) {
5695				size += nla_total_size(
5696					af_ops->get_stats_af_size(dev));
5697
5698				/* for AF_* */
5699				size += nla_total_size(0);
5700			}
5701		}
5702		rcu_read_unlock();
5703	}
5704
5705	return size;
5706}
5707
5708#define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1)
5709
5710static const struct nla_policy
5711rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
5712	[IFLA_STATS_LINK_OFFLOAD_XSTATS] =
5713		    NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID),
5714};
5715
5716static const struct nla_policy
5717rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = {
5718	[IFLA_STATS_GET_FILTERS] =
5719		    NLA_POLICY_NESTED(rtnl_stats_get_policy_filters),
5720};
5721
5722static const struct nla_policy
5723ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
5724	[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1),
5725};
5726
5727static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters,
5728					struct rtnl_stats_dump_filters *filters,
5729					struct netlink_ext_ack *extack)
5730{
5731	struct nlattr *tb[IFLA_STATS_MAX + 1];
5732	int err;
5733	int at;
5734
5735	err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters,
5736			       rtnl_stats_get_policy_filters, extack);
5737	if (err < 0)
5738		return err;
5739
5740	for (at = 1; at <= IFLA_STATS_MAX; at++) {
5741		if (tb[at]) {
5742			if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) {
5743				NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask");
5744				return -EINVAL;
5745			}
5746			filters->mask[at] = nla_get_u32(tb[at]);
5747		}
5748	}
5749
5750	return 0;
5751}
5752
5753static int rtnl_stats_get_parse(const struct nlmsghdr *nlh,
5754				u32 filter_mask,
5755				struct rtnl_stats_dump_filters *filters,
5756				struct netlink_ext_ack *extack)
5757{
5758	struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
5759	int err;
5760	int i;
5761
5762	filters->mask[0] = filter_mask;
5763	for (i = 1; i < ARRAY_SIZE(filters->mask); i++)
5764		filters->mask[i] = -1U;
5765
5766	err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb,
5767			  IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack);
5768	if (err < 0)
5769		return err;
5770
5771	if (tb[IFLA_STATS_GET_FILTERS]) {
5772		err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS],
5773						   filters, extack);
5774		if (err)
5775			return err;
5776	}
5777
5778	return 0;
5779}
5780
5781static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
5782				bool is_dump, struct netlink_ext_ack *extack)
5783{
5784	struct if_stats_msg *ifsm;
5785
5786	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
5787		NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
5788		return -EINVAL;
5789	}
5790
5791	if (!strict_check)
5792		return 0;
5793
5794	ifsm = nlmsg_data(nlh);
5795
5796	/* only requests using strict checks can pass data to influence
5797	 * the dump. The legacy exception is filter_mask.
5798	 */
5799	if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
5800		NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
5801		return -EINVAL;
5802	}
5803	if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
5804		NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
5805		return -EINVAL;
5806	}
5807
5808	return 0;
5809}
5810
5811static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
5812			  struct netlink_ext_ack *extack)
5813{
5814	struct rtnl_stats_dump_filters filters;
5815	struct net *net = sock_net(skb->sk);
5816	struct net_device *dev = NULL;
5817	int idxattr = 0, prividx = 0;
5818	struct if_stats_msg *ifsm;
5819	struct sk_buff *nskb;
5820	int err;
5821
5822	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
5823				   false, extack);
5824	if (err)
5825		return err;
5826
5827	ifsm = nlmsg_data(nlh);
5828	if (ifsm->ifindex > 0)
5829		dev = __dev_get_by_index(net, ifsm->ifindex);
5830	else
5831		return -EINVAL;
5832
5833	if (!dev)
5834		return -ENODEV;
5835
5836	if (!ifsm->filter_mask) {
5837		NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get");
5838		return -EINVAL;
5839	}
5840
5841	err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack);
5842	if (err)
5843		return err;
5844
5845	nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL);
5846	if (!nskb)
5847		return -ENOBUFS;
5848
5849	err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
5850				  NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
5851				  0, &filters, &idxattr, &prividx, extack);
5852	if (err < 0) {
5853		/* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
5854		WARN_ON(err == -EMSGSIZE);
5855		kfree_skb(nskb);
5856	} else {
5857		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
5858	}
5859
5860	return err;
5861}
5862
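/*
 * Editorial sketch, not part of rtnetlink.c: the RTM_GETSTATS request
 * that rtnl_stats_get() above answers, asking only for the 64-bit link
 * stats via filter_mask.  Assumes libmnl; error handling is abbreviated.
 */
#include <libmnl/libmnl.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>
#include <net/if.h>
#include <inttypes.h>
#include <stdio.h>
#include <time.h>

static int stats_attr_cb(const struct nlattr *attr, void *data)
{
	if (mnl_attr_get_type(attr) == IFLA_STATS_LINK_64) {
		const struct rtnl_link_stats64 *st = mnl_attr_get_payload(attr);

		printf("rx %" PRIu64 " tx %" PRIu64 " packets\n",
		       (uint64_t)st->rx_packets, (uint64_t)st->tx_packets);
	}
	return MNL_CB_OK;
}

static int stats_cb(const struct nlmsghdr *nlh, void *data)
{
	mnl_attr_parse(nlh, sizeof(struct if_stats_msg), stats_attr_cb, NULL);
	return MNL_CB_OK;
}

int main(int argc, char *argv[])
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct if_stats_msg *ifsm;
	unsigned int seq, portid;
	ssize_t ret;

	if (argc != 2)
		return 1;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type	 = RTM_GETSTATS;
	nlh->nlmsg_flags = NLM_F_REQUEST;
	nlh->nlmsg_seq	 = seq = time(NULL);

	ifsm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifsm));
	ifsm->family	  = AF_UNSPEC;
	ifsm->ifindex	  = if_nametoindex(argv[1]);
	/* filter_mask selects the IFLA_STATS_* nests to fill */
	ifsm->filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64);

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
		return 1;
	portid = mnl_socket_get_portid(nl);

	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);

	ret = mnl_socket_recvfrom(nl, buf, sizeof(buf));
	if (ret < 0 || mnl_cb_run(buf, ret, seq, portid, stats_cb, NULL) < 0)
		perror("RTM_GETSTATS");

	mnl_socket_close(nl);
	return 0;
}
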
5863static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
5864{
5865	struct netlink_ext_ack *extack = cb->extack;
5866	int h, s_h, err, s_idx, s_idxattr, s_prividx;
5867	struct rtnl_stats_dump_filters filters;
5868	struct net *net = sock_net(skb->sk);
5869	unsigned int flags = NLM_F_MULTI;
5870	struct if_stats_msg *ifsm;
5871	struct hlist_head *head;
5872	struct net_device *dev;
5873	int idx = 0;
5874
5875	s_h = cb->args[0];
5876	s_idx = cb->args[1];
5877	s_idxattr = cb->args[2];
5878	s_prividx = cb->args[3];
5879
5880	cb->seq = net->dev_base_seq;
5881
5882	err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
5883	if (err)
5884		return err;
5885
5886	ifsm = nlmsg_data(cb->nlh);
5887	if (!ifsm->filter_mask) {
5888		NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
5889		return -EINVAL;
5890	}
5891
5892	err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters,
5893				   extack);
5894	if (err)
5895		return err;
5896
5897	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5898		idx = 0;
5899		head = &net->dev_index_head[h];
5900		hlist_for_each_entry(dev, head, index_hlist) {
5901			if (idx < s_idx)
5902				goto cont;
5903			err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
5904						  NETLINK_CB(cb->skb).portid,
5905						  cb->nlh->nlmsg_seq, 0,
5906						  flags, &filters,
5907						  &s_idxattr, &s_prividx,
5908						  extack);
5909			/* If we ran out of room on the first message,
5910			 * we're in trouble
5911			 */
5912			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
5913
5914			if (err < 0)
5915				goto out;
5916			s_prividx = 0;
5917			s_idxattr = 0;
5918			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
5919cont:
5920			idx++;
5921		}
5922	}
5923out:
5924	cb->args[3] = s_prividx;
5925	cb->args[2] = s_idxattr;
5926	cb->args[1] = idx;
5927	cb->args[0] = h;
5928
5929	return skb->len;
5930}
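/* Dump state lives in cb->args[]: the device hash bucket, the index within
 * the bucket, and the attribute/private indexes inside a partially
 * serialized device.  A dump that overflows one skb resumes mid-device on
 * the next netlink_dump_start() pass, which is why s_idxattr/s_prividx are
 * only reset once a device has been emitted completely.
 */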
5931
5932void rtnl_offload_xstats_notify(struct net_device *dev)
5933{
5934	struct rtnl_stats_dump_filters response_filters = {};
5935	struct net *net = dev_net(dev);
5936	int idxattr = 0, prividx = 0;
5937	struct sk_buff *skb;
5938	int err = -ENOBUFS;
5939
5940	ASSERT_RTNL();
5941
5942	response_filters.mask[0] |=
5943		IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
5944	response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
5945		IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5946
5947	skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
5948			GFP_KERNEL);
5949	if (!skb)
5950		goto errout;
5951
5952	err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
5953				  &response_filters, &idxattr, &prividx, NULL);
5954	if (err < 0) {
5955		kfree_skb(skb);
5956		goto errout;
5957	}
5958
5959	rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL);
5960	return;
5961
5962errout:
5963	rtnl_set_sk_err(net, RTNLGRP_STATS, err);
5964}
5965EXPORT_SYMBOL(rtnl_offload_xstats_notify);
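/* Exported so that drivers which toggle offloaded stats collection outside
 * of RTM_SETSTATS can tell RTNLGRP_STATS listeners about it; the
 * ASSERT_RTNL() above documents that callers must already hold the RTNL.
 */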
5966
5967static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
5968			  struct netlink_ext_ack *extack)
5969{
5970	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5971	struct rtnl_stats_dump_filters response_filters = {};
5972	struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
5973	struct net *net = sock_net(skb->sk);
5974	struct net_device *dev = NULL;
5975	struct if_stats_msg *ifsm;
5976	bool notify = false;
5977	int err;
5978
5979	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
5980				   false, extack);
5981	if (err)
5982		return err;
5983
5984	ifsm = nlmsg_data(nlh);
5985	if (ifsm->family != AF_UNSPEC) {
5986		NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC");
5987		return -EINVAL;
5988	}
5989
5990	if (ifsm->ifindex > 0)
5991		dev = __dev_get_by_index(net, ifsm->ifindex);
5992	else
5993		return -EINVAL;
5994
5995	if (!dev)
5996		return -ENODEV;
5997
5998	if (ifsm->filter_mask) {
5999		NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set");
6000		return -EINVAL;
6001	}
6002
6003	err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX,
6004			  ifla_stats_set_policy, extack);
6005	if (err < 0)
6006		return err;
6007
6008	if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) {
6009		u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]);
6010
6011		if (req)
6012			err = netdev_offload_xstats_enable(dev, t_l3, extack);
6013		else
6014			err = netdev_offload_xstats_disable(dev, t_l3);
6015
6016		if (!err)
6017			notify = true;
6018		else if (err != -EALREADY)
6019			return err;
6020
6021		response_filters.mask[0] |=
6022			IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
6023		response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
6024			IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
6025	}
6026
6027	if (notify)
6028		rtnl_offload_xstats_notify(dev);
6029
6030	return 0;
6031}
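/* RTM_SETSTATS currently only toggles offloaded L3 stats collection.
 * -EALREADY from enable/disable is swallowed so that a request restating
 * the current state succeeds idempotently, but no notification is sent in
 * that case since nothing changed.
 */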
6032
6033/* Process one rtnetlink message. */
6034
6035static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
6036			     struct netlink_ext_ack *extack)
6037{
6038	struct net *net = sock_net(skb->sk);
6039	struct rtnl_link *link;
6040	enum rtnl_kinds kind;
6041	struct module *owner;
6042	int err = -EOPNOTSUPP;
6043	rtnl_doit_func doit;
6044	unsigned int flags;
6045	int family;
6046	int type;
6047
6048	type = nlh->nlmsg_type;
6049	if (type > RTM_MAX)
6050		return -EOPNOTSUPP;
6051
6052	type -= RTM_BASE;
6053
6054	/* All the messages must have at least 1 byte length */
6055	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
6056		return 0;
6057
6058	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
6059	kind = rtnl_msgtype_kind(type);
6060
6061	if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
6062		return -EPERM;
6063
6064	rcu_read_lock();
6065	if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
6066		struct sock *rtnl;
6067		rtnl_dumpit_func dumpit;
6068		u32 min_dump_alloc = 0;
6069
6070		link = rtnl_get_link(family, type);
6071		if (!link || !link->dumpit) {
6072			family = PF_UNSPEC;
6073			link = rtnl_get_link(family, type);
6074			if (!link || !link->dumpit)
6075				goto err_unlock;
6076		}
6077		owner = link->owner;
6078		dumpit = link->dumpit;
6079
6080		if (type == RTM_GETLINK - RTM_BASE)
6081			min_dump_alloc = rtnl_calcit(skb, nlh);
6082
6083		err = 0;
6084		/* need to do this before rcu_read_unlock() */
6085		if (!try_module_get(owner))
6086			err = -EPROTONOSUPPORT;
6087
6088		rcu_read_unlock();
6089
6090		rtnl = net->rtnl;
6091		if (err == 0) {
6092			struct netlink_dump_control c = {
6093				.dump		= dumpit,
6094				.min_dump_alloc	= min_dump_alloc,
6095				.module		= owner,
6096			};
6097			err = netlink_dump_start(rtnl, skb, nlh, &c);
6098			/* netlink_dump_start() will keep a reference on
6099			 * module if dump is still in progress.
6100			 */
6101			module_put(owner);
6102		}
6103		return err;
6104	}
6105
6106	link = rtnl_get_link(family, type);
6107	if (!link || !link->doit) {
6108		family = PF_UNSPEC;
6109		link = rtnl_get_link(PF_UNSPEC, type);
6110		if (!link || !link->doit)
6111			goto out_unlock;
6112	}
6113
6114	owner = link->owner;
6115	if (!try_module_get(owner)) {
6116		err = -EPROTONOSUPPORT;
6117		goto out_unlock;
6118	}
6119
6120	flags = link->flags;
6121	if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
6122	    !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
6123		NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
6124		module_put(owner);
6125		goto err_unlock;
6126	}
6127
6128	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
6129		doit = link->doit;
6130		rcu_read_unlock();
6131		if (doit)
6132			err = doit(skb, nlh, extack);
6133		module_put(owner);
6134		return err;
6135	}
6136	rcu_read_unlock();
6137
6138	rtnl_lock();
6139	link = rtnl_get_link(family, type);
6140	if (link && link->doit)
6141		err = link->doit(skb, nlh, extack);
6142	rtnl_unlock();
6143
6144	module_put(owner);
6145
6146	return err;
6147
6148out_unlock:
6149	rcu_read_unlock();
6150	return err;
6151
6152err_unlock:
6153	rcu_read_unlock();
6154	return -EOPNOTSUPP;
6155}
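/* Dispatch summary: the message type selects a kind (NEW/DEL/GET/SET), and
 * anything but GET requires CAP_NET_ADMIN.  GET plus NLM_F_DUMP goes
 * through netlink_dump_start(), which may keep a reference on the
 * handler's module across multiple recvmsg() calls.  Plain doit handlers
 * run under the RTNL unless registered with RTNL_FLAG_DOIT_UNLOCKED, in
 * which case only RCU protects the handler lookup.
 */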
6156
6157static void rtnetlink_rcv(struct sk_buff *skb)
6158{
6159	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
6160}
6161
6162static int rtnetlink_bind(struct net *net, int group)
6163{
6164	switch (group) {
6165	case RTNLGRP_IPV4_MROUTE_R:
6166	case RTNLGRP_IPV6_MROUTE_R:
6167		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
6168			return -EPERM;
6169		break;
6170	}
6171	return 0;
6172}
6173
6174static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
6175{
6176	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6177
6178	switch (event) {
6179	case NETDEV_REBOOT:
6180	case NETDEV_CHANGEMTU:
6181	case NETDEV_CHANGEADDR:
6182	case NETDEV_CHANGENAME:
6183	case NETDEV_FEAT_CHANGE:
6184	case NETDEV_BONDING_FAILOVER:
6185	case NETDEV_POST_TYPE_CHANGE:
6186	case NETDEV_NOTIFY_PEERS:
6187	case NETDEV_CHANGEUPPER:
6188	case NETDEV_RESEND_IGMP:
6189	case NETDEV_CHANGEINFODATA:
6190	case NETDEV_CHANGELOWERSTATE:
6191	case NETDEV_CHANGE_TX_QUEUE_LEN:
6192		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
6193				   GFP_KERNEL, NULL, 0, 0, NULL);
6194		break;
6195	default:
6196		break;
6197	}
6198	return NOTIFY_DONE;
6199}
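/* Only the listed netdev notifier events are forwarded to userspace as
 * RTM_NEWLINK; rtnl_get_event() maps the notifier code to an IFLA_EVENT
 * value so listeners can tell, e.g., a bonding failover from an MTU
 * change.
 */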
6200
6201static struct notifier_block rtnetlink_dev_notifier = {
6202	.notifier_call	= rtnetlink_event,
6203};
6204
6205
6206static int __net_init rtnetlink_net_init(struct net *net)
6207{
6208	struct sock *sk;
6209	struct netlink_kernel_cfg cfg = {
6210		.groups		= RTNLGRP_MAX,
6211		.input		= rtnetlink_rcv,
6212		.cb_mutex	= &rtnl_mutex,
6213		.flags		= NL_CFG_F_NONROOT_RECV,
6214		.bind		= rtnetlink_bind,
6215	};
6216
6217	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
6218	if (!sk)
6219		return -ENOMEM;
6220	net->rtnl = sk;
6221	return 0;
6222}
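/* One NETLINK_ROUTE kernel socket exists per network namespace.  Reusing
 * rtnl_mutex as the netlink cb_mutex is what makes dump callbacks run
 * under the RTNL, and NL_CFG_F_NONROOT_RECV lets unprivileged processes
 * subscribe to broadcast groups, with rtnetlink_bind() vetoing the two
 * privileged multicast-routing groups.
 */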
6223
6224static void __net_exit rtnetlink_net_exit(struct net *net)
6225{
6226	netlink_kernel_release(net->rtnl);
6227	net->rtnl = NULL;
6228}
6229
6230static struct pernet_operations rtnetlink_net_ops = {
6231	.init = rtnetlink_net_init,
6232	.exit = rtnetlink_net_exit,
6233};
6234
6235void __init rtnetlink_init(void)
6236{
6237	if (register_pernet_subsys(&rtnetlink_net_ops))
6238		panic("rtnetlink_init: cannot initialize rtnetlink\n");
6239
6240	register_netdevice_notifier(&rtnetlink_dev_notifier);
6241
6242	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
6243		      rtnl_dump_ifinfo, 0);
6244	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
6245	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
6246	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
6247
6248	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
6249	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
6250	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
6251
6252	rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
6253	rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);
6254
6255	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
6256	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
6257		      RTNL_FLAG_BULK_DEL_SUPPORTED);
6258	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);
6259
6260	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
6261	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
6262	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);
6263
6264	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
6265		      0);
6266	rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);
6267}
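/* The PF_UNSPEC registrations act as fallbacks: rtnl_get_link() retries
 * with PF_UNSPEC when no family-specific handler exists, so rtnl_dump_all()
 * can serve RTM_GETADDR/GETROUTE/GETNETCONF dumps by walking every
 * family's registered dump handler.
 */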
v3.5.6
 
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Routing netlink socket interface: protocol independent part.
   7 *
   8 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
   9 *
  10 *		This program is free software; you can redistribute it and/or
  11 *		modify it under the terms of the GNU General Public License
  12 *		as published by the Free Software Foundation; either version
  13 *		2 of the License, or (at your option) any later version.
  14 *
  15 *	Fixes:
  16 *	Vitaly E. Lavrov		RTA_OK arithmetics was wrong.
  17 */
  18
 
  19#include <linux/errno.h>
  20#include <linux/module.h>
  21#include <linux/types.h>
  22#include <linux/socket.h>
  23#include <linux/kernel.h>
  24#include <linux/timer.h>
  25#include <linux/string.h>
  26#include <linux/sockios.h>
  27#include <linux/net.h>
  28#include <linux/fcntl.h>
  29#include <linux/mm.h>
  30#include <linux/slab.h>
  31#include <linux/interrupt.h>
  32#include <linux/capability.h>
  33#include <linux/skbuff.h>
  34#include <linux/init.h>
  35#include <linux/security.h>
  36#include <linux/mutex.h>
  37#include <linux/if_addr.h>
  38#include <linux/if_bridge.h>
 
  39#include <linux/pci.h>
  40#include <linux/etherdevice.h>
 
  41
  42#include <asm/uaccess.h>
  43
  44#include <linux/inet.h>
  45#include <linux/netdevice.h>
  46#include <net/ip.h>
  47#include <net/protocol.h>
  48#include <net/arp.h>
  49#include <net/route.h>
  50#include <net/udp.h>
 
  51#include <net/sock.h>
  52#include <net/pkt_sched.h>
  53#include <net/fib_rules.h>
  54#include <net/rtnetlink.h>
  55#include <net/net_namespace.h>
 
 
 
 
 
 
  56
  57struct rtnl_link {
  58	rtnl_doit_func		doit;
  59	rtnl_dumpit_func	dumpit;
  60	rtnl_calcit_func 	calcit;
 
 
  61};
  62
  63static DEFINE_MUTEX(rtnl_mutex);
  64
  65void rtnl_lock(void)
  66{
  67	mutex_lock(&rtnl_mutex);
  68}
  69EXPORT_SYMBOL(rtnl_lock);
  70
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  71void __rtnl_unlock(void)
  72{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  73	mutex_unlock(&rtnl_mutex);
 
 
 
 
 
 
 
 
  74}
  75
  76void rtnl_unlock(void)
  77{
  78	/* This fellow will unlock it for us. */
  79	netdev_run_todo();
  80}
  81EXPORT_SYMBOL(rtnl_unlock);
  82
  83int rtnl_trylock(void)
  84{
  85	return mutex_trylock(&rtnl_mutex);
  86}
  87EXPORT_SYMBOL(rtnl_trylock);
  88
  89int rtnl_is_locked(void)
  90{
  91	return mutex_is_locked(&rtnl_mutex);
  92}
  93EXPORT_SYMBOL(rtnl_is_locked);
  94
 
 
 
 
 
 
  95#ifdef CONFIG_PROVE_LOCKING
  96int lockdep_rtnl_is_held(void)
  97{
  98	return lockdep_is_held(&rtnl_mutex);
  99}
 100EXPORT_SYMBOL(lockdep_rtnl_is_held);
 101#endif /* #ifdef CONFIG_PROVE_LOCKING */
 102
 103static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
 104
 105static inline int rtm_msgindex(int msgtype)
 106{
 107	int msgindex = msgtype - RTM_BASE;
 108
 109	/*
 110	 * msgindex < 0 implies someone tried to register a netlink
 111	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
 112	 * the message type has not been added to linux/rtnetlink.h
 113	 */
 114	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
 115
 116	return msgindex;
 117}
 118
 119static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
 120{
 121	struct rtnl_link *tab;
 122
 123	if (protocol <= RTNL_FAMILY_MAX)
 124		tab = rtnl_msg_handlers[protocol];
 125	else
 126		tab = NULL;
 127
 128	if (tab == NULL || tab[msgindex].doit == NULL)
 129		tab = rtnl_msg_handlers[PF_UNSPEC];
 
 130
 131	return tab ? tab[msgindex].doit : NULL;
 132}
 133
 134static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
 
 
 
 135{
 136	struct rtnl_link *tab;
 
 
 
 137
 138	if (protocol <= RTNL_FAMILY_MAX)
 139		tab = rtnl_msg_handlers[protocol];
 140	else
 141		tab = NULL;
 142
 143	if (tab == NULL || tab[msgindex].dumpit == NULL)
 144		tab = rtnl_msg_handlers[PF_UNSPEC];
 
 
 
 
 
 
 
 
 145
 146	return tab ? tab[msgindex].dumpit : NULL;
 147}
 
 
 
 
 
 
 
 
 148
 149static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex)
 150{
 151	struct rtnl_link *tab;
 152
 153	if (protocol <= RTNL_FAMILY_MAX)
 154		tab = rtnl_msg_handlers[protocol];
 155	else
 156		tab = NULL;
 
 
 157
 158	if (tab == NULL || tab[msgindex].calcit == NULL)
 159		tab = rtnl_msg_handlers[PF_UNSPEC];
 
 
 
 
 
 
 
 
 
 
 
 160
 161	return tab ? tab[msgindex].calcit : NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 162}
 
 163
 164/**
 165 * __rtnl_register - Register a rtnetlink message type
 166 * @protocol: Protocol family or PF_UNSPEC
 167 * @msgtype: rtnetlink message type
 168 * @doit: Function pointer called for each request message
 169 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 170 * @calcit: Function pointer to calc size of dump message
 171 *
 172 * Registers the specified function pointers (at least one of them has
 173 * to be non-NULL) to be called whenever a request message for the
 174 * specified protocol family and message type is received.
 175 *
 176 * The special protocol family PF_UNSPEC may be used to define fallback
 177 * function pointers for the case when no entry for the specific protocol
 178 * family exists.
 179 *
 180 * Returns 0 on success or a negative error code.
 181 */
 182int __rtnl_register(int protocol, int msgtype,
 183		    rtnl_doit_func doit, rtnl_dumpit_func dumpit,
 184		    rtnl_calcit_func calcit)
 185{
 186	struct rtnl_link *tab;
 187	int msgindex;
 188
 189	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
 190	msgindex = rtm_msgindex(msgtype);
 191
 192	tab = rtnl_msg_handlers[protocol];
 193	if (tab == NULL) {
 194		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
 195		if (tab == NULL)
 196			return -ENOBUFS;
 197
 198		rtnl_msg_handlers[protocol] = tab;
 199	}
 200
 201	if (doit)
 202		tab[msgindex].doit = doit;
 203
 204	if (dumpit)
 205		tab[msgindex].dumpit = dumpit;
 206
 207	if (calcit)
 208		tab[msgindex].calcit = calcit;
 209
 210	return 0;
 211}
 212EXPORT_SYMBOL_GPL(__rtnl_register);
 213
 214/**
 215 * rtnl_register - Register a rtnetlink message type
 216 *
 217 * Identical to __rtnl_register() but panics on failure. This is useful
 218 * as failure of this function is very unlikely, it can only happen due
 219 * to lack of memory when allocating the chain to store all message
 220 * handlers for a protocol. Meant for use in init functions where lack
 221 * of memory implies no sense in continuing.
 222 */
 223void rtnl_register(int protocol, int msgtype,
 224		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
 225		   rtnl_calcit_func calcit)
 226{
 227	if (__rtnl_register(protocol, msgtype, doit, dumpit, calcit) < 0)
 228		panic("Unable to register rtnetlink message handler, "
 229		      "protocol = %d, message type = %d\n",
 230		      protocol, msgtype);
 
 
 
 231}
 232EXPORT_SYMBOL_GPL(rtnl_register);
 233
 234/**
 235 * rtnl_unregister - Unregister a rtnetlink message type
 236 * @protocol: Protocol family or PF_UNSPEC
 237 * @msgtype: rtnetlink message type
 238 *
 239 * Returns 0 on success or a negative error code.
 240 */
 241int rtnl_unregister(int protocol, int msgtype)
 242{
 
 
 243	int msgindex;
 244
 245	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
 246	msgindex = rtm_msgindex(msgtype);
 247
 248	if (rtnl_msg_handlers[protocol] == NULL)
 
 
 
 249		return -ENOENT;
 
 250
 251	rtnl_msg_handlers[protocol][msgindex].doit = NULL;
 252	rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
 
 
 
 253
 254	return 0;
 255}
 256EXPORT_SYMBOL_GPL(rtnl_unregister);
 257
 258/**
 259 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
 260 * @protocol : Protocol family or PF_UNSPEC
 261 *
 262 * Identical to calling rtnl_unregster() for all registered message types
 263 * of a certain protocol family.
 264 */
 265void rtnl_unregister_all(int protocol)
 266{
 
 
 
 
 267	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
 268
 269	kfree(rtnl_msg_handlers[protocol]);
 270	rtnl_msg_handlers[protocol] = NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 271}
 272EXPORT_SYMBOL_GPL(rtnl_unregister_all);
 273
 274static LIST_HEAD(link_ops);
 275
 276static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
 277{
 278	const struct rtnl_link_ops *ops;
 279
 280	list_for_each_entry(ops, &link_ops, list) {
 281		if (!strcmp(ops->kind, kind))
 282			return ops;
 283	}
 284	return NULL;
 285}
 286
 287/**
 288 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 289 * @ops: struct rtnl_link_ops * to register
 290 *
 291 * The caller must hold the rtnl_mutex. This function should be used
 292 * by drivers that create devices during module initialization. It
 293 * must be called before registering the devices.
 294 *
 295 * Returns 0 on success or a negative error code.
 296 */
 297int __rtnl_link_register(struct rtnl_link_ops *ops)
 298{
 299	if (rtnl_link_ops_get(ops->kind))
 300		return -EEXIST;
 301
 302	if (!ops->dellink)
 
 
 
 
 
 303		ops->dellink = unregister_netdevice_queue;
 304
 305	list_add_tail(&ops->list, &link_ops);
 306	return 0;
 307}
 308EXPORT_SYMBOL_GPL(__rtnl_link_register);
 309
 310/**
 311 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 312 * @ops: struct rtnl_link_ops * to register
 313 *
 314 * Returns 0 on success or a negative error code.
 315 */
 316int rtnl_link_register(struct rtnl_link_ops *ops)
 317{
 318	int err;
 319
 
 
 
 
 
 320	rtnl_lock();
 321	err = __rtnl_link_register(ops);
 322	rtnl_unlock();
 323	return err;
 324}
 325EXPORT_SYMBOL_GPL(rtnl_link_register);
 326
 327static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
 328{
 329	struct net_device *dev;
 330	LIST_HEAD(list_kill);
 331
 332	for_each_netdev(net, dev) {
 333		if (dev->rtnl_link_ops == ops)
 334			ops->dellink(dev, &list_kill);
 335	}
 336	unregister_netdevice_many(&list_kill);
 337}
 338
 339/**
 340 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 341 * @ops: struct rtnl_link_ops * to unregister
 342 *
 343 * The caller must hold the rtnl_mutex.
 
 
 344 */
 345void __rtnl_link_unregister(struct rtnl_link_ops *ops)
 346{
 347	struct net *net;
 348
 349	for_each_net(net) {
 350		__rtnl_kill_links(net, ops);
 351	}
 352	list_del(&ops->list);
 353}
 354EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
 355
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 356/**
 357 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 358 * @ops: struct rtnl_link_ops * to unregister
 359 */
 360void rtnl_link_unregister(struct rtnl_link_ops *ops)
 361{
 362	rtnl_lock();
 
 
 363	__rtnl_link_unregister(ops);
 364	rtnl_unlock();
 
 365}
 366EXPORT_SYMBOL_GPL(rtnl_link_unregister);
 367
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 368static size_t rtnl_link_get_size(const struct net_device *dev)
 369{
 370	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
 371	size_t size;
 372
 373	if (!ops)
 374		return 0;
 375
 376	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
 377	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */
 378
 379	if (ops->get_size)
 380		/* IFLA_INFO_DATA + nested data */
 381		size += nla_total_size(sizeof(struct nlattr)) +
 382			ops->get_size(dev);
 383
 384	if (ops->get_xstats_size)
 385		/* IFLA_INFO_XSTATS */
 386		size += nla_total_size(ops->get_xstats_size(dev));
 387
 
 
 388	return size;
 389}
 390
 391static LIST_HEAD(rtnl_af_ops);
 392
 393static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
 394{
 395	const struct rtnl_af_ops *ops;
 396
 
 
 397	list_for_each_entry(ops, &rtnl_af_ops, list) {
 398		if (ops->family == family)
 399			return ops;
 400	}
 401
 402	return NULL;
 403}
 404
 405/**
 406 * __rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 407 * @ops: struct rtnl_af_ops * to register
 408 *
 409 * The caller must hold the rtnl_mutex.
 410 *
 411 * Returns 0 on success or a negative error code.
 412 */
 413int __rtnl_af_register(struct rtnl_af_ops *ops)
 414{
 415	list_add_tail(&ops->list, &rtnl_af_ops);
 416	return 0;
 417}
 418EXPORT_SYMBOL_GPL(__rtnl_af_register);
 419
 420/**
 421 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 422 * @ops: struct rtnl_af_ops * to register
 423 *
 424 * Returns 0 on success or a negative error code.
 425 */
 426int rtnl_af_register(struct rtnl_af_ops *ops)
 427{
 428	int err;
 429
 430	rtnl_lock();
 431	err = __rtnl_af_register(ops);
 432	rtnl_unlock();
 433	return err;
 434}
 435EXPORT_SYMBOL_GPL(rtnl_af_register);
 436
 437/**
 438 * __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 439 * @ops: struct rtnl_af_ops * to unregister
 440 *
 441 * The caller must hold the rtnl_mutex.
 442 */
 443void __rtnl_af_unregister(struct rtnl_af_ops *ops)
 444{
 445	list_del(&ops->list);
 446}
 447EXPORT_SYMBOL_GPL(__rtnl_af_unregister);
 448
 449/**
 450 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 451 * @ops: struct rtnl_af_ops * to unregister
 452 */
 453void rtnl_af_unregister(struct rtnl_af_ops *ops)
 454{
 455	rtnl_lock();
 456	__rtnl_af_unregister(ops);
 457	rtnl_unlock();
 
 
 458}
 459EXPORT_SYMBOL_GPL(rtnl_af_unregister);
 460
 461static size_t rtnl_link_get_af_size(const struct net_device *dev)
 
 462{
 463	struct rtnl_af_ops *af_ops;
 464	size_t size;
 465
 466	/* IFLA_AF_SPEC */
 467	size = nla_total_size(sizeof(struct nlattr));
 468
 469	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
 
 470		if (af_ops->get_link_af_size) {
 471			/* AF_* + nested data */
 472			size += nla_total_size(sizeof(struct nlattr)) +
 473				af_ops->get_link_af_size(dev);
 474		}
 475	}
 
 476
 477	return size;
 478}
 479
 480static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 481{
 482	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
 483	struct nlattr *linkinfo, *data;
 484	int err = -EMSGSIZE;
 485
 486	linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
 487	if (linkinfo == NULL)
 488		goto out;
 489
 
 
 490	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
 491		goto err_cancel_link;
 492	if (ops->fill_xstats) {
 493		err = ops->fill_xstats(skb, dev);
 494		if (err < 0)
 495			goto err_cancel_link;
 496	}
 497	if (ops->fill_info) {
 498		data = nla_nest_start(skb, IFLA_INFO_DATA);
 499		if (data == NULL)
 500			goto err_cancel_link;
 501		err = ops->fill_info(skb, dev);
 502		if (err < 0)
 503			goto err_cancel_data;
 504		nla_nest_end(skb, data);
 505	}
 506
 507	nla_nest_end(skb, linkinfo);
 508	return 0;
 509
 510err_cancel_data:
 511	nla_nest_cancel(skb, data);
 512err_cancel_link:
 513	nla_nest_cancel(skb, linkinfo);
 514out:
 515	return err;
 516}
 517
 518static const int rtm_min[RTM_NR_FAMILIES] =
 519{
 520	[RTM_FAM(RTM_NEWLINK)]      = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
 521	[RTM_FAM(RTM_NEWADDR)]      = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
 522	[RTM_FAM(RTM_NEWROUTE)]     = NLMSG_LENGTH(sizeof(struct rtmsg)),
 523	[RTM_FAM(RTM_NEWRULE)]      = NLMSG_LENGTH(sizeof(struct fib_rule_hdr)),
 524	[RTM_FAM(RTM_NEWQDISC)]     = NLMSG_LENGTH(sizeof(struct tcmsg)),
 525	[RTM_FAM(RTM_NEWTCLASS)]    = NLMSG_LENGTH(sizeof(struct tcmsg)),
 526	[RTM_FAM(RTM_NEWTFILTER)]   = NLMSG_LENGTH(sizeof(struct tcmsg)),
 527	[RTM_FAM(RTM_NEWACTION)]    = NLMSG_LENGTH(sizeof(struct tcamsg)),
 528	[RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
 529	[RTM_FAM(RTM_GETANYCAST)]   = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
 530};
 531
 532static const int rta_max[RTM_NR_FAMILIES] =
 533{
 534	[RTM_FAM(RTM_NEWLINK)]      = IFLA_MAX,
 535	[RTM_FAM(RTM_NEWADDR)]      = IFA_MAX,
 536	[RTM_FAM(RTM_NEWROUTE)]     = RTA_MAX,
 537	[RTM_FAM(RTM_NEWRULE)]      = FRA_MAX,
 538	[RTM_FAM(RTM_NEWQDISC)]     = TCA_MAX,
 539	[RTM_FAM(RTM_NEWTCLASS)]    = TCA_MAX,
 540	[RTM_FAM(RTM_NEWTFILTER)]   = TCA_MAX,
 541	[RTM_FAM(RTM_NEWACTION)]    = TCAA_MAX,
 542};
 543
 544void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
 545{
 546	struct rtattr *rta;
 547	int size = RTA_LENGTH(attrlen);
 548
 549	rta = (struct rtattr *)skb_put(skb, RTA_ALIGN(size));
 550	rta->rta_type = attrtype;
 551	rta->rta_len = size;
 552	memcpy(RTA_DATA(rta), data, attrlen);
 553	memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
 554}
 555EXPORT_SYMBOL(__rta_fill);
 556
 557int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
 558{
 559	struct sock *rtnl = net->rtnl;
 560	int err = 0;
 561
 562	NETLINK_CB(skb).dst_group = group;
 563	if (echo)
 564		atomic_inc(&skb->users);
 565	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
 566	if (echo)
 567		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
 568	return err;
 569}
 570
 571int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
 572{
 573	struct sock *rtnl = net->rtnl;
 574
 575	return nlmsg_unicast(rtnl, skb, pid);
 576}
 577EXPORT_SYMBOL(rtnl_unicast);
 578
 579void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
 580		 struct nlmsghdr *nlh, gfp_t flags)
 581{
 582	struct sock *rtnl = net->rtnl;
 583	int report = 0;
 584
 585	if (nlh)
 586		report = nlmsg_report(nlh);
 587
 588	nlmsg_notify(rtnl, skb, pid, group, report, flags);
 589}
 590EXPORT_SYMBOL(rtnl_notify);
 591
 592void rtnl_set_sk_err(struct net *net, u32 group, int error)
 593{
 594	struct sock *rtnl = net->rtnl;
 595
 596	netlink_set_err(rtnl, 0, group, error);
 597}
 598EXPORT_SYMBOL(rtnl_set_sk_err);
 599
 600int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
 601{
 602	struct nlattr *mx;
 603	int i, valid = 0;
 604
 605	mx = nla_nest_start(skb, RTA_METRICS);
 
 
 
 
 606	if (mx == NULL)
 607		return -ENOBUFS;
 608
 609	for (i = 0; i < RTAX_MAX; i++) {
 610		if (metrics[i]) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 611			valid++;
 612			if (nla_put_u32(skb, i+1, metrics[i]))
 613				goto nla_put_failure;
 614		}
 615	}
 616
 617	if (!valid) {
 618		nla_nest_cancel(skb, mx);
 619		return 0;
 620	}
 621
 622	return nla_nest_end(skb, mx);
 623
 624nla_put_failure:
 625	nla_nest_cancel(skb, mx);
 626	return -EMSGSIZE;
 627}
 628EXPORT_SYMBOL(rtnetlink_put_metrics);
 629
 630int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
 631		       u32 ts, u32 tsage, long expires, u32 error)
 632{
 633	struct rta_cacheinfo ci = {
 634		.rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse),
 635		.rta_used = dst->__use,
 636		.rta_clntref = atomic_read(&(dst->__refcnt)),
 637		.rta_error = error,
 638		.rta_id =  id,
 639		.rta_ts = ts,
 640		.rta_tsage = tsage,
 641	};
 642
 643	if (expires)
 644		ci.rta_expires = jiffies_to_clock_t(expires);
 645
 
 
 
 
 
 
 
 
 
 646	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
 647}
 648EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
 649
 650static void set_operstate(struct net_device *dev, unsigned char transition)
 651{
 652	unsigned char operstate = dev->operstate;
 653
 654	switch (transition) {
 655	case IF_OPER_UP:
 656		if ((operstate == IF_OPER_DORMANT ||
 
 657		     operstate == IF_OPER_UNKNOWN) &&
 658		    !netif_dormant(dev))
 659			operstate = IF_OPER_UP;
 660		break;
 661
 
 
 
 
 
 662	case IF_OPER_DORMANT:
 663		if (operstate == IF_OPER_UP ||
 664		    operstate == IF_OPER_UNKNOWN)
 665			operstate = IF_OPER_DORMANT;
 666		break;
 667	}
 668
 669	if (dev->operstate != operstate) {
 670		write_lock_bh(&dev_base_lock);
 671		dev->operstate = operstate;
 672		write_unlock_bh(&dev_base_lock);
 673		netdev_state_change(dev);
 674	}
 675}
 676
 677static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
 678{
 679	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
 680	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
 681}
 682
 683static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
 684					   const struct ifinfomsg *ifm)
 685{
 686	unsigned int flags = ifm->ifi_flags;
 687
 688	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
 689	if (ifm->ifi_change)
 690		flags = (flags & ifm->ifi_change) |
 691			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
 692
 693	return flags;
 694}
 695
 696static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
 697				 const struct rtnl_link_stats64 *b)
 698{
 699	a->rx_packets = b->rx_packets;
 700	a->tx_packets = b->tx_packets;
 701	a->rx_bytes = b->rx_bytes;
 702	a->tx_bytes = b->tx_bytes;
 703	a->rx_errors = b->rx_errors;
 704	a->tx_errors = b->tx_errors;
 705	a->rx_dropped = b->rx_dropped;
 706	a->tx_dropped = b->tx_dropped;
 707
 708	a->multicast = b->multicast;
 709	a->collisions = b->collisions;
 710
 711	a->rx_length_errors = b->rx_length_errors;
 712	a->rx_over_errors = b->rx_over_errors;
 713	a->rx_crc_errors = b->rx_crc_errors;
 714	a->rx_frame_errors = b->rx_frame_errors;
 715	a->rx_fifo_errors = b->rx_fifo_errors;
 716	a->rx_missed_errors = b->rx_missed_errors;
 717
 718	a->tx_aborted_errors = b->tx_aborted_errors;
 719	a->tx_carrier_errors = b->tx_carrier_errors;
 720	a->tx_fifo_errors = b->tx_fifo_errors;
 721	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
 722	a->tx_window_errors = b->tx_window_errors;
 723
 724	a->rx_compressed = b->rx_compressed;
 725	a->tx_compressed = b->tx_compressed;
 726}
 727
 728static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
 729{
 730	memcpy(v, b, sizeof(*b));
 731}
 732
 733/* All VF info */
 734static inline int rtnl_vfinfo_size(const struct net_device *dev,
 735				   u32 ext_filter_mask)
 736{
 737	if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
 738	    (ext_filter_mask & RTEXT_FILTER_VF)) {
 739		int num_vfs = dev_num_vf(dev->dev.parent);
 740		size_t size = nla_total_size(sizeof(struct nlattr));
 741		size += nla_total_size(num_vfs * sizeof(struct nlattr));
 742		size += num_vfs *
 743			(nla_total_size(sizeof(struct ifla_vf_mac)) +
 
 
 744			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
 
 
 
 
 745			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
 746			 nla_total_size(sizeof(struct ifla_vf_spoofchk)));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 747		return size;
 748	} else
 749		return 0;
 750}
 751
 752static size_t rtnl_port_size(const struct net_device *dev)
 
 753{
 754	size_t port_size = nla_total_size(4)		/* PORT_VF */
 755		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
 756		+ nla_total_size(sizeof(struct ifla_port_vsi))
 757							/* PORT_VSI_TYPE */
 758		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
 759		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
 760		+ nla_total_size(1)			/* PROT_VDP_REQUEST */
 761		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
 762	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
 763	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
 764		+ port_size;
 765	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
 766		+ port_size;
 767
 768	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
 
 769		return 0;
 770	if (dev_num_vf(dev->dev.parent))
 771		return port_self_size + vf_ports_size +
 772			vf_port_size * dev_num_vf(dev->dev.parent);
 773	else
 774		return port_self_size;
 775}
 776
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 777static noinline size_t if_nlmsg_size(const struct net_device *dev,
 778				     u32 ext_filter_mask)
 779{
 780	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
 781	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
 782	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
 783	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
 784	       + nla_total_size(sizeof(struct rtnl_link_ifmap))
 785	       + nla_total_size(sizeof(struct rtnl_link_stats))
 786	       + nla_total_size(sizeof(struct rtnl_link_stats64))
 787	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
 788	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
 789	       + nla_total_size(4) /* IFLA_TXQLEN */
 790	       + nla_total_size(4) /* IFLA_WEIGHT */
 791	       + nla_total_size(4) /* IFLA_MTU */
 792	       + nla_total_size(4) /* IFLA_LINK */
 793	       + nla_total_size(4) /* IFLA_MASTER */
 
 794	       + nla_total_size(4) /* IFLA_PROMISCUITY */
 
 
 
 
 
 
 
 
 795	       + nla_total_size(1) /* IFLA_OPERSTATE */
 796	       + nla_total_size(1) /* IFLA_LINKMODE */
 
 
 
 797	       + nla_total_size(ext_filter_mask
 798			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
 799	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
 800	       + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
 801	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
 802	       + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 803}
 804
 805static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
 806{
 807	struct nlattr *vf_ports;
 808	struct nlattr *vf_port;
 809	int vf;
 810	int err;
 811
 812	vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
 813	if (!vf_ports)
 814		return -EMSGSIZE;
 815
 816	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
 817		vf_port = nla_nest_start(skb, IFLA_VF_PORT);
 818		if (!vf_port)
 819			goto nla_put_failure;
 820		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
 821			goto nla_put_failure;
 822		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
 823		if (err == -EMSGSIZE)
 824			goto nla_put_failure;
 825		if (err) {
 826			nla_nest_cancel(skb, vf_port);
 827			continue;
 828		}
 829		nla_nest_end(skb, vf_port);
 830	}
 831
 832	nla_nest_end(skb, vf_ports);
 833
 834	return 0;
 835
 836nla_put_failure:
 837	nla_nest_cancel(skb, vf_ports);
 838	return -EMSGSIZE;
 839}
 840
 841static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
 842{
 843	struct nlattr *port_self;
 844	int err;
 845
 846	port_self = nla_nest_start(skb, IFLA_PORT_SELF);
 847	if (!port_self)
 848		return -EMSGSIZE;
 849
 850	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
 851	if (err) {
 852		nla_nest_cancel(skb, port_self);
 853		return (err == -EMSGSIZE) ? err : 0;
 854	}
 855
 856	nla_nest_end(skb, port_self);
 857
 858	return 0;
 859}
 860
 861static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
 
 862{
 863	int err;
 864
 865	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
 
 866		return 0;
 867
 868	err = rtnl_port_self_fill(skb, dev);
 869	if (err)
 870		return err;
 871
 872	if (dev_num_vf(dev->dev.parent)) {
 873		err = rtnl_vf_ports_fill(skb, dev);
 874		if (err)
 875			return err;
 876	}
 877
 878	return 0;
 879}
 880
 881static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 882			    int type, u32 pid, u32 seq, u32 change,
 883			    unsigned int flags, u32 ext_filter_mask)
 
 
 884{
 885	struct ifinfomsg *ifm;
 886	struct nlmsghdr *nlh;
 887	struct rtnl_link_stats64 temp;
 888	const struct rtnl_link_stats64 *stats;
 889	struct nlattr *attr, *af_spec;
 890	struct rtnl_af_ops *af_ops;
 891
 892	ASSERT_RTNL();
 893	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
 894	if (nlh == NULL)
 895		return -EMSGSIZE;
 896
 897	ifm = nlmsg_data(nlh);
 898	ifm->ifi_family = AF_UNSPEC;
 899	ifm->__ifi_pad = 0;
 900	ifm->ifi_type = dev->type;
 901	ifm->ifi_index = dev->ifindex;
 902	ifm->ifi_flags = dev_get_flags(dev);
 903	ifm->ifi_change = change;
 904
 
 
 
 
 905	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
 906	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
 907	    nla_put_u8(skb, IFLA_OPERSTATE,
 908		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
 909	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
 910	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
 
 
 911	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
 912	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
 913	    (dev->ifindex != dev->iflink &&
 914	     nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
 915	    (dev->master &&
 916	     nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) ||
 917	    (dev->qdisc &&
 918	     nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
 919	    (dev->ifalias &&
 920	     nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)))
 921		goto nla_put_failure;
 922
 923	if (1) {
 924		struct rtnl_link_ifmap map = {
 925			.mem_start   = dev->mem_start,
 926			.mem_end     = dev->mem_end,
 927			.base_addr   = dev->base_addr,
 928			.irq         = dev->irq,
 929			.dma         = dev->dma,
 930			.port        = dev->if_port,
 931		};
 932		if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
 
 
 
 
 
 
 
 
 
 933			goto nla_put_failure;
 934	}
 935
 
 
 
 936	if (dev->addr_len) {
 937		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
 938		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
 939			goto nla_put_failure;
 940	}
 941
 942	attr = nla_reserve(skb, IFLA_STATS,
 943			sizeof(struct rtnl_link_stats));
 944	if (attr == NULL)
 945		goto nla_put_failure;
 946
 947	stats = dev_get_stats(dev, &temp);
 948	copy_rtnl_link_stats(nla_data(attr), stats);
 949
 950	attr = nla_reserve(skb, IFLA_STATS64,
 951			sizeof(struct rtnl_link_stats64));
 952	if (attr == NULL)
 953		goto nla_put_failure;
 954	copy_rtnl_link_stats64(nla_data(attr), stats);
 955
 956	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
 957	    nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
 958		goto nla_put_failure;
 959
 960	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
 961	    && (ext_filter_mask & RTEXT_FILTER_VF)) {
 962		int i;
 963
 964		struct nlattr *vfinfo, *vf;
 965		int num_vfs = dev_num_vf(dev->dev.parent);
 966
 967		vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
 968		if (!vfinfo)
 969			goto nla_put_failure;
 970		for (i = 0; i < num_vfs; i++) {
 971			struct ifla_vf_info ivi;
 972			struct ifla_vf_mac vf_mac;
 973			struct ifla_vf_vlan vf_vlan;
 974			struct ifla_vf_tx_rate vf_tx_rate;
 975			struct ifla_vf_spoofchk vf_spoofchk;
 976
 977			/*
 978			 * Not all SR-IOV capable drivers support the
 979			 * spoofcheck query.  Preset to -1 so the user
 980			 * space tool can detect that the driver didn't
 981			 * report anything.
 982			 */
 983			ivi.spoofchk = -1;
 984			if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
 985				break;
 986			vf_mac.vf =
 987				vf_vlan.vf =
 988				vf_tx_rate.vf =
 989				vf_spoofchk.vf = ivi.vf;
 990
 991			memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
 992			vf_vlan.vlan = ivi.vlan;
 993			vf_vlan.qos = ivi.qos;
 994			vf_tx_rate.rate = ivi.tx_rate;
 995			vf_spoofchk.setting = ivi.spoofchk;
 996			vf = nla_nest_start(skb, IFLA_VF_INFO);
 997			if (!vf) {
 998				nla_nest_cancel(skb, vfinfo);
 999				goto nla_put_failure;
1000			}
1001			if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1002			    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1003			    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1004				    &vf_tx_rate) ||
1005			    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1006				    &vf_spoofchk))
1007				goto nla_put_failure;
1008			nla_nest_end(skb, vf);
1009		}
1010		nla_nest_end(skb, vfinfo);
1011	}
1012
1013	if (rtnl_port_fill(skb, dev))
1014		goto nla_put_failure;
1015
1016	if (dev->rtnl_link_ops) {
1017		if (rtnl_link_fill(skb, dev) < 0)
1018			goto nla_put_failure;
1019	}
1020
1021	if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
1022		goto nla_put_failure;
1023
1024	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
1025		if (af_ops->fill_link_af) {
1026			struct nlattr *af;
1027			int err;
 
 
 
 
 
 
1028
1029			if (!(af = nla_nest_start(skb, af_ops->family)))
1030				goto nla_put_failure;
 
 
1031
1032			err = af_ops->fill_link_af(skb, dev);
 
1033
1034			/*
1035			 * Caller may return ENODATA to indicate that there
1036			 * was no data to be dumped. This is not an error, it
1037			 * means we should trim the attribute header and
1038			 * continue.
1039			 */
1040			if (err == -ENODATA)
1041				nla_nest_cancel(skb, af);
1042			else if (err < 0)
1043				goto nla_put_failure;
1044
1045			nla_nest_end(skb, af);
1046		}
1047	}
 
1048
1049	nla_nest_end(skb, af_spec);
 
1050
1051	return nlmsg_end(skb, nlh);
 
1052
 
 
1053nla_put_failure:
1054	nlmsg_cancel(skb, nlh);
1055	return -EMSGSIZE;
1056}
1057
1058static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1059{
1060	struct net *net = sock_net(skb->sk);
1061	int h, s_h;
1062	int idx = 0, s_idx;
1063	struct net_device *dev;
1064	struct hlist_head *head;
1065	struct hlist_node *node;
1066	struct nlattr *tb[IFLA_MAX+1];
1067	u32 ext_filter_mask = 0;
1068
1069	s_h = cb->args[0];
1070	s_idx = cb->args[1];
1071
1072	rcu_read_lock();
1073	cb->seq = net->dev_base_seq;
1074
1075	if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
1076			ifla_policy) >= 0) {
1077
1078		if (tb[IFLA_EXT_MASK])
1079			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
1080	}
1081
1082	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1083		idx = 0;
1084		head = &net->dev_index_head[h];
1085		hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
1086			if (idx < s_idx)
1087				goto cont;
1088			if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
1089					     NETLINK_CB(cb->skb).pid,
1090					     cb->nlh->nlmsg_seq, 0,
1091					     NLM_F_MULTI,
1092					     ext_filter_mask) <= 0)
1093				goto out;
1094
1095			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1096cont:
1097			idx++;
1098		}
1099	}
1100out:
1101	rcu_read_unlock();
1102	cb->args[1] = idx;
1103	cb->args[0] = h;
1104
1105	return skb->len;
1106}
1107
1108const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1109	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
1110	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1111	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1112	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
1113	[IFLA_MTU]		= { .type = NLA_U32 },
1114	[IFLA_LINK]		= { .type = NLA_U32 },
1115	[IFLA_MASTER]		= { .type = NLA_U32 },
 
1116	[IFLA_TXQLEN]		= { .type = NLA_U32 },
1117	[IFLA_WEIGHT]		= { .type = NLA_U32 },
1118	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
1119	[IFLA_LINKMODE]		= { .type = NLA_U8 },
1120	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
1121	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
1122	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
1123	[IFLA_IFALIAS]	        = { .type = NLA_STRING, .len = IFALIASZ-1 },
 
 
 
1124	[IFLA_VFINFO_LIST]	= {. type = NLA_NESTED },
1125	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
1126	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
1127	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
1128	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
1129	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1130};
1131EXPORT_SYMBOL(ifla_policy);
1132
1133static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1134	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
1135	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
1136};
1137
1138static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
1139	[IFLA_VF_INFO]		= { .type = NLA_NESTED },
1140};
1141
1142static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1143	[IFLA_VF_MAC]		= { .type = NLA_BINARY,
1144				    .len = sizeof(struct ifla_vf_mac) },
1145	[IFLA_VF_VLAN]		= { .type = NLA_BINARY,
1146				    .len = sizeof(struct ifla_vf_vlan) },
1147	[IFLA_VF_TX_RATE]	= { .type = NLA_BINARY,
1148				    .len = sizeof(struct ifla_vf_tx_rate) },
1149	[IFLA_VF_SPOOFCHK]	= { .type = NLA_BINARY,
1150				    .len = sizeof(struct ifla_vf_spoofchk) },
 
 
 
 
 
1151};
1152
1153static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
1154	[IFLA_PORT_VF]		= { .type = NLA_U32 },
1155	[IFLA_PORT_PROFILE]	= { .type = NLA_STRING,
1156				    .len = PORT_PROFILE_MAX },
1157	[IFLA_PORT_VSI_TYPE]	= { .type = NLA_BINARY,
1158				    .len = sizeof(struct ifla_port_vsi)},
1159	[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
1160				      .len = PORT_UUID_MAX },
1161	[IFLA_PORT_HOST_UUID]	= { .type = NLA_STRING,
1162				    .len = PORT_UUID_MAX },
1163	[IFLA_PORT_REQUEST]	= { .type = NLA_U8, },
1164	[IFLA_PORT_RESPONSE]	= { .type = NLA_U16, },
 
 
 
 
 
 
 
1165};
1166
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1167struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
1168{
1169	struct net *net;
1170	/* Examine the link attributes and figure out which
1171	 * network namespace we are talking about.
1172	 */
1173	if (tb[IFLA_NET_NS_PID])
1174		net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
1175	else if (tb[IFLA_NET_NS_FD])
1176		net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
1177	else
1178		net = get_net(src_net);
1179	return net;
1180}
1181EXPORT_SYMBOL(rtnl_link_get_net);
1182
1183static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1184{
1185	if (dev) {
1186		if (tb[IFLA_ADDRESS] &&
1187		    nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
1188			return -EINVAL;
1189
1190		if (tb[IFLA_BROADCAST] &&
1191		    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
1192			return -EINVAL;
1193	}
1194
1195	if (tb[IFLA_AF_SPEC]) {
1196		struct nlattr *af;
1197		int rem, err;
1198
1199		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
1200			const struct rtnl_af_ops *af_ops;
1201
1202			if (!(af_ops = rtnl_af_lookup(nla_type(af))))
 
1203				return -EAFNOSUPPORT;
1204
1205			if (!af_ops->set_link_af)
1206				return -EOPNOTSUPP;
1207
1208			if (af_ops->validate_link_af) {
1209				err = af_ops->validate_link_af(dev, af);
1210				if (err < 0)
1211					return err;
1212			}
1213		}
1214	}
1215
1216	return 0;
1217}
1218
1219static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
 
1220{
1221	int rem, err = -EINVAL;
1222	struct nlattr *vf;
1223	const struct net_device_ops *ops = dev->netdev_ops;
1224
1225	nla_for_each_nested(vf, attr, rem) {
1226		switch (nla_type(vf)) {
1227		case IFLA_VF_MAC: {
1228			struct ifla_vf_mac *ivm;
1229			ivm = nla_data(vf);
1230			err = -EOPNOTSUPP;
1231			if (ops->ndo_set_vf_mac)
1232				err = ops->ndo_set_vf_mac(dev, ivm->vf,
1233							  ivm->mac);
1234			break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1235		}
1236		case IFLA_VF_VLAN: {
1237			struct ifla_vf_vlan *ivv;
1238			ivv = nla_data(vf);
1239			err = -EOPNOTSUPP;
1240			if (ops->ndo_set_vf_vlan)
1241				err = ops->ndo_set_vf_vlan(dev, ivv->vf,
1242							   ivv->vlan,
1243							   ivv->qos);
1244			break;
1245		}
1246		case IFLA_VF_TX_RATE: {
1247			struct ifla_vf_tx_rate *ivt;
1248			ivt = nla_data(vf);
1249			err = -EOPNOTSUPP;
1250			if (ops->ndo_set_vf_tx_rate)
1251				err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
1252							      ivt->rate);
1253			break;
1254		}
1255		case IFLA_VF_SPOOFCHK: {
1256			struct ifla_vf_spoofchk *ivs;
1257			ivs = nla_data(vf);
1258			err = -EOPNOTSUPP;
1259			if (ops->ndo_set_vf_spoofchk)
1260				err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
1261							       ivs->setting);
1262			break;
1263		}
1264		default:
1265			err = -EINVAL;
1266			break;
1267		}
1268		if (err)
1269			break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1270	}
 
1271	return err;
1272}
1273
1274static int do_set_master(struct net_device *dev, int ifindex)
 
1275{
1276	struct net_device *master_dev;
1277	const struct net_device_ops *ops;
1278	int err;
1279
1280	if (dev->master) {
1281		if (dev->master->ifindex == ifindex)
1282			return 0;
1283		ops = dev->master->netdev_ops;
1284		if (ops->ndo_del_slave) {
1285			err = ops->ndo_del_slave(dev->master, dev);
1286			if (err)
1287				return err;
1288		} else {
1289			return -EOPNOTSUPP;
1290		}
1291	}
1292
1293	if (ifindex) {
1294		master_dev = __dev_get_by_index(dev_net(dev), ifindex);
1295		if (!master_dev)
1296			return -EINVAL;
1297		ops = master_dev->netdev_ops;
1298		if (ops->ndo_add_slave) {
1299			err = ops->ndo_add_slave(master_dev, dev);
1300			if (err)
1301				return err;
1302		} else {
1303			return -EOPNOTSUPP;
1304		}
1305	}
1306	return 0;
1307}
1308
1309static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1310		      struct nlattr **tb, char *ifname, int modified)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1311{
1312	const struct net_device_ops *ops = dev->netdev_ops;
1313	int send_addr_notify = 0;
1314	int err;
1315
1316	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
1317		struct net *net = rtnl_link_get_net(dev_net(dev), tb);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1318		if (IS_ERR(net)) {
1319			err = PTR_ERR(net);
1320			goto errout;
1321		}
1322		err = dev_change_net_namespace(dev, net, ifname);
 
 
 
 
 
 
1323		put_net(net);
1324		if (err)
1325			goto errout;
1326		modified = 1;
1327	}
1328
1329	if (tb[IFLA_MAP]) {
1330		struct rtnl_link_ifmap *u_map;
1331		struct ifmap k_map;
1332
1333		if (!ops->ndo_set_config) {
1334			err = -EOPNOTSUPP;
1335			goto errout;
1336		}
1337
1338		if (!netif_device_present(dev)) {
1339			err = -ENODEV;
1340			goto errout;
1341		}
1342
1343		u_map = nla_data(tb[IFLA_MAP]);
1344		k_map.mem_start = (unsigned long) u_map->mem_start;
1345		k_map.mem_end = (unsigned long) u_map->mem_end;
1346		k_map.base_addr = (unsigned short) u_map->base_addr;
1347		k_map.irq = (unsigned char) u_map->irq;
1348		k_map.dma = (unsigned char) u_map->dma;
1349		k_map.port = (unsigned char) u_map->port;
1350
1351		err = ops->ndo_set_config(dev, &k_map);
1352		if (err < 0)
1353			goto errout;
1354
1355		modified = 1;
1356	}
1357
1358	if (tb[IFLA_ADDRESS]) {
1359		struct sockaddr *sa;
1360		int len;
1361
1362		if (!ops->ndo_set_mac_address) {
1363			err = -EOPNOTSUPP;
1364			goto errout;
1365		}
1366
1367		if (!netif_device_present(dev)) {
1368			err = -ENODEV;
1369			goto errout;
1370		}
1371
1372		len = sizeof(sa_family_t) + dev->addr_len;
1373		sa = kmalloc(len, GFP_KERNEL);
1374		if (!sa) {
1375			err = -ENOMEM;
1376			goto errout;
1377		}
1378		sa->sa_family = dev->type;
1379		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
1380		       dev->addr_len);
1381		err = ops->ndo_set_mac_address(dev, sa);
1382		kfree(sa);
1383		if (err)
1384			goto errout;
1385		send_addr_notify = 1;
1386		modified = 1;
1387		add_device_randomness(dev->dev_addr, dev->addr_len);
1388	}
1389
1390	if (tb[IFLA_MTU]) {
1391		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
1392		if (err < 0)
1393			goto errout;
1394		modified = 1;
1395	}
1396
1397	if (tb[IFLA_GROUP]) {
1398		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
1399		modified = 1;
1400	}
1401
1402	/*
1403	 * Interface selected by interface index but interface
1404	 * name provided implies that a name change has been
1405	 * requested.
1406	 */
1407	if (ifm->ifi_index > 0 && ifname[0]) {
1408		err = dev_change_name(dev, ifname);
1409		if (err < 0)
1410			goto errout;
1411		modified = 1;
1412	}
1413
1414	if (tb[IFLA_IFALIAS]) {
1415		err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
1416				    nla_len(tb[IFLA_IFALIAS]));
1417		if (err < 0)
1418			goto errout;
1419		modified = 1;
1420	}
1421
1422	if (tb[IFLA_BROADCAST]) {
1423		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
1424		send_addr_notify = 1;
 
 
 
 
 
 
 
1425	}
1426
1427	if (ifm->ifi_flags || ifm->ifi_change) {
1428		err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
 
1429		if (err < 0)
1430			goto errout;
1431	}
1432
1433	if (tb[IFLA_MASTER]) {
1434		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
 
 
 
 
 
 
 
 
 
1435		if (err)
1436			goto errout;
1437		modified = 1;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1438	}
1439
1440	if (tb[IFLA_TXQLEN])
1441		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
 
 
 
 
 
 
1442
1443	if (tb[IFLA_OPERSTATE])
1444		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
1445
1446	if (tb[IFLA_LINKMODE]) {
1447		write_lock_bh(&dev_base_lock);
1448		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
1449		write_unlock_bh(&dev_base_lock);
 
 
 
 
1450	}
1451
1452	if (tb[IFLA_VFINFO_LIST]) {
1453		struct nlattr *attr;
1454		int rem;
1455		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
1456			if (nla_type(attr) != IFLA_VF_INFO) {
1457				err = -EINVAL;
1458				goto errout;
1459			}
1460			err = do_setvfinfo(dev, attr);
1461			if (err < 0)
1462				goto errout;
1463			modified = 1;
1464		}
1465	}
1466	err = 0;
1467
1468	if (tb[IFLA_VF_PORTS]) {
1469		struct nlattr *port[IFLA_PORT_MAX+1];
1470		struct nlattr *attr;
1471		int vf;
1472		int rem;
1473
1474		err = -EOPNOTSUPP;
1475		if (!ops->ndo_set_vf_port)
1476			goto errout;
1477
1478		nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
1479			if (nla_type(attr) != IFLA_VF_PORT)
1480				continue;
1481			err = nla_parse_nested(port, IFLA_PORT_MAX,
1482				attr, ifla_port_policy);
1483			if (err < 0)
1484				goto errout;
1485			if (!port[IFLA_PORT_VF]) {
1486				err = -EOPNOTSUPP;
1487				goto errout;
1488			}
1489			vf = nla_get_u32(port[IFLA_PORT_VF]);
1490			err = ops->ndo_set_vf_port(dev, vf, port);
1491			if (err < 0)
1492				goto errout;
1493			modified = 1;
1494		}
1495	}
1496	err = 0;
1497
1498	if (tb[IFLA_PORT_SELF]) {
1499		struct nlattr *port[IFLA_PORT_MAX+1];
1500
1501		err = nla_parse_nested(port, IFLA_PORT_MAX,
1502			tb[IFLA_PORT_SELF], ifla_port_policy);
1503		if (err < 0)
1504			goto errout;
1505
1506		err = -EOPNOTSUPP;
1507		if (ops->ndo_set_vf_port)
1508			err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
1509		if (err < 0)
1510			goto errout;
1511		modified = 1;
1512	}
1513
1514	if (tb[IFLA_AF_SPEC]) {
1515		struct nlattr *af;
1516		int rem;
1517
1518		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
1519			const struct rtnl_af_ops *af_ops;
1520
1521			af_ops = rtnl_af_lookup(nla_type(af));
1522			if (!af_ops)
1523				BUG();
1524			err = af_ops->set_link_af(dev, af);
1525			if (err < 0)
1526				goto errout;
1527
1528			modified = 1;
1529		}
1530	}
1531	err = 0;
1532
1533errout:
1534	if (err < 0 && modified)
1535		net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
1536				     dev->name);
1537
1538	if (send_addr_notify)
1539		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
1540
1541	return err;
1542}
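/*
 * Editor's note: hedged usage sketch, not part of the original file.
 * The do_setlink() paths above are driven from userspace by RTM_SETLINK
 * requests.  A minimal raw-netlink program that changes an interface
 * MTU (the name "eth0" and the value 1400 are illustrative; error
 * handling and the <linux/rtnetlink.h>/<net/if.h> includes omitted)
 * could look roughly like this:
 *
 *	struct {
 *		struct nlmsghdr  nh;
 *		struct ifinfomsg ifi;
 *		char             attrbuf[512];
 *	} req;
 *	struct rtattr *rta;
 *	unsigned int mtu = 1400;
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *	memset(&req, 0, sizeof(req));
 *	req.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(req.ifi));
 *	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
 *	req.nh.nlmsg_type  = RTM_SETLINK;
 *	req.ifi.ifi_family = AF_UNSPEC;
 *	req.ifi.ifi_index  = if_nametoindex("eth0");
 *	rta = (struct rtattr *)(((char *)&req) +
 *				NLMSG_ALIGN(req.nh.nlmsg_len));
 *	rta->rta_type = IFLA_MTU;
 *	rta->rta_len  = RTA_LENGTH(sizeof(mtu));
 *	memcpy(RTA_DATA(rta), &mtu, sizeof(mtu));
 *	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) +
 *			   RTA_LENGTH(sizeof(mtu));
 *	send(fd, &req, req.nh.nlmsg_len, 0);
 */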
1543
1544static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1545{
1546	struct net *net = sock_net(skb->sk);
1547	struct ifinfomsg *ifm;
1548	struct net_device *dev;
1549	int err;
1550	struct nlattr *tb[IFLA_MAX+1];
1551	char ifname[IFNAMSIZ];
1552
1553	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
1554	if (err < 0)
1555		goto errout;
1556
1557	if (tb[IFLA_IFNAME])
1558		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
1559	else
1560		ifname[0] = '\0';
1561
1562	err = -EINVAL;
1563	ifm = nlmsg_data(nlh);
1564	if (ifm->ifi_index > 0)
1565		dev = __dev_get_by_index(net, ifm->ifi_index);
1566	else if (tb[IFLA_IFNAME])
1567		dev = __dev_get_by_name(net, ifname);
1568	else
1569		goto errout;
1570
1571	if (dev == NULL) {
1572		err = -ENODEV;
1573		goto errout;
1574	}
1575
1576	err = validate_linkmsg(dev, tb);
1577	if (err < 0)
1578		goto errout;
1579
1580	err = do_setlink(dev, ifm, tb, ifname, 0);
1581errout:
1582	return err;
1583}
1584
1585static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1586{
1587	struct net *net = sock_net(skb->sk);
1588	const struct rtnl_link_ops *ops;
1589	struct net_device *dev;
1590	struct ifinfomsg *ifm;
1591	char ifname[IFNAMSIZ];
1592	struct nlattr *tb[IFLA_MAX+1];
1593	int err;
1594	LIST_HEAD(list_kill);
1595
1596	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
1597	if (err < 0)
1598		return err;
1599
1600	if (tb[IFLA_IFNAME])
1601		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
1602
1603	ifm = nlmsg_data(nlh);
1604	if (ifm->ifi_index > 0)
1605		dev = __dev_get_by_index(net, ifm->ifi_index);
1606	else if (tb[IFLA_IFNAME])
1607		dev = __dev_get_by_name(net, ifname);
1608	else
1609		return -EINVAL;
1610
1611	if (!dev)
1612		return -ENODEV;
1613
1614	ops = dev->rtnl_link_ops;
1615	if (!ops)
1616		return -EOPNOTSUPP;
1617
1618	ops->dellink(dev, &list_kill);
1619	unregister_netdevice_many(&list_kill);
1620	list_del(&list_kill);
1621	return 0;
1622}
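/*
 * Editor's note: RTM_DELLINK as handled above only succeeds for
 * devices created through rtnl_link_ops (dummy, vlan, bridge, ...);
 * a device without ->rtnl_link_ops gets -EOPNOTSUPP.  The hedged
 * userspace equivalent is e.g. "ip link delete dummy0" (device name
 * illustrative).
 */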
1623
1624int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
1625{
1626	unsigned int old_flags;
1627	int err;
1628
1629	old_flags = dev->flags;
1630	if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
1631		err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
1632		if (err < 0)
1633			return err;
1634	}
1635
1636	dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
1637	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
1638
1639	__dev_notify_flags(dev, old_flags);
1640	return 0;
1641}
1642EXPORT_SYMBOL(rtnl_configure_link);
1643
1644struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
1645	char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[])
1646{
1647	int err;
1648	struct net_device *dev;
1649	unsigned int num_queues = 1;
1650
1651	if (ops->get_tx_queues) {
1652		err = ops->get_tx_queues(src_net, tb);
1653		if (err < 0)
1654			goto err;
1655			num_queues = err;	/* non-negative return is the queue count */
1656	}
1657
1658	err = -ENOMEM;
1659	dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues);
1660	if (!dev)
1661		goto err;
1662
1663	dev_net_set(dev, net);
1664	dev->rtnl_link_ops = ops;
1665	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
1666
1667	if (tb[IFLA_MTU])
1668		dev->mtu = nla_get_u32(tb[IFLA_MTU]);
1669	if (tb[IFLA_ADDRESS])
1670		memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
1671				nla_len(tb[IFLA_ADDRESS]));
1672	if (tb[IFLA_BROADCAST])
1673		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
1674				nla_len(tb[IFLA_BROADCAST]));
1675	if (tb[IFLA_TXQLEN])
1676		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
1677	if (tb[IFLA_OPERSTATE])
1678		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
1679	if (tb[IFLA_LINKMODE])
1680		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
1681	if (tb[IFLA_GROUP])
1682		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
1683
1684	return dev;
1685
1686err:
1687	return ERR_PTR(err);
1688}
1689EXPORT_SYMBOL(rtnl_create_link);
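/*
 * Editor's note: hedged sketch of how a virtual-device driver plugs
 * into the rtnl_create_link()/rtnl_newlink() machinery, mirroring
 * drivers/net/dummy.c (identifiers illustrative):
 *
 *	static struct rtnl_link_ops dummy_link_ops __read_mostly = {
 *		.kind	= "dummy",
 *		.setup	= dummy_setup,
 *	};
 *
 *	rtnl_link_register(&dummy_link_ops);     (from module init)
 *	rtnl_link_unregister(&dummy_link_ops);   (from module exit)
 */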
1690
1691static int rtnl_group_changelink(struct net *net, int group,
1692		struct ifinfomsg *ifm,
1693		struct nlattr **tb)
1694{
1695	struct net_device *dev;
1696	int err;
1697
1698	for_each_netdev(net, dev) {
1699		if (dev->group == group) {
1700			err = do_setlink(dev, ifm, tb, NULL, 0);
1701			if (err < 0)
1702				return err;
1703		}
1704	}
1705
1706	return 0;
1707}
1708
1709static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1710{
1711	struct net *net = sock_net(skb->sk);
1712	const struct rtnl_link_ops *ops;
1713	struct net_device *dev;
1714	struct ifinfomsg *ifm;
1715	char kind[MODULE_NAME_LEN];
1716	char ifname[IFNAMSIZ];
1717	struct nlattr *tb[IFLA_MAX+1];
1718	struct nlattr *linkinfo[IFLA_INFO_MAX+1];
1719	int err;
1720
1721#ifdef CONFIG_MODULES
1722replay:
1723#endif
1724	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
1725	if (err < 0)
1726		return err;
1727
1728	if (tb[IFLA_IFNAME])
1729		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
1730	else
1731		ifname[0] = '\0';
1732
1733	ifm = nlmsg_data(nlh);
1734	if (ifm->ifi_index > 0)
1735		dev = __dev_get_by_index(net, ifm->ifi_index);
1736	else {
1737		if (ifname[0])
1738			dev = __dev_get_by_name(net, ifname);
1739		else
1740			dev = NULL;
1741	}
1742
1743	err = validate_linkmsg(dev, tb);
1744	if (err < 0)
1745		return err;
1746
1747	if (tb[IFLA_LINKINFO]) {
1748		err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
1749				       tb[IFLA_LINKINFO], ifla_info_policy);
1750		if (err < 0)
1751			return err;
1752	} else {
1753		memset(linkinfo, 0, sizeof(linkinfo));
1754	}
1755	if (linkinfo[IFLA_INFO_KIND]) {
1756		nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
1757		ops = rtnl_link_ops_get(kind);
1758	} else {
1759		kind[0] = '\0';
1760		ops = NULL;
1761	}
1762
1763	if (1) {	/* scope for the variable-length "attr" array below */
1764		struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL;
1765		struct net *dest_net;
1766
1767		if (ops) {
1768			if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
1769				err = nla_parse_nested(attr, ops->maxtype,
1770						       linkinfo[IFLA_INFO_DATA],
1771						       ops->policy);
1772				if (err < 0)
1773					return err;
1774				data = attr;
1775			}
1776			if (ops->validate) {
1777				err = ops->validate(tb, data);
1778				if (err < 0)
1779					return err;
1780			}
1781		}
1782
1783		if (dev) {
1784			int modified = 0;
1785
1786			if (nlh->nlmsg_flags & NLM_F_EXCL)
1787				return -EEXIST;
1788			if (nlh->nlmsg_flags & NLM_F_REPLACE)
1789				return -EOPNOTSUPP;
1790
1791			if (linkinfo[IFLA_INFO_DATA]) {
1792				if (!ops || ops != dev->rtnl_link_ops ||
1793				    !ops->changelink)
1794					return -EOPNOTSUPP;
1795
1796				err = ops->changelink(dev, tb, data);
1797				if (err < 0)
1798					return err;
1799				modified = 1;
1800			}
1801
1802			return do_setlink(dev, ifm, tb, ifname, modified);
1803		}
1804
1805		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1806			if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
1807				return rtnl_group_changelink(net,
1808						nla_get_u32(tb[IFLA_GROUP]),
1809						ifm, tb);
1810			return -ENODEV;
1811		}
1812
1813		if (ifm->ifi_index)
1814			return -EOPNOTSUPP;
1815		if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
1816			return -EOPNOTSUPP;
1817
1818		if (!ops) {
1819#ifdef CONFIG_MODULES
1820			if (kind[0]) {
1821				__rtnl_unlock();
1822				request_module("rtnl-link-%s", kind);
1823				rtnl_lock();
1824				ops = rtnl_link_ops_get(kind);
1825				if (ops)
1826					goto replay;
1827			}
1828#endif
1829			return -EOPNOTSUPP;
1830		}
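		/*
		 * Editor's note: the request_module() above relies on the
		 * alias that link drivers declare, e.g. (hedged sketch,
		 * kind string illustrative):
		 *
		 *	MODULE_ALIAS_RTNL_LINK("dummy");
		 *
		 * which expands to MODULE_ALIAS("rtnl-link-dummy").
		 */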
1831
1832		if (!ifname[0])
1833			snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
1834
1835		dest_net = rtnl_link_get_net(net, tb);
1836		if (IS_ERR(dest_net))
1837			return PTR_ERR(dest_net);
1838
1839		dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
1840
1841		if (IS_ERR(dev))
1842			err = PTR_ERR(dev);
1843		else if (ops->newlink)
1844			err = ops->newlink(net, dev, tb, data);
1845		else
1846			err = register_netdevice(dev);
1847
1848		if (err < 0 && !IS_ERR(dev))
1849			free_netdev(dev);
1850		if (err < 0)
1851			goto out;
1852
1853		err = rtnl_configure_link(dev, ifm);
1854		if (err < 0)
1855			unregister_netdevice(dev);
1856out:
1857		put_net(dest_net);
1858		return err;
1859	}
1860}
1861
1862	static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1863{
1864	struct net *net = sock_net(skb->sk);
1865	struct ifinfomsg *ifm;
1866	char ifname[IFNAMSIZ];
1867	struct nlattr *tb[IFLA_MAX+1];
1868	struct net_device *dev = NULL;
1869	struct sk_buff *nskb;
1870	int err;
1871	u32 ext_filter_mask = 0;
1872
1873	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
1874	if (err < 0)
1875		return err;
1876
1877	if (tb[IFLA_IFNAME])
1878		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
1879
1880	if (tb[IFLA_EXT_MASK])
1881		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
1882
1883	ifm = nlmsg_data(nlh);
1884	if (ifm->ifi_index > 0)
1885		dev = __dev_get_by_index(net, ifm->ifi_index);
1886	else if (tb[IFLA_IFNAME])
1887		dev = __dev_get_by_name(net, ifname);
1888	else
1889		return -EINVAL;
1890
1891	if (dev == NULL)
1892		return -ENODEV;
1893
1894	nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
1895	if (nskb == NULL)
1896		return -ENOBUFS;
1897
1898	err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
1899			       nlh->nlmsg_seq, 0, 0, ext_filter_mask);
1900	if (err < 0) {
1901		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
1902		WARN_ON(err == -EMSGSIZE);
1903		kfree_skb(nskb);
1904	} else {
1905		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).pid);
1906	}
1907	return err;
1908}
1909
1910static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
1911{
1912	struct net *net = sock_net(skb->sk);
1913	struct net_device *dev;
1914	struct nlattr *tb[IFLA_MAX+1];
1915	u32 ext_filter_mask = 0;
1916	u16 min_ifinfo_dump_size = 0;
1917
1918	if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
1919			ifla_policy) >= 0) {
1920		if (tb[IFLA_EXT_MASK])
1921			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
1922	}
1923
1924	if (!ext_filter_mask)
1925		return NLMSG_GOODSIZE;
1926	/*
1927	 * traverse the list of net devices and compute the minimum
1928	 * buffer size based upon the filter mask.
1929	 */
1930	list_for_each_entry(dev, &net->dev_base_head, dev_list) {
1931		min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
1932					     if_nlmsg_size(dev,
1933						           ext_filter_mask));
1934	}
1935
1936	return min_ifinfo_dump_size;
1937}
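/*
 * Editor's note: hedged requester-side sketch for rtnl_calcit().  A
 * dump request opts into larger per-device messages by attaching an
 * IFLA_EXT_MASK attribute, e.g. carrying RTEXT_FILTER_VF to ask for
 * SR-IOV VF details; the walk above then pre-sizes the dump buffer
 * from the largest device.  With a libnl-style helper this is roughly:
 *
 *	nla_put_u32(msg, IFLA_EXT_MASK, RTEXT_FILTER_VF);
 */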
1938
1939static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
1940{
1941	int idx;
1942	int s_idx = cb->family;
1943
1944	if (s_idx == 0)
1945		s_idx = 1;
1946	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
1947		int type = cb->nlh->nlmsg_type - RTM_BASE;
1948		if (idx < s_idx || idx == PF_PACKET)
1949			continue;
1950		if (rtnl_msg_handlers[idx] == NULL ||
1951		    rtnl_msg_handlers[idx][type].dumpit == NULL)
1952			continue;
1953		if (idx > s_idx)
1954			memset(&cb->args[0], 0, sizeof(cb->args));
1955		if (rtnl_msg_handlers[idx][type].dumpit(skb, cb))
1956			break;
1957	}
1958	cb->family = idx;
1959
1960	return skb->len;
1961}
1962
1963void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change)
1964{
1965	struct net *net = dev_net(dev);
1966	struct sk_buff *skb;
1967	int err = -ENOBUFS;
1968	size_t if_info_size;
1969
1970	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL);
1971	if (skb == NULL)
1972		goto errout;
1973
1974	err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0);
1975	if (err < 0) {
1976		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
1977		WARN_ON(err == -EMSGSIZE);
1978		kfree_skb(skb);
1979		goto errout;
1980	}
1981	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
1982	return;
1983errout:
1984	if (err < 0)
1985		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
1986}
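/*
 * Editor's note: hedged listener sketch, not part of the original
 * file.  The RTNLGRP_LINK notifications sent by rtmsg_ifinfo() can be
 * observed from userspace by binding a netlink socket to the link
 * multicast group (error handling omitted):
 *
 *	struct sockaddr_nl snl;
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *	memset(&snl, 0, sizeof(snl));
 *	snl.nl_family = AF_NETLINK;
 *	snl.nl_groups = RTMGRP_LINK;
 *	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
 *	(recv() now yields RTM_NEWLINK/RTM_DELLINK messages)
 */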
1987
1988static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
1989				   struct net_device *dev,
1990				   u8 *addr, u32 pid, u32 seq,
1991				   int type, unsigned int flags)
1992{
1993	struct nlmsghdr *nlh;
1994	struct ndmsg *ndm;
1995
1996	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
1997	if (!nlh)
1998		return -EMSGSIZE;
1999
2000	ndm = nlmsg_data(nlh);
2001	ndm->ndm_family  = AF_BRIDGE;
2002	ndm->ndm_pad1	 = 0;
2003	ndm->ndm_pad2    = 0;
2004	ndm->ndm_flags	 = flags;
2005	ndm->ndm_type	 = 0;
2006	ndm->ndm_ifindex = dev->ifindex;
2007	ndm->ndm_state   = NUD_PERMANENT;
2008
2009	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
2010		goto nla_put_failure;
2011
2012	return nlmsg_end(skb, nlh);
2013
2014nla_put_failure:
2015	nlmsg_cancel(skb, nlh);
2016	return -EMSGSIZE;
2017}
2018
2019static inline size_t rtnl_fdb_nlmsg_size(void)
2020{
2021	return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
2022}
2023
2024static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
2025{
2026	struct net *net = dev_net(dev);
2027	struct sk_buff *skb;
2028	int err = -ENOBUFS;
2029
2030	skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
2031	if (!skb)
2032		goto errout;
2033
2034	err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
2035	if (err < 0) {
2036		kfree_skb(skb);
2037		goto errout;
2038	}
2039
2040	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2041	return;
2042errout:
2043	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2044}
2045
2046static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2047{
2048	struct net *net = sock_net(skb->sk);
2049	struct net_device *master = NULL;
2050	struct ndmsg *ndm;
2051	struct nlattr *tb[NDA_MAX+1];
2052	struct net_device *dev;
2053	u8 *addr;
2054	int err;
2055
2056	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
2057	if (err < 0)
2058		return err;
2059
2060	ndm = nlmsg_data(nlh);
2061	if (ndm->ndm_ifindex == 0) {
2062		pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
2063		return -EINVAL;
2064	}
2065
2066	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
2067	if (dev == NULL) {
2068		pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
2069		return -ENODEV;
2070	}
2071
2072	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
2073		pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
2074		return -EINVAL;
2075	}
2076
2077	addr = nla_data(tb[NDA_LLADDR]);
2078	if (!is_valid_ether_addr(addr)) {
2079		pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ether address\n");
2080		return -EINVAL;
2081	}
2082
2083	err = -EOPNOTSUPP;
2084
2085	/* Support fdb on master device (the net/bridge default case) */
2086	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
2087	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
2088		master = dev->master;
2089		err = master->netdev_ops->ndo_fdb_add(ndm, dev, addr,
2090						      nlh->nlmsg_flags);
2091		if (err)
2092			goto out;
2093
2094		ndm->ndm_flags &= ~NTF_MASTER;
2095	}
2096
2097	/* Embedded bridge, macvlan, and any other device support */
2098	if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) {
2099		err = dev->netdev_ops->ndo_fdb_add(ndm, dev, addr,
2100						   nlh->nlmsg_flags);
2101
2102		if (!err) {
2103			rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH);
2104			ndm->ndm_flags &= ~NTF_SELF;
2105		}
2106	}
2107out:
2108	return err;
2109}
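/*
 * Editor's note: NTF_MASTER addresses the FDB of the bridge above the
 * port, NTF_SELF the device's own (embedded) FDB, and with neither
 * flag set the master is the default, matching the two branches in
 * rtnl_fdb_add() above.  Hedged iproute2 equivalents (MAC address and
 * device name illustrative):
 *
 *	bridge fdb add 00:11:22:33:44:55 dev eth0 master
 *	bridge fdb add 00:11:22:33:44:55 dev eth0 self
 */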
2110
2111static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2112{
2113	struct net *net = sock_net(skb->sk);
2114	struct ndmsg *ndm;
2115	struct nlattr *llattr;
2116	struct net_device *dev;
2117	int err = -EINVAL;
2118	u8 *addr;
2119
2120	if (nlmsg_len(nlh) < sizeof(*ndm))
2121		return -EINVAL;
2122
2123	ndm = nlmsg_data(nlh);
2124	if (ndm->ndm_ifindex == 0) {
2125		pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
2126		return -EINVAL;
2127	}
2128
2129	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
2130	if (dev == NULL) {
2131		pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
2132		return -ENODEV;
2133	}
2134
2135	llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
2136	if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
2137		pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n");
2138		return -EINVAL;
2139	}
2140
2141	addr = nla_data(llattr);
2142	err = -EOPNOTSUPP;
2143
2144	/* Support fdb on master device (the net/bridge default case) */
2145	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
2146	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
2147		struct net_device *master = dev->master;
2148
2149		if (master->netdev_ops->ndo_fdb_del)
2150			err = master->netdev_ops->ndo_fdb_del(ndm, dev, addr);
2151
2152		if (err)
2153			goto out;
2154
2155		ndm->ndm_flags &= ~NTF_MASTER;
2156	}
2157
2158	/* Embedded bridge, macvlan, and any other device support */
2159	if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) {
2160		err = dev->netdev_ops->ndo_fdb_del(ndm, dev, addr);
2161
2162		if (!err) {
2163			rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
2164			ndm->ndm_flags &= ~NTF_SELF;
2165		}
2166	}
2167out:
2168	return err;
2169}
2170
2171static int nlmsg_populate_fdb(struct sk_buff *skb,
2172			      struct netlink_callback *cb,
2173			      struct net_device *dev,
2174			      int *idx,
2175			      struct netdev_hw_addr_list *list)
2176{
2177	struct netdev_hw_addr *ha;
2178	int err;
2179	u32 pid, seq;
2180
2181	pid = NETLINK_CB(cb->skb).pid;
2182	seq = cb->nlh->nlmsg_seq;
2183
2184	list_for_each_entry(ha, &list->list, list) {
2185		if (*idx < cb->args[0])
2186			goto skip;
2187
2188		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
2189					      pid, seq, 0, NTF_SELF);
2190		if (err < 0)
2191			return err;
2192skip:
2193		*idx += 1;
2194	}
2195	return 0;
2196}
2197
2198 /**
2199  * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
2200  * @skb: socket buffer the FDB entries are filled into
2201  * @cb: netlink callback, supplies the dump offset in cb->args[0]
2202  * @dev: netdevice whose unicast and multicast lists are dumped
2203  * @idx: index of the first FDB entry to dump
2204  * Returns the index one past the last entry dumped.
2205  */
2206int ndo_dflt_fdb_dump(struct sk_buff *skb,
2207		      struct netlink_callback *cb,
2208		      struct net_device *dev,
2209		      int idx)
2210{
2211	int err;
2212
2213	netif_addr_lock_bh(dev);
2214	err = nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->uc);
2215	if (err)
2216		goto out;
2217	nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc);
2218out:
2219	netif_addr_unlock_bh(dev);
2220	return idx;
2221}
2222EXPORT_SYMBOL(ndo_dflt_fdb_dump);
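/*
 * Editor's note: hedged sketch of a driver wiring up the default dump
 * helper exported above (ops name illustrative):
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_fdb_dump	= ndo_dflt_fdb_dump,
 *	};
 */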
2223
2224static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
2225{
2226	int idx = 0;
2227	struct net *net = sock_net(skb->sk);
2228	struct net_device *dev;
2229
2230	rcu_read_lock();
2231	for_each_netdev_rcu(net, dev) {
2232		if (dev->priv_flags & IFF_BRIDGE_PORT) {
2233			struct net_device *master = dev->master;
2234			const struct net_device_ops *ops = master->netdev_ops;
2235
2236			if (ops->ndo_fdb_dump)
2237				idx = ops->ndo_fdb_dump(skb, cb, dev, idx);
2238		}
2239
2240		if (dev->netdev_ops->ndo_fdb_dump)
2241			idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx);
2242	}
2243	rcu_read_unlock();
2244
2245	cb->args[0] = idx;
2246	return skb->len;
2247}
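/*
 * Editor's note: cb->args[0] is the resume cursor for this dump.  Each
 * ndo_fdb_dump() implementation counts every entry it visits (dumped
 * or skipped) into idx, so a resumed multi-part dump skips entries
 * below the saved cursor and continues where the previous skb filled
 * up.
 */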
2248
2249 /* Protected by the RTNL mutex.  */
2250static struct rtattr **rta_buf;
2251static int rtattr_max;
2252
2253/* Process one rtnetlink message. */
2254
2255static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2256{
2257	struct net *net = sock_net(skb->sk);
2258	rtnl_doit_func doit;
2259	int sz_idx, kind;
2260	int min_len;
2261	int family;
2262	int type;
2263	int err;
2264
2265	type = nlh->nlmsg_type;
2266	if (type > RTM_MAX)
2267		return -EOPNOTSUPP;
2268
2269	type -= RTM_BASE;
2270
2271	/* Every message must carry at least the 1-byte rtgen_family payload */
2272	if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg)))
2273		return 0;
2274
2275	family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family;
2276	sz_idx = type >> 2;
2277	kind = type & 3;	/* 0 = NEW, 1 = DEL, 2 = GET, 3 = SET */
2278
2279	if (kind != 2 && !capable(CAP_NET_ADMIN))
2280		return -EPERM;
2281
2282	if (kind == 2 && (nlh->nlmsg_flags & NLM_F_DUMP)) {
2283		struct sock *rtnl;
2284		rtnl_dumpit_func dumpit;
2285		rtnl_calcit_func calcit;
2286		u16 min_dump_alloc = 0;
2287
2288		dumpit = rtnl_get_dumpit(family, type);
2289		if (dumpit == NULL)
2290			return -EOPNOTSUPP;
2291		calcit = rtnl_get_calcit(family, type);
2292		if (calcit)
2293			min_dump_alloc = calcit(skb, nlh);
2294
2295		__rtnl_unlock();
2296		rtnl = net->rtnl;
2297		{
2298			struct netlink_dump_control c = {
2299				.dump		= dumpit,
2300				.min_dump_alloc	= min_dump_alloc,
2301			};
2302			err = netlink_dump_start(rtnl, skb, nlh, &c);
2303		}
2304		rtnl_lock();
2305		return err;
2306	}
2307
2308	memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));
2309
2310	min_len = rtm_min[sz_idx];
2311	if (nlh->nlmsg_len < min_len)
2312		return -EINVAL;
2313
2314	if (nlh->nlmsg_len > min_len) {
2315		int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
2316		struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
2317
2318		while (RTA_OK(attr, attrlen)) {
2319			unsigned int flavor = attr->rta_type;
2320			if (flavor) {
2321				if (flavor > rta_max[sz_idx])
2322					return -EINVAL;
2323				rta_buf[flavor-1] = attr;
2324			}
2325			attr = RTA_NEXT(attr, attrlen);
2326		}
2327	}
2328
2329	doit = rtnl_get_doit(family, type);
2330	if (doit == NULL)
2331		return -EOPNOTSUPP;
2332
2333	return doit(skb, nlh, (void *)&rta_buf[0]);
2334}
2335
2336static void rtnetlink_rcv(struct sk_buff *skb)
2337{
2338	rtnl_lock();
2339	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
2340	rtnl_unlock();
2341}
2342
2343static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
2344{
2345	struct net_device *dev = ptr;
2346
2347	switch (event) {
2348	case NETDEV_UP:
2349	case NETDEV_DOWN:
2350	case NETDEV_PRE_UP:
2351	case NETDEV_POST_INIT:
2352	case NETDEV_REGISTER:
2353	case NETDEV_CHANGE:
2354	case NETDEV_PRE_TYPE_CHANGE:
2355	case NETDEV_GOING_DOWN:
2356	case NETDEV_UNREGISTER:
2357	case NETDEV_UNREGISTER_BATCH:
2358	case NETDEV_RELEASE:
2359	case NETDEV_JOIN:
2360		break;
2361	default:
2362		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
2363		break;
2364	}
2365	return NOTIFY_DONE;
2366}
2367
2368static struct notifier_block rtnetlink_dev_notifier = {
2369	.notifier_call	= rtnetlink_event,
2370};
2371
2372
2373static int __net_init rtnetlink_net_init(struct net *net)
2374{
2375	struct sock *sk;
2376	sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
2377				   rtnetlink_rcv, &rtnl_mutex, THIS_MODULE);
2378	if (!sk)
2379		return -ENOMEM;
2380	net->rtnl = sk;
2381	return 0;
2382}
2383
2384static void __net_exit rtnetlink_net_exit(struct net *net)
2385{
2386	netlink_kernel_release(net->rtnl);
2387	net->rtnl = NULL;
2388}
2389
2390static struct pernet_operations rtnetlink_net_ops = {
2391	.init = rtnetlink_net_init,
2392	.exit = rtnetlink_net_exit,
2393};
2394
2395void __init rtnetlink_init(void)
2396{
2397	int i;
2398
2399	rtattr_max = 0;
2400	for (i = 0; i < ARRAY_SIZE(rta_max); i++)
2401		if (rta_max[i] > rtattr_max)
2402			rtattr_max = rta_max[i];
2403	rta_buf = kmalloc(rtattr_max * sizeof(struct rtattr *), GFP_KERNEL);
2404	if (!rta_buf)
2405		panic("rtnetlink_init: cannot allocate rta_buf\n");
2406
2407	if (register_pernet_subsys(&rtnetlink_net_ops))
2408		panic("rtnetlink_init: cannot initialize rtnetlink\n");
2409
2410	netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
2411	register_netdevice_notifier(&rtnetlink_dev_notifier);
2412
2413	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
2414		      rtnl_dump_ifinfo, rtnl_calcit);
2415	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, NULL);
2416	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, NULL);
2417	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, NULL);
2418
2419	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL);
2420	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL);
2421
2422	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
2423	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
2424	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
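	/*
	 * Editor's note: other protocol families hook in the same way from
	 * their own init paths, e.g. (hedged, as net/ipv4/fib_frontend.c
	 * does for routes):
	 *
	 *	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute,
	 *		      NULL, NULL);
	 */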
2425}
2426