v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	NET3	IP device support routines.
   4 *
   5 *	Derived from the IP parts of dev.c 1.0.19
   6 * 		Authors:	Ross Biro
   7 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
   8 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
   9 *
  10 *	Additional Authors:
  11 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  12 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  13 *
  14 *	Changes:
  15 *		Alexey Kuznetsov:	pa_* fields are replaced with ifaddr
  16 *					lists.
  17 *		Cyrus Durgin:		updated for kmod
  18 *		Matthias Andree:	in devinet_ioctl, compare label and
  19 *					address (4.4BSD alias style support),
  20 *					fall back to comparing just the label
  21 *					if no match found.
  22 */
  23
  24
  25#include <linux/uaccess.h>
  26#include <linux/bitops.h>
  27#include <linux/capability.h>
  28#include <linux/module.h>
  29#include <linux/types.h>
  30#include <linux/kernel.h>
  31#include <linux/sched/signal.h>
  32#include <linux/string.h>
  33#include <linux/mm.h>
  34#include <linux/socket.h>
  35#include <linux/sockios.h>
  36#include <linux/in.h>
  37#include <linux/errno.h>
  38#include <linux/interrupt.h>
  39#include <linux/if_addr.h>
  40#include <linux/if_ether.h>
  41#include <linux/inet.h>
  42#include <linux/netdevice.h>
  43#include <linux/etherdevice.h>
  44#include <linux/skbuff.h>
  45#include <linux/init.h>
  46#include <linux/notifier.h>
  47#include <linux/inetdevice.h>
  48#include <linux/igmp.h>
  49#include <linux/slab.h>
  50#include <linux/hash.h>
  51#ifdef CONFIG_SYSCTL
  52#include <linux/sysctl.h>
  53#endif
  54#include <linux/kmod.h>
  55#include <linux/netconf.h>
  56
  57#include <net/arp.h>
  58#include <net/ip.h>
  59#include <net/route.h>
  60#include <net/ip_fib.h>
  61#include <net/rtnetlink.h>
  62#include <net/net_namespace.h>
  63#include <net/addrconf.h>
  64
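/* Address flags that only make sense for IPv6.  They are masked off
 * of any IPv4 address in __inet_insert_ifa() below.
 */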
  65#define IPV6ONLY_FLAGS	\
  66		(IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
  67		 IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
  68		 IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
  69
  70static struct ipv4_devconf ipv4_devconf = {
  71	.data = {
  72		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
  73		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
  74		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
  75		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
  76		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
  77		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] =  1000 /*ms*/,
  78	},
  79};
  80
  81static struct ipv4_devconf ipv4_devconf_dflt = {
  82	.data = {
  83		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
  84		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
  85		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
  86		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
  87		[IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
  88		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
  89		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] =  1000 /*ms*/,
  90	},
  91};
  92
  93#define IPV4_DEVCONF_DFLT(net, attr) \
  94	IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
  95
  96static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
  97	[IFA_LOCAL]     	= { .type = NLA_U32 },
  98	[IFA_ADDRESS]   	= { .type = NLA_U32 },
  99	[IFA_BROADCAST] 	= { .type = NLA_U32 },
 100	[IFA_LABEL]     	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
 101	[IFA_CACHEINFO]		= { .len = sizeof(struct ifa_cacheinfo) },
 102	[IFA_FLAGS]		= { .type = NLA_U32 },
 103	[IFA_RT_PRIORITY]	= { .type = NLA_U32 },
 104	[IFA_TARGET_NETNSID]	= { .type = NLA_S32 },
 105};
 106
 107struct inet_fill_args {
 108	u32 portid;
 109	u32 seq;
 110	int event;
 111	unsigned int flags;
 112	int netnsid;
 113	int ifindex;
 114};
 115
 116#define IN4_ADDR_HSIZE_SHIFT	8
 117#define IN4_ADDR_HSIZE		(1U << IN4_ADDR_HSIZE_SHIFT)
 118
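/* Global hash table of all IPv4 addresses, keyed by netns and local
 * address; IN4_ADDR_HSIZE_SHIFT = 8 gives 256 buckets.
 */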
 119static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
 120
 121static u32 inet_addr_hash(const struct net *net, __be32 addr)
 122{
 123	u32 val = (__force u32) addr ^ net_hash_mix(net);
 124
 125	return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
 126}
 127
 128static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
 129{
 130	u32 hash = inet_addr_hash(net, ifa->ifa_local);
 131
 132	ASSERT_RTNL();
 133	hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
 134}
 135
 136static void inet_hash_remove(struct in_ifaddr *ifa)
 137{
 138	ASSERT_RTNL();
 139	hlist_del_init_rcu(&ifa->hash);
 140}
 141
 142/**
 143 * __ip_dev_find - find the first device with a given source address.
 144 * @net: the net namespace
 145 * @addr: the source address
 146 * @devref: if true, take a reference on the found device
 147 *
 148 * If a caller uses devref=false, it should be protected by RCU or RTNL
 149 */
 150struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
 151{
 152	struct net_device *result = NULL;
 153	struct in_ifaddr *ifa;
 154
 155	rcu_read_lock();
 156	ifa = inet_lookup_ifaddr_rcu(net, addr);
 157	if (!ifa) {
 158		struct flowi4 fl4 = { .daddr = addr };
 159		struct fib_result res = { 0 };
 160		struct fib_table *local;
 161
 162		/* Fall back to the FIB local table so that communication
 163		 * over loopback subnets works.
 164		 */
 165		local = fib_get_table(net, RT_TABLE_LOCAL);
 166		if (local &&
 167		    !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
 168		    res.type == RTN_LOCAL)
 169			result = FIB_RES_DEV(res);
 170	} else {
 171		result = ifa->ifa_dev->dev;
 172	}
 173	if (result && devref)
 174		dev_hold(result);
 175	rcu_read_unlock();
 176	return result;
 177}
 178EXPORT_SYMBOL(__ip_dev_find);
 179
 180/* called under RCU lock */
 181struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr)
 182{
 183	u32 hash = inet_addr_hash(net, addr);
 184	struct in_ifaddr *ifa;
 185
 186	hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash)
 187		if (ifa->ifa_local == addr &&
 188		    net_eq(dev_net(ifa->ifa_dev->dev), net))
 189			return ifa;
 190
 191	return NULL;
 192}
 193
 194static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
 195
 196static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
 197static BLOCKING_NOTIFIER_HEAD(inetaddr_validator_chain);
 198static void inet_del_ifa(struct in_device *in_dev,
 199			 struct in_ifaddr __rcu **ifap,
 200			 int destroy);
 201#ifdef CONFIG_SYSCTL
 202static int devinet_sysctl_register(struct in_device *idev);
 203static void devinet_sysctl_unregister(struct in_device *idev);
 204#else
 205static int devinet_sysctl_register(struct in_device *idev)
 206{
 207	return 0;
 208}
 209static void devinet_sysctl_unregister(struct in_device *idev)
 210{
 211}
 212#endif
 213
 214/* Locks all the inet devices. */
 215
 216static struct in_ifaddr *inet_alloc_ifa(void)
 217{
 218	return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL);
 219}
 220
 221static void inet_rcu_free_ifa(struct rcu_head *head)
 222{
 223	struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
 224	if (ifa->ifa_dev)
 225		in_dev_put(ifa->ifa_dev);
 226	kfree(ifa);
 227}
 228
 229static void inet_free_ifa(struct in_ifaddr *ifa)
 230{
 231	call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
 232}
 233
 234void in_dev_finish_destroy(struct in_device *idev)
 235{
 236	struct net_device *dev = idev->dev;
 237
 238	WARN_ON(idev->ifa_list);
 239	WARN_ON(idev->mc_list);
 240	kfree(rcu_dereference_protected(idev->mc_hash, 1));
 241#ifdef NET_REFCNT_DEBUG
 242	pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
 243#endif
 244	dev_put(dev);
 245	if (!idev->dead)
 246		pr_err("Freeing alive in_device %p\n", idev);
 247	else
 248		kfree(idev);
 249}
 250EXPORT_SYMBOL(in_dev_finish_destroy);
 251
 252static struct in_device *inetdev_init(struct net_device *dev)
 253{
 254	struct in_device *in_dev;
 255	int err = -ENOMEM;
 256
 257	ASSERT_RTNL();
 258
 259	in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
 260	if (!in_dev)
 261		goto out;
 262	memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
 263			sizeof(in_dev->cnf));
 264	in_dev->cnf.sysctl = NULL;
 265	in_dev->dev = dev;
 266	in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
 267	if (!in_dev->arp_parms)
 268		goto out_kfree;
 269	if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
 270		dev_disable_lro(dev);
 271	/* Reference in_dev->dev */
 272	dev_hold(dev);
 273	/* Account for reference dev->ip_ptr (below) */
 274	refcount_set(&in_dev->refcnt, 1);
 275
 276	err = devinet_sysctl_register(in_dev);
 277	if (err) {
 278		in_dev->dead = 1;
 279		neigh_parms_release(&arp_tbl, in_dev->arp_parms);
 280		in_dev_put(in_dev);
 281		in_dev = NULL;
 282		goto out;
 283	}
 284	ip_mc_init_dev(in_dev);
 285	if (dev->flags & IFF_UP)
 286		ip_mc_up(in_dev);
 287
 288	/* we can receive as soon as ip_ptr is set -- do this last */
 289	rcu_assign_pointer(dev->ip_ptr, in_dev);
 290out:
 291	return in_dev ?: ERR_PTR(err);
 292out_kfree:
 293	kfree(in_dev);
 294	in_dev = NULL;
 295	goto out;
 296}
 297
 298static void in_dev_rcu_put(struct rcu_head *head)
 299{
 300	struct in_device *idev = container_of(head, struct in_device, rcu_head);
 301	in_dev_put(idev);
 302}
 303
 304static void inetdev_destroy(struct in_device *in_dev)
 305{
 306	struct net_device *dev;
 307	struct in_ifaddr *ifa;
 308
 309	ASSERT_RTNL();
 310
 311	dev = in_dev->dev;
 312
 313	in_dev->dead = 1;
 314
 315	ip_mc_destroy_dev(in_dev);
 316
 317	while ((ifa = rtnl_dereference(in_dev->ifa_list)) != NULL) {
 318		inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
 319		inet_free_ifa(ifa);
 320	}
 321
 322	RCU_INIT_POINTER(dev->ip_ptr, NULL);
 323
 324	devinet_sysctl_unregister(in_dev);
 325	neigh_parms_release(&arp_tbl, in_dev->arp_parms);
 326	arp_ifdown(dev);
 327
 328	call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
 329}
 330
 331int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
 332{
 333	const struct in_ifaddr *ifa;
 334
 335	rcu_read_lock();
 336	in_dev_for_each_ifa_rcu(ifa, in_dev) {
 337		if (inet_ifa_match(a, ifa)) {
 338			if (!b || inet_ifa_match(b, ifa)) {
 339				rcu_read_unlock();
 340				return 1;
 341			}
 342		}
 343	}
 344	rcu_read_unlock();
 345	return 0;
 346}
 347
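/* Remove the address *ifap from in_dev's list.  Deleting a primary
 * address also deletes its secondaries, unless promote_secondaries is
 * enabled; in that case the first secondary in the same subnet is
 * promoted to primary and the routes of the remaining secondaries are
 * re-added with the new prefsrc.
 */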
 348static void __inet_del_ifa(struct in_device *in_dev,
 349			   struct in_ifaddr __rcu **ifap,
 350			   int destroy, struct nlmsghdr *nlh, u32 portid)
 351{
 352	struct in_ifaddr *promote = NULL;
 353	struct in_ifaddr *ifa, *ifa1;
 354	struct in_ifaddr *last_prim;
 355	struct in_ifaddr *prev_prom = NULL;
 356	int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
 357
 358	ASSERT_RTNL();
 359
 360	ifa1 = rtnl_dereference(*ifap);
 361	last_prim = rtnl_dereference(in_dev->ifa_list);
 362	if (in_dev->dead)
 363		goto no_promotions;
 364
 365	/* 1. Deleting the primary ifaddr forces deletion of all secondaries
 366	 * unless alias promotion is set.
 367	 */
 368
 369	if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
 370		struct in_ifaddr __rcu **ifap1 = &ifa1->ifa_next;
 371
 372		while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
 373			if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
 374			    ifa1->ifa_scope <= ifa->ifa_scope)
 375				last_prim = ifa;
 376
 377			if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
 378			    ifa1->ifa_mask != ifa->ifa_mask ||
 379			    !inet_ifa_match(ifa1->ifa_address, ifa)) {
 380				ifap1 = &ifa->ifa_next;
 381				prev_prom = ifa;
 382				continue;
 383			}
 384
 385			if (!do_promote) {
 386				inet_hash_remove(ifa);
 387				*ifap1 = ifa->ifa_next;
 388
 389				rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
 390				blocking_notifier_call_chain(&inetaddr_chain,
 391						NETDEV_DOWN, ifa);
 392				inet_free_ifa(ifa);
 393			} else {
 394				promote = ifa;
 395				break;
 396			}
 397		}
 398	}
 399
 400	/* On promotion all secondaries from the subnet change
 401	 * the primary IP, so we must remove all their routes silently
 402	 * and later add them back with the new prefsrc. Do this
 403	 * while all addresses are still on the device list.
 404	 */
 405	for (ifa = promote; ifa; ifa = rtnl_dereference(ifa->ifa_next)) {
 406		if (ifa1->ifa_mask == ifa->ifa_mask &&
 407		    inet_ifa_match(ifa1->ifa_address, ifa))
 408			fib_del_ifaddr(ifa, ifa1);
 409	}
 410
 411no_promotions:
 412	/* 2. Unlink it */
 413
 414	*ifap = ifa1->ifa_next;
 415	inet_hash_remove(ifa1);
 416
 417	/* 3. Announce address deletion */
 418
 419	/* Send the message first, then call the notifier.
 420	   At first sight, the FIB update triggered by the notifier
 421	   will refer to an already deleted ifaddr, which could confuse
 422	   netlink listeners. It is not true: gated sees that the route
 423	   was deleted, and if it still thinks the ifaddr is valid,
 424	   it will try to restore the deleted routes... Grr.
 425	   So this order is correct.
 426	 */
 427	rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
 428	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
 429
 430	if (promote) {
 431		struct in_ifaddr *next_sec;
 432
 433		next_sec = rtnl_dereference(promote->ifa_next);
 434		if (prev_prom) {
 435			struct in_ifaddr *last_sec;
 436
 437			rcu_assign_pointer(prev_prom->ifa_next, next_sec);
 438
 439			last_sec = rtnl_dereference(last_prim->ifa_next);
 440			rcu_assign_pointer(promote->ifa_next, last_sec);
 441			rcu_assign_pointer(last_prim->ifa_next, promote);
 442		}
 443
 444		promote->ifa_flags &= ~IFA_F_SECONDARY;
 445		rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
 446		blocking_notifier_call_chain(&inetaddr_chain,
 447				NETDEV_UP, promote);
 448		for (ifa = next_sec; ifa;
 449		     ifa = rtnl_dereference(ifa->ifa_next)) {
 450			if (ifa1->ifa_mask != ifa->ifa_mask ||
 451			    !inet_ifa_match(ifa1->ifa_address, ifa))
 452					continue;
 453			fib_add_ifaddr(ifa);
 454		}
 455
 456	}
 457	if (destroy)
 458		inet_free_ifa(ifa1);
 459}
 460
 461static void inet_del_ifa(struct in_device *in_dev,
 462			 struct in_ifaddr __rcu **ifap,
 463			 int destroy)
 464{
 465	__inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
 466}
 467
 468static void check_lifetime(struct work_struct *work);
 469
 470static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
 471
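/* Link a new address into its device's ifa_list.  An address in the
 * same subnet as an existing address on the device becomes a
 * secondary (IFA_F_SECONDARY); an exact duplicate returns -EEXIST and
 * a subnet match with a different scope returns -EINVAL.  Registered
 * validators may veto the address before it is committed.
 */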
 472static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
 473			     u32 portid, struct netlink_ext_ack *extack)
 474{
 475	struct in_ifaddr __rcu **last_primary, **ifap;
 476	struct in_device *in_dev = ifa->ifa_dev;
 477	struct in_validator_info ivi;
 478	struct in_ifaddr *ifa1;
 479	int ret;
 480
 481	ASSERT_RTNL();
 482
 483	if (!ifa->ifa_local) {
 484		inet_free_ifa(ifa);
 485		return 0;
 486	}
 487
 488	ifa->ifa_flags &= ~IFA_F_SECONDARY;
 489	last_primary = &in_dev->ifa_list;
 490
 491	/* Don't set IPv6-only flags on IPv4 addresses */
 492	ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
 493
 494	ifap = &in_dev->ifa_list;
 495	ifa1 = rtnl_dereference(*ifap);
 496
 497	while (ifa1) {
 498		if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
 499		    ifa->ifa_scope <= ifa1->ifa_scope)
 500			last_primary = &ifa1->ifa_next;
 501		if (ifa1->ifa_mask == ifa->ifa_mask &&
 502		    inet_ifa_match(ifa1->ifa_address, ifa)) {
 503			if (ifa1->ifa_local == ifa->ifa_local) {
 504				inet_free_ifa(ifa);
 505				return -EEXIST;
 506			}
 507			if (ifa1->ifa_scope != ifa->ifa_scope) {
 508				inet_free_ifa(ifa);
 509				return -EINVAL;
 510			}
 511			ifa->ifa_flags |= IFA_F_SECONDARY;
 512		}
 513
 514		ifap = &ifa1->ifa_next;
 515		ifa1 = rtnl_dereference(*ifap);
 516	}
 517
 518	/* Allow any devices that wish to register ifaddr validators to weigh
 519	 * in now, before changes are committed.  The rtnl lock is serializing
 520	 * access here, so the state should not change between a validator call
 521	 * and a final notify on commit.  This isn't invoked on promotion under
 522	 * the assumption that validators are checking the address itself, and
 523	 * not the flags.
 524	 */
 525	ivi.ivi_addr = ifa->ifa_address;
 526	ivi.ivi_dev = ifa->ifa_dev;
 527	ivi.extack = extack;
 528	ret = blocking_notifier_call_chain(&inetaddr_validator_chain,
 529					   NETDEV_UP, &ivi);
 530	ret = notifier_to_errno(ret);
 531	if (ret) {
 532		inet_free_ifa(ifa);
 533		return ret;
 534	}
 535
 536	if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
 537		prandom_seed((__force u32) ifa->ifa_local);
 538		ifap = last_primary;
 539	}
 540
 541	rcu_assign_pointer(ifa->ifa_next, *ifap);
 542	rcu_assign_pointer(*ifap, ifa);
 543
 544	inet_hash_insert(dev_net(in_dev->dev), ifa);
 545
 546	cancel_delayed_work(&check_lifetime_work);
 547	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
 548
 549	/* Send the message first, then call the notifier.
 550	   The notifier will trigger a FIB update, so that
 551	   netlink listeners will know about the new ifaddr */
 552	rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
 553	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
 554
 555	return 0;
 556}
 557
 558static int inet_insert_ifa(struct in_ifaddr *ifa)
 559{
 560	return __inet_insert_ifa(ifa, NULL, 0, NULL);
 561}
 562
 563static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
 564{
 565	struct in_device *in_dev = __in_dev_get_rtnl(dev);
 566
 567	ASSERT_RTNL();
 568
 569	if (!in_dev) {
 570		inet_free_ifa(ifa);
 571		return -ENOBUFS;
 572	}
 573	ipv4_devconf_setall(in_dev);
 574	neigh_parms_data_state_setall(in_dev->arp_parms);
 575	if (ifa->ifa_dev != in_dev) {
 576		WARN_ON(ifa->ifa_dev);
 577		in_dev_hold(in_dev);
 578		ifa->ifa_dev = in_dev;
 579	}
 580	if (ipv4_is_loopback(ifa->ifa_local))
 581		ifa->ifa_scope = RT_SCOPE_HOST;
 582	return inet_insert_ifa(ifa);
 583}
 584
 585/* Caller must hold RCU or RTNL:
 586 * we don't take a reference on the found in_device
 587 */
 588struct in_device *inetdev_by_index(struct net *net, int ifindex)
 589{
 590	struct net_device *dev;
 591	struct in_device *in_dev = NULL;
 592
 593	rcu_read_lock();
 594	dev = dev_get_by_index_rcu(net, ifindex);
 595	if (dev)
 596		in_dev = rcu_dereference_rtnl(dev->ip_ptr);
 597	rcu_read_unlock();
 598	return in_dev;
 599}
 600EXPORT_SYMBOL(inetdev_by_index);
 601
 602/* Called only from RTNL-locked context. No other locks are taken. */
 603
 604struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
 605				    __be32 mask)
 606{
 607	struct in_ifaddr *ifa;
 608
 609	ASSERT_RTNL();
 610
 611	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
 612		if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
 613			return ifa;
 614	}
 615	return NULL;
 616}
 617
 618static int ip_mc_autojoin_config(struct net *net, bool join,
 619				 const struct in_ifaddr *ifa)
 620{
 621#if defined(CONFIG_IP_MULTICAST)
 622	struct ip_mreqn mreq = {
 623		.imr_multiaddr.s_addr = ifa->ifa_address,
 624		.imr_ifindex = ifa->ifa_dev->dev->ifindex,
 625	};
 626	struct sock *sk = net->ipv4.mc_autojoin_sk;
 627	int ret;
 628
 629	ASSERT_RTNL();
 630
 631	lock_sock(sk);
 632	if (join)
 633		ret = ip_mc_join_group(sk, &mreq);
 634	else
 635		ret = ip_mc_leave_group(sk, &mreq);
 636	release_sock(sk);
 637
 638	return ret;
 639#else
 640	return -EOPNOTSUPP;
 641#endif
 642}
 643
 644static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
 645			    struct netlink_ext_ack *extack)
 646{
 647	struct net *net = sock_net(skb->sk);
 648	struct in_ifaddr __rcu **ifap;
 649	struct nlattr *tb[IFA_MAX+1];
 650	struct in_device *in_dev;
 651	struct ifaddrmsg *ifm;
 652	struct in_ifaddr *ifa;
 653
 654	int err = -EINVAL;
 655
 656	ASSERT_RTNL();
 657
 658	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
 659				     ifa_ipv4_policy, extack);
 660	if (err < 0)
 661		goto errout;
 662
 663	ifm = nlmsg_data(nlh);
 664	in_dev = inetdev_by_index(net, ifm->ifa_index);
 665	if (!in_dev) {
 666		err = -ENODEV;
 667		goto errout;
 668	}
 669
 670	for (ifap = &in_dev->ifa_list; (ifa = rtnl_dereference(*ifap)) != NULL;
 671	     ifap = &ifa->ifa_next) {
 672		if (tb[IFA_LOCAL] &&
 673		    ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
 674			continue;
 675
 676		if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
 677			continue;
 678
 679		if (tb[IFA_ADDRESS] &&
 680		    (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
 681		    !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
 682			continue;
 683
 684		if (ipv4_is_multicast(ifa->ifa_address))
 685			ip_mc_autojoin_config(net, false, ifa);
 686		__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
 687		return 0;
 688	}
 689
 690	err = -EADDRNOTAVAIL;
 691errout:
 692	return err;
 693}
 694
 695#define INFINITY_LIFE_TIME	0xFFFFFFFF
 696
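/* Periodic worker that walks the address hash table and expires
 * addresses whose valid_lft has passed (they are deleted) or whose
 * preferred_lft has passed (they are marked IFA_F_DEPRECATED).  The
 * first pass runs under RCU only to detect work; modifications are
 * made in a second pass under the RTNL lock.
 */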
 697static void check_lifetime(struct work_struct *work)
 698{
 699	unsigned long now, next, next_sec, next_sched;
 700	struct in_ifaddr *ifa;
 701	struct hlist_node *n;
 702	int i;
 703
 704	now = jiffies;
 705	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
 706
 707	for (i = 0; i < IN4_ADDR_HSIZE; i++) {
 708		bool change_needed = false;
 709
 710		rcu_read_lock();
 711		hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
 712			unsigned long age;
 713
 714			if (ifa->ifa_flags & IFA_F_PERMANENT)
 715				continue;
 716
 717			/* We try to batch several events at once. */
 718			age = (now - ifa->ifa_tstamp +
 719			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
 720
 721			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
 722			    age >= ifa->ifa_valid_lft) {
 723				change_needed = true;
 724			} else if (ifa->ifa_preferred_lft ==
 725				   INFINITY_LIFE_TIME) {
 726				continue;
 727			} else if (age >= ifa->ifa_preferred_lft) {
 728				if (time_before(ifa->ifa_tstamp +
 729						ifa->ifa_valid_lft * HZ, next))
 730					next = ifa->ifa_tstamp +
 731					       ifa->ifa_valid_lft * HZ;
 732
 733				if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
 734					change_needed = true;
 735			} else if (time_before(ifa->ifa_tstamp +
 736					       ifa->ifa_preferred_lft * HZ,
 737					       next)) {
 738				next = ifa->ifa_tstamp +
 739				       ifa->ifa_preferred_lft * HZ;
 740			}
 741		}
 742		rcu_read_unlock();
 743		if (!change_needed)
 744			continue;
 745		rtnl_lock();
 746		hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
 747			unsigned long age;
 748
 749			if (ifa->ifa_flags & IFA_F_PERMANENT)
 750				continue;
 751
 752			/* We try to batch several events at once. */
 753			age = (now - ifa->ifa_tstamp +
 754			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
 755
 756			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
 757			    age >= ifa->ifa_valid_lft) {
 758				struct in_ifaddr __rcu **ifap;
 759				struct in_ifaddr *tmp;
 760
 761				ifap = &ifa->ifa_dev->ifa_list;
 762				tmp = rtnl_dereference(*ifap);
 763				while (tmp) {
 764					if (tmp == ifa) {
 765						inet_del_ifa(ifa->ifa_dev,
 766							     ifap, 1);
 767						break;
 768					}
 769					ifap = &tmp->ifa_next;
 770					tmp = rtnl_dereference(*ifap);
 771				}
 772			} else if (ifa->ifa_preferred_lft !=
 773				   INFINITY_LIFE_TIME &&
 774				   age >= ifa->ifa_preferred_lft &&
 775				   !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
 776				ifa->ifa_flags |= IFA_F_DEPRECATED;
 777				rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
 778			}
 779		}
 780		rtnl_unlock();
 781	}
 782
 783	next_sec = round_jiffies_up(next);
 784	next_sched = next;
 785
 786	/* If rounded timeout is accurate enough, accept it. */
 787	if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
 788		next_sched = next_sec;
 789
 790	now = jiffies;
 791	/* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
 792	if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
 793		next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
 794
 795	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work,
 796			next_sched - now);
 797}
 798
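/* Apply valid/preferred lifetimes to an address: an infinite valid
 * lifetime makes the address IFA_F_PERMANENT, a zero preferred
 * lifetime marks it IFA_F_DEPRECATED, and the timestamps are updated.
 */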
 799static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
 800			     __u32 prefered_lft)
 801{
 802	unsigned long timeout;
 803
 804	ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
 805
 806	timeout = addrconf_timeout_fixup(valid_lft, HZ);
 807	if (addrconf_finite_timeout(timeout))
 808		ifa->ifa_valid_lft = timeout;
 809	else
 810		ifa->ifa_flags |= IFA_F_PERMANENT;
 811
 812	timeout = addrconf_timeout_fixup(prefered_lft, HZ);
 813	if (addrconf_finite_timeout(timeout)) {
 814		if (timeout == 0)
 815			ifa->ifa_flags |= IFA_F_DEPRECATED;
 816		ifa->ifa_preferred_lft = timeout;
 817	}
 818	ifa->ifa_tstamp = jiffies;
 819	if (!ifa->ifa_cstamp)
 820		ifa->ifa_cstamp = ifa->ifa_tstamp;
 821}
 822
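/* Parse an RTM_NEWADDR request into a freshly allocated in_ifaddr.
 * IFA_LOCAL is mandatory; IFA_ADDRESS defaults to IFA_LOCAL when
 * absent, and the label defaults to the device name.  Lifetimes from
 * IFA_CACHEINFO are returned via *pvalid_lft and *pprefered_lft.
 */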
 823static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
 824				       __u32 *pvalid_lft, __u32 *pprefered_lft,
 825				       struct netlink_ext_ack *extack)
 826{
 827	struct nlattr *tb[IFA_MAX+1];
 828	struct in_ifaddr *ifa;
 829	struct ifaddrmsg *ifm;
 830	struct net_device *dev;
 831	struct in_device *in_dev;
 832	int err;
 833
 834	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
 835				     ifa_ipv4_policy, extack);
 836	if (err < 0)
 837		goto errout;
 838
 839	ifm = nlmsg_data(nlh);
 840	err = -EINVAL;
 841	if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
 842		goto errout;
 843
 844	dev = __dev_get_by_index(net, ifm->ifa_index);
 845	err = -ENODEV;
 846	if (!dev)
 847		goto errout;
 848
 849	in_dev = __in_dev_get_rtnl(dev);
 850	err = -ENOBUFS;
 851	if (!in_dev)
 852		goto errout;
 853
 854	ifa = inet_alloc_ifa();
 855	if (!ifa)
 856		/*
  857		 * A potential in_dev allocation can be left alive; it stays
  858		 * assigned to its device and is destroyed with it.
 859		 */
 860		goto errout;
 861
 862	ipv4_devconf_setall(in_dev);
 863	neigh_parms_data_state_setall(in_dev->arp_parms);
 864	in_dev_hold(in_dev);
 865
 866	if (!tb[IFA_ADDRESS])
 867		tb[IFA_ADDRESS] = tb[IFA_LOCAL];
 868
 869	INIT_HLIST_NODE(&ifa->hash);
 870	ifa->ifa_prefixlen = ifm->ifa_prefixlen;
 871	ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
 872	ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
 873					 ifm->ifa_flags;
 874	ifa->ifa_scope = ifm->ifa_scope;
 875	ifa->ifa_dev = in_dev;
 876
 877	ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
 878	ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
 879
 880	if (tb[IFA_BROADCAST])
 881		ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
 882
 883	if (tb[IFA_LABEL])
 884		nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
 885	else
 886		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
 887
 888	if (tb[IFA_RT_PRIORITY])
 889		ifa->ifa_rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
 890
 891	if (tb[IFA_CACHEINFO]) {
 892		struct ifa_cacheinfo *ci;
 893
 894		ci = nla_data(tb[IFA_CACHEINFO]);
 895		if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
 896			err = -EINVAL;
 897			goto errout_free;
 898		}
 899		*pvalid_lft = ci->ifa_valid;
 900		*pprefered_lft = ci->ifa_prefered;
 901	}
 902
 903	return ifa;
 904
 905errout_free:
 906	inet_free_ifa(ifa);
 907errout:
 908	return ERR_PTR(err);
 909}
 910
 911static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
 912{
 913	struct in_device *in_dev = ifa->ifa_dev;
 914	struct in_ifaddr *ifa1;
 915
 916	if (!ifa->ifa_local)
 917		return NULL;
 918
 919	in_dev_for_each_ifa_rtnl(ifa1, in_dev) {
 920		if (ifa1->ifa_mask == ifa->ifa_mask &&
 921		    inet_ifa_match(ifa1->ifa_address, ifa) &&
 922		    ifa1->ifa_local == ifa->ifa_local)
 923			return ifa1;
 924	}
 925	return NULL;
 926}
 927
 928static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
 929			    struct netlink_ext_ack *extack)
 930{
 931	struct net *net = sock_net(skb->sk);
 932	struct in_ifaddr *ifa;
 933	struct in_ifaddr *ifa_existing;
 934	__u32 valid_lft = INFINITY_LIFE_TIME;
 935	__u32 prefered_lft = INFINITY_LIFE_TIME;
 936
 937	ASSERT_RTNL();
 938
 939	ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft, extack);
 940	if (IS_ERR(ifa))
 941		return PTR_ERR(ifa);
 942
 943	ifa_existing = find_matching_ifa(ifa);
 944	if (!ifa_existing) {
 945		/* It would be best to check for !NLM_F_CREATE here but
 946		 * userspace already relies on not having to provide this.
 947		 */
 948		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
 949		if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
 950			int ret = ip_mc_autojoin_config(net, true, ifa);
 951
 952			if (ret < 0) {
 953				inet_free_ifa(ifa);
 954				return ret;
 955			}
 956		}
 957		return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid,
 958					 extack);
 959	} else {
 960		u32 new_metric = ifa->ifa_rt_priority;
 961
 962		inet_free_ifa(ifa);
 963
 964		if (nlh->nlmsg_flags & NLM_F_EXCL ||
 965		    !(nlh->nlmsg_flags & NLM_F_REPLACE))
 966			return -EEXIST;
 967		ifa = ifa_existing;
 968
 969		if (ifa->ifa_rt_priority != new_metric) {
 970			fib_modify_prefix_metric(ifa, new_metric);
 971			ifa->ifa_rt_priority = new_metric;
 972		}
 973
 974		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
 975		cancel_delayed_work(&check_lifetime_work);
 976		queue_delayed_work(system_power_efficient_wq,
 977				&check_lifetime_work, 0);
 978		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
 979	}
 980	return 0;
 981}
 982
 983/*
 984 *	Determine a default network mask, based on the IP address.
 985 */
 986
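/* Returns the classful prefix length (8/16/24, or 32 for class E),
 * 0 for a zeronet or the limited-broadcast address, and -1 for
 * anything else (e.g. multicast), which callers treat as invalid.
 */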
 987static int inet_abc_len(__be32 addr)
 988{
 989	int rc = -1;	/* Something else, probably a multicast. */
 990
 991	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
 992		rc = 0;
 993	else {
 994		__u32 haddr = ntohl(addr);
 995		if (IN_CLASSA(haddr))
 996			rc = 8;
 997		else if (IN_CLASSB(haddr))
 998			rc = 16;
 999		else if (IN_CLASSC(haddr))
1000			rc = 24;
1001		else if (IN_CLASSE(haddr))
1002			rc = 32;
1003	}
1004
1005	return rc;
1006}
1007
1008
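/* Handler for the legacy SIOCGIFxxx/SIOCSIFxxx address ioctls.  The
 * interface may be named "dev" or "dev:alias"; the label (and, for
 * the GET ioctls, optionally the passed-in address) selects which
 * ifaddr on the device the operation applies to.
 */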
1009int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
1010{
1011	struct sockaddr_in sin_orig;
1012	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr->ifr_addr;
1013	struct in_ifaddr __rcu **ifap = NULL;
1014	struct in_device *in_dev;
1015	struct in_ifaddr *ifa = NULL;
1016	struct net_device *dev;
1017	char *colon;
1018	int ret = -EFAULT;
1019	int tryaddrmatch = 0;
1020
1021	ifr->ifr_name[IFNAMSIZ - 1] = 0;
1022
1023	/* save original address for comparison */
1024	memcpy(&sin_orig, sin, sizeof(*sin));
1025
1026	colon = strchr(ifr->ifr_name, ':');
1027	if (colon)
1028		*colon = 0;
1029
1030	dev_load(net, ifr->ifr_name);
1031
1032	switch (cmd) {
1033	case SIOCGIFADDR:	/* Get interface address */
1034	case SIOCGIFBRDADDR:	/* Get the broadcast address */
1035	case SIOCGIFDSTADDR:	/* Get the destination address */
1036	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
1037		/* Note that these ioctls will not sleep,
1038		   so that we do not impose a lock.
1039		   One day we will be forced to put shlock here (I mean SMP)
1040		 */
1041		tryaddrmatch = (sin_orig.sin_family == AF_INET);
1042		memset(sin, 0, sizeof(*sin));
1043		sin->sin_family = AF_INET;
1044		break;
1045
1046	case SIOCSIFFLAGS:
1047		ret = -EPERM;
1048		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1049			goto out;
1050		break;
1051	case SIOCSIFADDR:	/* Set interface address (and family) */
1052	case SIOCSIFBRDADDR:	/* Set the broadcast address */
1053	case SIOCSIFDSTADDR:	/* Set the destination address */
1054	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
1055		ret = -EPERM;
1056		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1057			goto out;
1058		ret = -EINVAL;
1059		if (sin->sin_family != AF_INET)
1060			goto out;
1061		break;
1062	default:
1063		ret = -EINVAL;
1064		goto out;
1065	}
1066
1067	rtnl_lock();
1068
1069	ret = -ENODEV;
1070	dev = __dev_get_by_name(net, ifr->ifr_name);
1071	if (!dev)
1072		goto done;
1073
1074	if (colon)
1075		*colon = ':';
1076
1077	in_dev = __in_dev_get_rtnl(dev);
1078	if (in_dev) {
1079		if (tryaddrmatch) {
1080			/* Matthias Andree */
1081			/* compare label and address (4.4BSD style) */
1082			/* note: we only do this for a limited set of ioctls
1083			   and only if the original address family was AF_INET.
1084			   This is checked above. */
1085
1086			for (ifap = &in_dev->ifa_list;
1087			     (ifa = rtnl_dereference(*ifap)) != NULL;
1088			     ifap = &ifa->ifa_next) {
1089				if (!strcmp(ifr->ifr_name, ifa->ifa_label) &&
1090				    sin_orig.sin_addr.s_addr ==
1091							ifa->ifa_local) {
1092					break; /* found */
1093				}
1094			}
1095		}
1096		/* we didn't get a match, maybe the application is
1097		   4.3BSD-style and passed in junk so we fall back to
1098		   comparing just the label */
1099		if (!ifa) {
1100			for (ifap = &in_dev->ifa_list;
1101			     (ifa = rtnl_dereference(*ifap)) != NULL;
1102			     ifap = &ifa->ifa_next)
1103				if (!strcmp(ifr->ifr_name, ifa->ifa_label))
1104					break;
1105		}
1106	}
1107
1108	ret = -EADDRNOTAVAIL;
1109	if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
1110		goto done;
1111
1112	switch (cmd) {
1113	case SIOCGIFADDR:	/* Get interface address */
1114		ret = 0;
1115		sin->sin_addr.s_addr = ifa->ifa_local;
1116		break;
1117
1118	case SIOCGIFBRDADDR:	/* Get the broadcast address */
1119		ret = 0;
1120		sin->sin_addr.s_addr = ifa->ifa_broadcast;
1121		break;
1122
1123	case SIOCGIFDSTADDR:	/* Get the destination address */
1124		ret = 0;
1125		sin->sin_addr.s_addr = ifa->ifa_address;
1126		break;
1127
1128	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
1129		ret = 0;
1130		sin->sin_addr.s_addr = ifa->ifa_mask;
1131		break;
1132
1133	case SIOCSIFFLAGS:
1134		if (colon) {
1135			ret = -EADDRNOTAVAIL;
1136			if (!ifa)
1137				break;
1138			ret = 0;
1139			if (!(ifr->ifr_flags & IFF_UP))
1140				inet_del_ifa(in_dev, ifap, 1);
1141			break;
1142		}
1143		ret = dev_change_flags(dev, ifr->ifr_flags, NULL);
1144		break;
1145
1146	case SIOCSIFADDR:	/* Set interface address (and family) */
1147		ret = -EINVAL;
1148		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1149			break;
1150
1151		if (!ifa) {
1152			ret = -ENOBUFS;
1153			ifa = inet_alloc_ifa();
1154			if (!ifa)
1155				break;
1156			INIT_HLIST_NODE(&ifa->hash);
1157			if (colon)
1158				memcpy(ifa->ifa_label, ifr->ifr_name, IFNAMSIZ);
1159			else
1160				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1161		} else {
1162			ret = 0;
1163			if (ifa->ifa_local == sin->sin_addr.s_addr)
1164				break;
1165			inet_del_ifa(in_dev, ifap, 0);
1166			ifa->ifa_broadcast = 0;
1167			ifa->ifa_scope = 0;
1168		}
1169
1170		ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
1171
1172		if (!(dev->flags & IFF_POINTOPOINT)) {
1173			ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
1174			ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
1175			if ((dev->flags & IFF_BROADCAST) &&
1176			    ifa->ifa_prefixlen < 31)
1177				ifa->ifa_broadcast = ifa->ifa_address |
1178						     ~ifa->ifa_mask;
1179		} else {
1180			ifa->ifa_prefixlen = 32;
1181			ifa->ifa_mask = inet_make_mask(32);
1182		}
1183		set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
1184		ret = inet_set_ifa(dev, ifa);
1185		break;
1186
1187	case SIOCSIFBRDADDR:	/* Set the broadcast address */
1188		ret = 0;
1189		if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
1190			inet_del_ifa(in_dev, ifap, 0);
1191			ifa->ifa_broadcast = sin->sin_addr.s_addr;
1192			inet_insert_ifa(ifa);
1193		}
1194		break;
1195
1196	case SIOCSIFDSTADDR:	/* Set the destination address */
1197		ret = 0;
1198		if (ifa->ifa_address == sin->sin_addr.s_addr)
1199			break;
1200		ret = -EINVAL;
1201		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1202			break;
1203		ret = 0;
1204		inet_del_ifa(in_dev, ifap, 0);
1205		ifa->ifa_address = sin->sin_addr.s_addr;
1206		inet_insert_ifa(ifa);
1207		break;
1208
1209	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
1210
1211		/*
1212		 *	The mask we set must be legal.
1213		 */
1214		ret = -EINVAL;
1215		if (bad_mask(sin->sin_addr.s_addr, 0))
1216			break;
1217		ret = 0;
1218		if (ifa->ifa_mask != sin->sin_addr.s_addr) {
1219			__be32 old_mask = ifa->ifa_mask;
1220			inet_del_ifa(in_dev, ifap, 0);
1221			ifa->ifa_mask = sin->sin_addr.s_addr;
1222			ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
1223
1224			/* If the current broadcast address was
1225			 * derived from the old netmask, recalculate
1226			 * it for the new netmask. Otherwise it's an
1227			 * unusual address, so don't touch it since
1228			 * the user seems to know what (s)he's doing...
1229			 */
1230			if ((dev->flags & IFF_BROADCAST) &&
1231			    (ifa->ifa_prefixlen < 31) &&
1232			    (ifa->ifa_broadcast ==
1233			     (ifa->ifa_local|~old_mask))) {
1234				ifa->ifa_broadcast = (ifa->ifa_local |
1235						      ~sin->sin_addr.s_addr);
1236			}
1237			inet_insert_ifa(ifa);
1238		}
1239		break;
1240	}
1241done:
1242	rtnl_unlock();
1243out:
1244	return ret;
1245}
1246
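/* SIOCGIFCONF helper: copy one struct ifreq (of the caller-supplied
 * size) per address on the device into the user buffer.  With a NULL
 * buffer only the space required is accumulated in the return value.
 */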
1247static int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
1248{
1249	struct in_device *in_dev = __in_dev_get_rtnl(dev);
1250	const struct in_ifaddr *ifa;
1251	struct ifreq ifr;
1252	int done = 0;
1253
1254	if (WARN_ON(size > sizeof(struct ifreq)))
1255		goto out;
1256
1257	if (!in_dev)
1258		goto out;
1259
1260	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1261		if (!buf) {
1262			done += size;
1263			continue;
1264		}
1265		if (len < size)
1266			break;
1267		memset(&ifr, 0, sizeof(struct ifreq));
1268		strcpy(ifr.ifr_name, ifa->ifa_label);
1269
1270		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
1271		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
1272								ifa->ifa_local;
1273
1274		if (copy_to_user(buf + done, &ifr, size)) {
1275			done = -EFAULT;
1276			break;
1277		}
1278		len  -= size;
1279		done += size;
1280	}
1281out:
1282	return done;
1283}
1284
1285static __be32 in_dev_select_addr(const struct in_device *in_dev,
1286				 int scope)
1287{
1288	const struct in_ifaddr *ifa;
1289
1290	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1291		if (ifa->ifa_flags & IFA_F_SECONDARY)
1292			continue;
1293		if (ifa->ifa_scope != RT_SCOPE_LINK &&
1294		    ifa->ifa_scope <= scope)
1295			return ifa->ifa_local;
1296	}
1297
1298	return 0;
1299}
1300
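/* Pick a primary IPv4 source address on @dev with scope <= @scope,
 * preferring one in the same subnet as @dst.  If the device has no
 * suitable address, fall back to the L3 master device (VRF) and then
 * to any other device in the namespace under the same master.
 */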
1301__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
1302{
1303	const struct in_ifaddr *ifa;
1304	__be32 addr = 0;
1305	unsigned char localnet_scope = RT_SCOPE_HOST;
1306	struct in_device *in_dev;
1307	struct net *net = dev_net(dev);
1308	int master_idx;
1309
1310	rcu_read_lock();
1311	in_dev = __in_dev_get_rcu(dev);
1312	if (!in_dev)
1313		goto no_in_dev;
1314
1315	if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
1316		localnet_scope = RT_SCOPE_LINK;
1317
1318	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1319		if (ifa->ifa_flags & IFA_F_SECONDARY)
1320			continue;
1321		if (min(ifa->ifa_scope, localnet_scope) > scope)
1322			continue;
1323		if (!dst || inet_ifa_match(dst, ifa)) {
1324			addr = ifa->ifa_local;
1325			break;
1326		}
1327		if (!addr)
1328			addr = ifa->ifa_local;
1329	}
1330
1331	if (addr)
1332		goto out_unlock;
1333no_in_dev:
1334	master_idx = l3mdev_master_ifindex_rcu(dev);
1335
1336	/* For VRFs, the VRF device takes the place of the loopback device,
1337	 * with addresses on it being preferred.  Note in such cases the
1338	 * loopback device will be among the devices that fail the master_idx
1339	 * equality check in the loop below.
1340	 */
1341	if (master_idx &&
1342	    (dev = dev_get_by_index_rcu(net, master_idx)) &&
1343	    (in_dev = __in_dev_get_rcu(dev))) {
1344		addr = in_dev_select_addr(in_dev, scope);
1345		if (addr)
1346			goto out_unlock;
1347	}
1348
1349	/* Non-loopback addresses on the loopback device should be
1350	   preferred in this case. It is important that lo is the first
1351	   interface in the dev_base list.
1352	 */
1353	for_each_netdev_rcu(net, dev) {
1354		if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1355			continue;
1356
1357		in_dev = __in_dev_get_rcu(dev);
1358		if (!in_dev)
1359			continue;
1360
1361		addr = in_dev_select_addr(in_dev, scope);
1362		if (addr)
1363			goto out_unlock;
1364	}
1365out_unlock:
1366	rcu_read_unlock();
1367	return addr;
1368}
1369EXPORT_SYMBOL(inet_select_addr);
1370
1371static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
1372			      __be32 local, int scope)
1373{
1374	unsigned char localnet_scope = RT_SCOPE_HOST;
1375	const struct in_ifaddr *ifa;
1376	__be32 addr = 0;
1377	int same = 0;
1378
1379	if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
1380		localnet_scope = RT_SCOPE_LINK;
1381
1382	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1383		unsigned char min_scope = min(ifa->ifa_scope, localnet_scope);
1384
1385		if (!addr &&
1386		    (local == ifa->ifa_local || !local) &&
1387		    min_scope <= scope) {
1388			addr = ifa->ifa_local;
1389			if (same)
1390				break;
1391		}
1392		if (!same) {
1393			same = (!local || inet_ifa_match(local, ifa)) &&
1394				(!dst || inet_ifa_match(dst, ifa));
1395			if (same && addr) {
1396				if (local || !dst)
1397					break;
1398				/* Is the selected addr in the dst subnet? */
1399				if (inet_ifa_match(addr, ifa))
1400					break;
1401				/* No, then can we use new local src? */
1402				if (min_scope <= scope) {
1403					addr = ifa->ifa_local;
1404					break;
1405				}
1406				/* search for large dst subnet for addr */
1407				same = 0;
1408			}
1409		}
1410	}
1411
1412	return same ? addr : 0;
1413}
1414
1415/*
1416 * Confirm that local IP address exists using wildcards:
1417 * - net: netns to check, cannot be NULL
1418 * - in_dev: only on this interface, NULL=any interface
1419 * - dst: only in the same subnet as dst, 0=any dst
1420 * - local: address, 0=autoselect the local address
1421 * - scope: maximum allowed scope value for the local address
1422 */
1423__be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
1424			 __be32 dst, __be32 local, int scope)
1425{
1426	__be32 addr = 0;
1427	struct net_device *dev;
1428
1429	if (in_dev)
1430		return confirm_addr_indev(in_dev, dst, local, scope);
1431
1432	rcu_read_lock();
1433	for_each_netdev_rcu(net, dev) {
1434		in_dev = __in_dev_get_rcu(dev);
1435		if (in_dev) {
1436			addr = confirm_addr_indev(in_dev, dst, local, scope);
1437			if (addr)
1438				break;
1439		}
1440	}
1441	rcu_read_unlock();
1442
1443	return addr;
1444}
1445EXPORT_SYMBOL(inet_confirm_addr);
1446
1447/*
1448 *	Device notifier
1449 */
1450
1451int register_inetaddr_notifier(struct notifier_block *nb)
1452{
1453	return blocking_notifier_chain_register(&inetaddr_chain, nb);
1454}
1455EXPORT_SYMBOL(register_inetaddr_notifier);
1456
1457int unregister_inetaddr_notifier(struct notifier_block *nb)
1458{
1459	return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
1460}
1461EXPORT_SYMBOL(unregister_inetaddr_notifier);
1462
1463int register_inetaddr_validator_notifier(struct notifier_block *nb)
1464{
1465	return blocking_notifier_chain_register(&inetaddr_validator_chain, nb);
1466}
1467EXPORT_SYMBOL(register_inetaddr_validator_notifier);
1468
1469int unregister_inetaddr_validator_notifier(struct notifier_block *nb)
1470{
1471	return blocking_notifier_chain_unregister(&inetaddr_validator_chain,
1472	    nb);
1473}
1474EXPORT_SYMBOL(unregister_inetaddr_validator_notifier);
1475
1476/* Rename ifa_labels for a device name change. Make some effort to preserve
1477 * existing alias numbering and to create unique labels if possible.
1478 */
1479static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1480{
1481	struct in_ifaddr *ifa;
1482	int named = 0;
1483
1484	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1485		char old[IFNAMSIZ], *dot;
1486
1487		memcpy(old, ifa->ifa_label, IFNAMSIZ);
1488		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1489		if (named++ == 0)
1490			goto skip;
1491		dot = strchr(old, ':');
1492		if (!dot) {
1493			sprintf(old, ":%d", named);
1494			dot = old;
1495		}
1496		if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
1497			strcat(ifa->ifa_label, dot);
1498		else
1499			strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1500skip:
1501		rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
1502	}
1503}
1504
1504
1505static void inetdev_send_gratuitous_arp(struct net_device *dev,
1506					struct in_device *in_dev)
1507
1508{
1509	const struct in_ifaddr *ifa;
1510
1511	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1512		arp_send(ARPOP_REQUEST, ETH_P_ARP,
1513			 ifa->ifa_local, dev,
1514			 ifa->ifa_local, NULL,
1515			 dev->dev_addr, NULL);
1516	}
1517}
1518
1519/* Called only under RTNL semaphore */
1520
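/* netdev notifier: creates the in_device on NETDEV_REGISTER, adds the
 * 127.0.0.1/8 address when the loopback device comes up, sends
 * gratuitous ARP on address/link changes, and tears the in_device
 * down on NETDEV_UNREGISTER or when the MTU becomes too small.
 */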
1521static int inetdev_event(struct notifier_block *this, unsigned long event,
1522			 void *ptr)
1523{
1524	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1525	struct in_device *in_dev = __in_dev_get_rtnl(dev);
1526
1527	ASSERT_RTNL();
1528
1529	if (!in_dev) {
1530		if (event == NETDEV_REGISTER) {
1531			in_dev = inetdev_init(dev);
1532			if (IS_ERR(in_dev))
1533				return notifier_from_errno(PTR_ERR(in_dev));
1534			if (dev->flags & IFF_LOOPBACK) {
1535				IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
1536				IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
1537			}
1538		} else if (event == NETDEV_CHANGEMTU) {
1539			/* Re-enabling IP */
1540			if (inetdev_valid_mtu(dev->mtu))
1541				in_dev = inetdev_init(dev);
1542		}
1543		goto out;
1544	}
1545
1546	switch (event) {
1547	case NETDEV_REGISTER:
1548		pr_debug("%s: bug\n", __func__);
1549		RCU_INIT_POINTER(dev->ip_ptr, NULL);
1550		break;
1551	case NETDEV_UP:
1552		if (!inetdev_valid_mtu(dev->mtu))
1553			break;
1554		if (dev->flags & IFF_LOOPBACK) {
1555			struct in_ifaddr *ifa = inet_alloc_ifa();
1556
1557			if (ifa) {
1558				INIT_HLIST_NODE(&ifa->hash);
1559				ifa->ifa_local =
1560				  ifa->ifa_address = htonl(INADDR_LOOPBACK);
1561				ifa->ifa_prefixlen = 8;
1562				ifa->ifa_mask = inet_make_mask(8);
1563				in_dev_hold(in_dev);
1564				ifa->ifa_dev = in_dev;
1565				ifa->ifa_scope = RT_SCOPE_HOST;
1566				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1567				set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
1568						 INFINITY_LIFE_TIME);
1569				ipv4_devconf_setall(in_dev);
1570				neigh_parms_data_state_setall(in_dev->arp_parms);
1571				inet_insert_ifa(ifa);
1572			}
1573		}
1574		ip_mc_up(in_dev);
1575		fallthrough;
1576	case NETDEV_CHANGEADDR:
1577		if (!IN_DEV_ARP_NOTIFY(in_dev))
1578			break;
1579		fallthrough;
1580	case NETDEV_NOTIFY_PEERS:
1581		/* Send gratuitous ARP to notify of link change */
1582		inetdev_send_gratuitous_arp(dev, in_dev);
1583		break;
1584	case NETDEV_DOWN:
1585		ip_mc_down(in_dev);
1586		break;
1587	case NETDEV_PRE_TYPE_CHANGE:
1588		ip_mc_unmap(in_dev);
1589		break;
1590	case NETDEV_POST_TYPE_CHANGE:
1591		ip_mc_remap(in_dev);
1592		break;
1593	case NETDEV_CHANGEMTU:
1594		if (inetdev_valid_mtu(dev->mtu))
1595			break;
1596		/* disable IP when the MTU is too small */
1597		fallthrough;
1598	case NETDEV_UNREGISTER:
1599		inetdev_destroy(in_dev);
1600		break;
1601	case NETDEV_CHANGENAME:
1602		/* Do not notify about label change, this event is
1603		 * not interesting to applications using netlink.
1604		 */
1605		inetdev_changename(dev, in_dev);
1606
1607		devinet_sysctl_unregister(in_dev);
1608		devinet_sysctl_register(in_dev);
1609		break;
1610	}
1611out:
1612	return NOTIFY_DONE;
1613}
1614
1615static struct notifier_block ip_netdev_notifier = {
1616	.notifier_call = inetdev_event,
1617};
1618
1619static size_t inet_nlmsg_size(void)
1620{
1621	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
1622	       + nla_total_size(4) /* IFA_ADDRESS */
1623	       + nla_total_size(4) /* IFA_LOCAL */
1624	       + nla_total_size(4) /* IFA_BROADCAST */
1625	       + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
1626	       + nla_total_size(4)  /* IFA_FLAGS */
1627	       + nla_total_size(4)  /* IFA_RT_PRIORITY */
1628	       + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
1629}
1630
1631static inline u32 cstamp_delta(unsigned long cstamp)
1632{
1633	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
1634}
1635
1636static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
1637			 unsigned long tstamp, u32 preferred, u32 valid)
1638{
1639	struct ifa_cacheinfo ci;
1640
1641	ci.cstamp = cstamp_delta(cstamp);
1642	ci.tstamp = cstamp_delta(tstamp);
1643	ci.ifa_prefered = preferred;
1644	ci.ifa_valid = valid;
1645
1646	return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
1647}
1648
1649static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1650			    struct inet_fill_args *args)
1651{
1652	struct ifaddrmsg *ifm;
1653	struct nlmsghdr  *nlh;
1654	u32 preferred, valid;
1655
1656	nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
1657			args->flags);
1658	if (!nlh)
1659		return -EMSGSIZE;
1660
1661	ifm = nlmsg_data(nlh);
1662	ifm->ifa_family = AF_INET;
1663	ifm->ifa_prefixlen = ifa->ifa_prefixlen;
1664	ifm->ifa_flags = ifa->ifa_flags;
1665	ifm->ifa_scope = ifa->ifa_scope;
1666	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
1667
1668	if (args->netnsid >= 0 &&
1669	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
1670		goto nla_put_failure;
1671
1672	if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
1673		preferred = ifa->ifa_preferred_lft;
1674		valid = ifa->ifa_valid_lft;
1675		if (preferred != INFINITY_LIFE_TIME) {
1676			long tval = (jiffies - ifa->ifa_tstamp) / HZ;
1677
1678			if (preferred > tval)
1679				preferred -= tval;
1680			else
1681				preferred = 0;
1682			if (valid != INFINITY_LIFE_TIME) {
1683				if (valid > tval)
1684					valid -= tval;
1685				else
1686					valid = 0;
1687			}
1688		}
1689	} else {
1690		preferred = INFINITY_LIFE_TIME;
1691		valid = INFINITY_LIFE_TIME;
1692	}
1693	if ((ifa->ifa_address &&
1694	     nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
1695	    (ifa->ifa_local &&
1696	     nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
1697	    (ifa->ifa_broadcast &&
1698	     nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
1699	    (ifa->ifa_label[0] &&
1700	     nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
1701	    nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
1702	    (ifa->ifa_rt_priority &&
1703	     nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) ||
1704	    put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
1705			  preferred, valid))
1706		goto nla_put_failure;
1707
1708	nlmsg_end(skb, nlh);
1709	return 0;
1710
1711nla_put_failure:
1712	nlmsg_cancel(skb, nlh);
1713	return -EMSGSIZE;
1714}
1715
1716static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
1717				      struct inet_fill_args *fillargs,
1718				      struct net **tgt_net, struct sock *sk,
1719				      struct netlink_callback *cb)
1720{
1721	struct netlink_ext_ack *extack = cb->extack;
1722	struct nlattr *tb[IFA_MAX+1];
1723	struct ifaddrmsg *ifm;
1724	int err, i;
1725
1726	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
1727		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for address dump request");
1728		return -EINVAL;
1729	}
1730
1731	ifm = nlmsg_data(nlh);
1732	if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
1733		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for address dump request");
1734		return -EINVAL;
1735	}
1736
1737	fillargs->ifindex = ifm->ifa_index;
1738	if (fillargs->ifindex) {
1739		cb->answer_flags |= NLM_F_DUMP_FILTERED;
1740		fillargs->flags |= NLM_F_DUMP_FILTERED;
1741	}
1742
1743	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
1744					    ifa_ipv4_policy, extack);
1745	if (err < 0)
1746		return err;
1747
1748	for (i = 0; i <= IFA_MAX; ++i) {
1749		if (!tb[i])
1750			continue;
1751
1752		if (i == IFA_TARGET_NETNSID) {
1753			struct net *net;
1754
1755			fillargs->netnsid = nla_get_s32(tb[i]);
1756
1757			net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
1758			if (IS_ERR(net)) {
1759				fillargs->netnsid = -1;
1760				NL_SET_ERR_MSG(extack, "ipv4: Invalid target network namespace id");
1761				return PTR_ERR(net);
1762			}
1763			*tgt_net = net;
1764		} else {
1765			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in dump request");
1766			return -EINVAL;
1767		}
1768	}
1769
1770	return 0;
1771}
1772
1773static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
1774			    struct netlink_callback *cb, int s_ip_idx,
1775			    struct inet_fill_args *fillargs)
1776{
1777	struct in_ifaddr *ifa;
1778	int ip_idx = 0;
1779	int err;
1780
1781	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1782		if (ip_idx < s_ip_idx) {
1783			ip_idx++;
1784			continue;
1785		}
1786		err = inet_fill_ifaddr(skb, ifa, fillargs);
1787		if (err < 0)
1788			goto done;
1789
1790		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1791		ip_idx++;
1792	}
1793	err = 0;
1794
1795done:
1796	cb->args[2] = ip_idx;
1797
1798	return err;
1799}
1800
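/* RTM_GETADDR dump: cb->args[0], [1] and [2] hold the hash bucket,
 * device index and per-device address index reached so far, so that
 * a multi-part dump can resume where the previous skb filled up.
 */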
1801static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1802{
1803	const struct nlmsghdr *nlh = cb->nlh;
1804	struct inet_fill_args fillargs = {
1805		.portid = NETLINK_CB(cb->skb).portid,
1806		.seq = nlh->nlmsg_seq,
1807		.event = RTM_NEWADDR,
1808		.flags = NLM_F_MULTI,
1809		.netnsid = -1,
1810	};
1811	struct net *net = sock_net(skb->sk);
1812	struct net *tgt_net = net;
1813	int h, s_h;
1814	int idx, s_idx;
1815	int s_ip_idx;
1816	struct net_device *dev;
1817	struct in_device *in_dev;
1818	struct hlist_head *head;
1819	int err = 0;
1820
1821	s_h = cb->args[0];
1822	s_idx = idx = cb->args[1];
1823	s_ip_idx = cb->args[2];
1824
1825	if (cb->strict_check) {
1826		err = inet_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
1827						 skb->sk, cb);
1828		if (err < 0)
1829			goto put_tgt_net;
1830
1831		err = 0;
1832		if (fillargs.ifindex) {
1833			dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
1834			if (!dev) {
1835				err = -ENODEV;
1836				goto put_tgt_net;
1837			}
1838
1839			in_dev = __in_dev_get_rtnl(dev);
1840			if (in_dev) {
1841				err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
1842						       &fillargs);
1843			}
1844			goto put_tgt_net;
1845		}
1846	}
1847
1848	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1849		idx = 0;
1850		head = &tgt_net->dev_index_head[h];
1851		rcu_read_lock();
1852		cb->seq = atomic_read(&tgt_net->ipv4.dev_addr_genid) ^
1853			  tgt_net->dev_base_seq;
1854		hlist_for_each_entry_rcu(dev, head, index_hlist) {
1855			if (idx < s_idx)
1856				goto cont;
1857			if (h > s_h || idx > s_idx)
1858				s_ip_idx = 0;
1859			in_dev = __in_dev_get_rcu(dev);
1860			if (!in_dev)
1861				goto cont;
1862
1863			err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
1864					       &fillargs);
1865			if (err < 0) {
1866				rcu_read_unlock();
1867				goto done;
1868			}
1869cont:
1870			idx++;
1871		}
1872		rcu_read_unlock();
1873	}
1874
1875done:
1876	cb->args[0] = h;
1877	cb->args[1] = idx;
1878put_tgt_net:
1879	if (fillargs.netnsid >= 0)
1880		put_net(tgt_net);
1881
1882	return skb->len ? : err;
1883}
1884
1885static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
1886		      u32 portid)
1887{
1888	struct inet_fill_args fillargs = {
1889		.portid = portid,
1890		.seq = nlh ? nlh->nlmsg_seq : 0,
1891		.event = event,
1892		.flags = 0,
1893		.netnsid = -1,
1894	};
1895	struct sk_buff *skb;
1896	int err = -ENOBUFS;
1897	struct net *net;
1898
1899	net = dev_net(ifa->ifa_dev->dev);
1900	skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
1901	if (!skb)
1902		goto errout;
1903
1904	err = inet_fill_ifaddr(skb, ifa, &fillargs);
1905	if (err < 0) {
1906		/* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1907		WARN_ON(err == -EMSGSIZE);
1908		kfree_skb(skb);
1909		goto errout;
1910	}
1911	rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
1912	return;
1913errout:
1914	if (err < 0)
1915		rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
1916}
1917
1918static size_t inet_get_link_af_size(const struct net_device *dev,
1919				    u32 ext_filter_mask)
1920{
1921	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1922
1923	if (!in_dev)
1924		return 0;
1925
1926	return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
1927}
1928
1929static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
1930			     u32 ext_filter_mask)
1931{
1932	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1933	struct nlattr *nla;
1934	int i;
1935
1936	if (!in_dev)
1937		return -ENODATA;
1938
1939	nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
1940	if (!nla)
1941		return -EMSGSIZE;
1942
1943	for (i = 0; i < IPV4_DEVCONF_MAX; i++)
1944		((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
1945
1946	return 0;
1947}
1948
1949static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
1950	[IFLA_INET_CONF]	= { .type = NLA_NESTED },
1951};
1952
1953static int inet_validate_link_af(const struct net_device *dev,
1954				 const struct nlattr *nla)
1955{
1956	struct nlattr *a, *tb[IFLA_INET_MAX+1];
1957	int err, rem;
1958
1959	if (dev && !__in_dev_get_rcu(dev))
1960		return -EAFNOSUPPORT;
1961
1962	err = nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla,
1963					  inet_af_policy, NULL);
1964	if (err < 0)
1965		return err;
1966
1967	if (tb[IFLA_INET_CONF]) {
1968		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
1969			int cfgid = nla_type(a);
1970
1971			if (nla_len(a) < 4)
1972				return -EINVAL;
1973
1974			if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
1975				return -EINVAL;
1976		}
1977	}
1978
1979	return 0;
1980}
1981
1982static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
1983{
1984	struct in_device *in_dev = __in_dev_get_rcu(dev);
1985	struct nlattr *a, *tb[IFLA_INET_MAX+1];
1986	int rem;
1987
1988	if (!in_dev)
1989		return -EAFNOSUPPORT;
1990
1991	if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
1992		BUG();
1993
1994	if (tb[IFLA_INET_CONF]) {
1995		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
1996			ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
1997	}
1998
1999	return 0;
2000}
2001
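/* Size estimate for a netconf message.  It has to stay in sync with
 * what inet_netconf_fill_devconf() emits: an underestimate shows up as
 * the -EMSGSIZE WARN_ON() in the callers below.
 */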
2002static int inet_netconf_msgsize_devconf(int type)
2003{
2004	int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
2005		   + nla_total_size(4);	/* NETCONFA_IFINDEX */
2006	bool all = false;
2007
2008	if (type == NETCONFA_ALL)
2009		all = true;
2010
2011	if (all || type == NETCONFA_FORWARDING)
2012		size += nla_total_size(4);
2013	if (all || type == NETCONFA_RP_FILTER)
2014		size += nla_total_size(4);
2015	if (all || type == NETCONFA_MC_FORWARDING)
2016		size += nla_total_size(4);
2017	if (all || type == NETCONFA_BC_FORWARDING)
2018		size += nla_total_size(4);
2019	if (all || type == NETCONFA_PROXY_NEIGH)
2020		size += nla_total_size(4);
2021	if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
2022		size += nla_total_size(4);
2023
2024	return size;
2025}
2026
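/* Build one RTM_NEWNETCONF message.  The resulting layout is roughly:
 *
 *	struct nlmsghdr		(type RTM_NEWNETCONF)
 *	struct netconfmsg	{ .ncm_family = AF_INET }
 *	NETCONFA_IFINDEX	s32 ifindex, or NETCONFA_IFINDEX_ALL /
 *				NETCONFA_IFINDEX_DEFAULT for the pseudo entries
 *	NETCONFA_FORWARDING, _RP_FILTER, ...
 *				one s32 per requested type
 *				(all of them when type == NETCONFA_ALL)
 */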
2027static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
2028				     struct ipv4_devconf *devconf, u32 portid,
2029				     u32 seq, int event, unsigned int flags,
2030				     int type)
2031{
2032	struct nlmsghdr  *nlh;
2033	struct netconfmsg *ncm;
2034	bool all = false;
2035
2036	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
2037			flags);
2038	if (!nlh)
2039		return -EMSGSIZE;
2040
2041	if (type == NETCONFA_ALL)
2042		all = true;
2043
2044	ncm = nlmsg_data(nlh);
2045	ncm->ncm_family = AF_INET;
2046
2047	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
2048		goto nla_put_failure;
2049
2050	if (!devconf)
2051		goto out;
2052
2053	if ((all || type == NETCONFA_FORWARDING) &&
2054	    nla_put_s32(skb, NETCONFA_FORWARDING,
2055			IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
2056		goto nla_put_failure;
2057	if ((all || type == NETCONFA_RP_FILTER) &&
2058	    nla_put_s32(skb, NETCONFA_RP_FILTER,
2059			IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
2060		goto nla_put_failure;
2061	if ((all || type == NETCONFA_MC_FORWARDING) &&
2062	    nla_put_s32(skb, NETCONFA_MC_FORWARDING,
2063			IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
2064		goto nla_put_failure;
2065	if ((all || type == NETCONFA_BC_FORWARDING) &&
2066	    nla_put_s32(skb, NETCONFA_BC_FORWARDING,
2067			IPV4_DEVCONF(*devconf, BC_FORWARDING)) < 0)
2068		goto nla_put_failure;
2069	if ((all || type == NETCONFA_PROXY_NEIGH) &&
2070	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
2071			IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
2072		goto nla_put_failure;
2073	if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
2074	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
2075			IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
2076		goto nla_put_failure;
2077
2078out:
2079	nlmsg_end(skb, nlh);
2080	return 0;
2081
2082nla_put_failure:
2083	nlmsg_cancel(skb, nlh);
2084	return -EMSGSIZE;
2085}
2086
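/* Broadcast a devconf change (or the full view, type == NETCONFA_ALL)
 * as RTM_NEWNETCONF to the RTNLGRP_IPV4_NETCONF group; userspace can
 * follow these events with e.g. "ip monitor netconf".
 */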
2087void inet_netconf_notify_devconf(struct net *net, int event, int type,
2088				 int ifindex, struct ipv4_devconf *devconf)
2089{
2090	struct sk_buff *skb;
2091	int err = -ENOBUFS;
2092
2093	skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_KERNEL);
2094	if (!skb)
2095		goto errout;
2096
2097	err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
2098					event, 0, type);
2099	if (err < 0) {
2100		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
2101		WARN_ON(err == -EMSGSIZE);
2102		kfree_skb(skb);
2103		goto errout;
2104	}
2105	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_KERNEL);
2106	return;
2107errout:
2108	if (err < 0)
2109		rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
2110}
2111
2112static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
2113	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
2114	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
2115	[NETCONFA_RP_FILTER]	= { .len = sizeof(int) },
2116	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
2117	[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]	= { .len = sizeof(int) },
2118};
2119
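/* Strict-mode validation of RTM_GETNETCONF requests: when the socket
 * has strict checking enabled, NETCONFA_IFINDEX is the only attribute
 * accepted; anything else is rejected with an extack message.
 */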
2120static int inet_netconf_valid_get_req(struct sk_buff *skb,
2121				      const struct nlmsghdr *nlh,
2122				      struct nlattr **tb,
2123				      struct netlink_ext_ack *extack)
2124{
2125	int i, err;
2126
2127	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
2128		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf get request");
2129		return -EINVAL;
2130	}
2131
2132	if (!netlink_strict_get_check(skb))
2133		return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
2134					      tb, NETCONFA_MAX,
2135					      devconf_ipv4_policy, extack);
2136
2137	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
2138					    tb, NETCONFA_MAX,
2139					    devconf_ipv4_policy, extack);
2140	if (err)
2141		return err;
2142
2143	for (i = 0; i <= NETCONFA_MAX; i++) {
2144		if (!tb[i])
2145			continue;
2146
2147		switch (i) {
2148		case NETCONFA_IFINDEX:
2149			break;
2150		default:
2151			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in netconf get request");
2152			return -EINVAL;
2153		}
2154	}
2155
2156	return 0;
2157}
2158
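/* RTM_GETNETCONF for a single target: NETCONFA_IFINDEX selects a real
 * device, or the "all"/"default" configuration via the
 * NETCONFA_IFINDEX_ALL / NETCONFA_IFINDEX_DEFAULT pseudo indexes.  The
 * reply always carries the complete attribute set (NETCONFA_ALL).
 */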
2159static int inet_netconf_get_devconf(struct sk_buff *in_skb,
2160				    struct nlmsghdr *nlh,
2161				    struct netlink_ext_ack *extack)
2162{
2163	struct net *net = sock_net(in_skb->sk);
2164	struct nlattr *tb[NETCONFA_MAX+1];
2165	struct sk_buff *skb;
2166	struct ipv4_devconf *devconf;
2167	struct in_device *in_dev;
2168	struct net_device *dev;
2169	int ifindex;
2170	int err;
2171
2172	err = inet_netconf_valid_get_req(in_skb, nlh, tb, extack);
2173	if (err)
2174		goto errout;
2175
2176	err = -EINVAL;
2177	if (!tb[NETCONFA_IFINDEX])
2178		goto errout;
2179
2180	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
2181	switch (ifindex) {
2182	case NETCONFA_IFINDEX_ALL:
2183		devconf = net->ipv4.devconf_all;
2184		break;
2185	case NETCONFA_IFINDEX_DEFAULT:
2186		devconf = net->ipv4.devconf_dflt;
2187		break;
2188	default:
2189		dev = __dev_get_by_index(net, ifindex);
2190		if (!dev)
2191			goto errout;
2192		in_dev = __in_dev_get_rtnl(dev);
2193		if (!in_dev)
2194			goto errout;
2195		devconf = &in_dev->cnf;
2196		break;
2197	}
2198
2199	err = -ENOBUFS;
2200	skb = nlmsg_new(inet_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
2201	if (!skb)
2202		goto errout;
2203
2204	err = inet_netconf_fill_devconf(skb, ifindex, devconf,
2205					NETLINK_CB(in_skb).portid,
2206					nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
2207					NETCONFA_ALL);
2208	if (err < 0) {
2209		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
2210		WARN_ON(err == -EMSGSIZE);
2211		kfree_skb(skb);
2212		goto errout;
2213	}
2214	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2215errout:
2216	return err;
2217}
2218
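/* RTM_GETNETCONF dump: walk every device through the dev_index_head
 * hash, then append the "all" (h == NETDEV_HASHENTRIES) and "default"
 * (h == NETDEV_HASHENTRIES + 1) pseudo entries.  cb->args[0]/args[1]
 * hold the bucket and index to resume from when the skb fills up.
 */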
2219static int inet_netconf_dump_devconf(struct sk_buff *skb,
2220				     struct netlink_callback *cb)
2221{
2222	const struct nlmsghdr *nlh = cb->nlh;
2223	struct net *net = sock_net(skb->sk);
2224	int h, s_h;
2225	int idx, s_idx;
2226	struct net_device *dev;
2227	struct in_device *in_dev;
2228	struct hlist_head *head;
2229
2230	if (cb->strict_check) {
2231		struct netlink_ext_ack *extack = cb->extack;
2232		struct netconfmsg *ncm;
2233
2234		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
2235			NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf dump request");
2236			return -EINVAL;
2237		}
2238
2239		if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
2240			NL_SET_ERR_MSG(extack, "ipv4: Invalid data after header in netconf dump request");
2241			return -EINVAL;
2242		}
2243	}
2244
2245	s_h = cb->args[0];
2246	s_idx = idx = cb->args[1];
2247
2248	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
2249		idx = 0;
2250		head = &net->dev_index_head[h];
2251		rcu_read_lock();
2252		cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
2253			  net->dev_base_seq;
2254		hlist_for_each_entry_rcu(dev, head, index_hlist) {
2255			if (idx < s_idx)
2256				goto cont;
2257			in_dev = __in_dev_get_rcu(dev);
2258			if (!in_dev)
2259				goto cont;
2260
2261			if (inet_netconf_fill_devconf(skb, dev->ifindex,
2262						      &in_dev->cnf,
2263						      NETLINK_CB(cb->skb).portid,
2264						      nlh->nlmsg_seq,
2265						      RTM_NEWNETCONF,
2266						      NLM_F_MULTI,
2267						      NETCONFA_ALL) < 0) {
2268				rcu_read_unlock();
2269				goto done;
2270			}
2271			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2272cont:
2273			idx++;
2274		}
2275		rcu_read_unlock();
2276	}
2277	if (h == NETDEV_HASHENTRIES) {
2278		if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
2279					      net->ipv4.devconf_all,
2280					      NETLINK_CB(cb->skb).portid,
2281					      nlh->nlmsg_seq,
2282					      RTM_NEWNETCONF, NLM_F_MULTI,
2283					      NETCONFA_ALL) < 0)
2284			goto done;
2285		else
2286			h++;
2287	}
2288	if (h == NETDEV_HASHENTRIES + 1) {
2289		if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
2290					      net->ipv4.devconf_dflt,
2291					      NETLINK_CB(cb->skb).portid,
2292					      nlh->nlmsg_seq,
2293					      RTM_NEWNETCONF, NLM_F_MULTI,
2294					      NETCONFA_ALL) < 0)
2295			goto done;
2296		else
2297			h++;
2298	}
2299done:
2300	cb->args[0] = h;
2301	cb->args[1] = idx;
2302
2303	return skb->len;
2304}
2305
2306#ifdef CONFIG_SYSCTL
2307
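/* Propagate a changed "default" value to every device that has not
 * overridden that entry: the per-entry bit in cnf.state, set by
 * devinet_conf_proc() on an explicit write, marks values that must be
 * left alone.
 */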
2308static void devinet_copy_dflt_conf(struct net *net, int i)
2309{
2310	struct net_device *dev;
2311
2312	rcu_read_lock();
2313	for_each_netdev_rcu(net, dev) {
2314		struct in_device *in_dev;
2315
2316		in_dev = __in_dev_get_rcu(dev);
2317		if (in_dev && !test_bit(i, in_dev->cnf.state))
2318			in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
2319	}
2320	rcu_read_unlock();
2321}
2322
2323/* called with RTNL locked */
2324static void inet_forward_change(struct net *net)
2325{
2326	struct net_device *dev;
2327	int on = IPV4_DEVCONF_ALL(net, FORWARDING);
2328
2329	IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
2330	IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
2331	inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2332				    NETCONFA_FORWARDING,
2333				    NETCONFA_IFINDEX_ALL,
2334				    net->ipv4.devconf_all);
2335	inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2336				    NETCONFA_FORWARDING,
2337				    NETCONFA_IFINDEX_DEFAULT,
2338				    net->ipv4.devconf_dflt);
2339
2340	for_each_netdev(net, dev) {
2341		struct in_device *in_dev;
2342
2343		if (on)
2344			dev_disable_lro(dev);
2345
2346		in_dev = __in_dev_get_rtnl(dev);
2347		if (in_dev) {
2348			IN_DEV_CONF_SET(in_dev, FORWARDING, on);
2349			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2350						    NETCONFA_FORWARDING,
2351						    dev->ifindex, &in_dev->cnf);
2352		}
2353	}
2354}
2355
2356static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
2357{
2358	if (cnf == net->ipv4.devconf_dflt)
2359		return NETCONFA_IFINDEX_DEFAULT;
2360	else if (cnf == net->ipv4.devconf_all)
2361		return NETCONFA_IFINDEX_ALL;
2362	else {
2363		struct in_device *idev
2364			= container_of(cnf, struct in_device, cnf);
2365		return idev->dev->ifindex;
2366	}
2367}
2368
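/* Generic handler behind most /proc/sys/net/ipv4/conf/<device>/<entry>
 * files: mark the entry as explicitly written (see
 * devinet_copy_dflt_conf() above), flush the route cache where the
 * semantics require it, and emit RTM_NEWNETCONF notifications for the
 * attributes userspace tracks.
 */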
2369static int devinet_conf_proc(struct ctl_table *ctl, int write,
2370			     void *buffer, size_t *lenp, loff_t *ppos)
2371{
2372	int old_value = *(int *)ctl->data;
2373	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2374	int new_value = *(int *)ctl->data;
2375
2376	if (write) {
2377		struct ipv4_devconf *cnf = ctl->extra1;
2378		struct net *net = ctl->extra2;
2379		int i = (int *)ctl->data - cnf->data;
2380		int ifindex;
2381
2382		set_bit(i, cnf->state);
2383
2384		if (cnf == net->ipv4.devconf_dflt)
2385			devinet_copy_dflt_conf(net, i);
2386		if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
2387		    i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
2388			if ((new_value == 0) && (old_value != 0))
2389				rt_cache_flush(net);
2390
2391		if (i == IPV4_DEVCONF_BC_FORWARDING - 1 &&
2392		    new_value != old_value)
2393			rt_cache_flush(net);
2394
2395		if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
2396		    new_value != old_value) {
2397			ifindex = devinet_conf_ifindex(net, cnf);
2398			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2399						    NETCONFA_RP_FILTER,
2400						    ifindex, cnf);
2401		}
2402		if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
2403		    new_value != old_value) {
2404			ifindex = devinet_conf_ifindex(net, cnf);
2405			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2406						    NETCONFA_PROXY_NEIGH,
2407						    ifindex, cnf);
2408		}
2409		if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
2410		    new_value != old_value) {
2411			ifindex = devinet_conf_ifindex(net, cnf);
2412			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2413						    NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
2414						    ifindex, cnf);
2415		}
2416	}
2417
2418	return ret;
2419}
2420
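/* Writing "forwarding" needs the RTNL lock (it may disable LRO and walk
 * all devices), but sysctl writes arrive without it.  Hence the
 * rtnl_trylock()/restart_syscall() dance: on contention the old value
 * and file position are restored and the whole write is retried.
 */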
2421static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
2422				  void *buffer, size_t *lenp, loff_t *ppos)
2423{
2424	int *valp = ctl->data;
2425	int val = *valp;
2426	loff_t pos = *ppos;
2427	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2428
2429	if (write && *valp != val) {
2430		struct net *net = ctl->extra2;
2431
2432		if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
2433			if (!rtnl_trylock()) {
2434				/* Restore the original values before restarting */
2435				*valp = val;
2436				*ppos = pos;
2437				return restart_syscall();
2438			}
2439			if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
2440				inet_forward_change(net);
2441			} else {
2442				struct ipv4_devconf *cnf = ctl->extra1;
2443				struct in_device *idev =
2444					container_of(cnf, struct in_device, cnf);
2445				if (*valp)
2446					dev_disable_lro(idev->dev);
2447				inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2448							    NETCONFA_FORWARDING,
2449							    idev->dev->ifindex,
2450							    cnf);
2451			}
2452			rtnl_unlock();
2453			rt_cache_flush(net);
2454		} else
2455			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2456						    NETCONFA_FORWARDING,
2457						    NETCONFA_IFINDEX_DEFAULT,
2458						    net->ipv4.devconf_dflt);
2459	}
2460
2461	return ret;
2462}
2463
2464static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
2465				void *buffer, size_t *lenp, loff_t *ppos)
2466{
2467	int *valp = ctl->data;
2468	int val = *valp;
2469	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2470	struct net *net = ctl->extra2;
2471
2472	if (write && *valp != val)
2473		rt_cache_flush(net);
2474
2475	return ret;
2476}
2477
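/* Template for the per-device sysctl tree: each entry below becomes a
 * file like /proc/sys/net/ipv4/conf/<device>/rp_filter, plus the
 * special "all" and "default" directories.  The .data pointers in this
 * static template refer to the global ipv4_devconf and are re-based to
 * the actual per-device/per-netns copy in __devinet_sysctl_register().
 */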
2478#define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
2479	{ \
2480		.procname	= name, \
2481		.data		= ipv4_devconf.data + \
2482				  IPV4_DEVCONF_ ## attr - 1, \
2483		.maxlen		= sizeof(int), \
2484		.mode		= mval, \
2485		.proc_handler	= proc, \
2486		.extra1		= &ipv4_devconf, \
2487	}
2488
2489#define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
2490	DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)
2491
2492#define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
2493	DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)
2494
2495#define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
2496	DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)
2497
2498#define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
2499	DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
2500
2501static struct devinet_sysctl_table {
2502	struct ctl_table_header *sysctl_header;
2503	struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
2504} devinet_sysctl = {
2505	.devinet_vars = {
2506		DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
2507					     devinet_sysctl_forward),
2508		DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
2509		DEVINET_SYSCTL_RW_ENTRY(BC_FORWARDING, "bc_forwarding"),
2510
2511		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
2512		DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
2513		DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
2514		DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
2515		DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
2516		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
2517					"accept_source_route"),
2518		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
2519		DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
2520		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
2521		DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
2522		DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
2523		DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
2524		DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
2525		DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
2526		DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
2527		DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
2528		DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
2529		DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
2530		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
2531		DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
2532					"force_igmp_version"),
2533		DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
2534					"igmpv2_unsolicited_report_interval"),
2535		DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
2536					"igmpv3_unsolicited_report_interval"),
2537		DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
2538					"ignore_routes_with_linkdown"),
2539		DEVINET_SYSCTL_RW_ENTRY(DROP_GRATUITOUS_ARP,
2540					"drop_gratuitous_arp"),
2541
2542		DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
2543		DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
2544		DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
2545					      "promote_secondaries"),
2546		DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
2547					      "route_localnet"),
2548		DEVINET_SYSCTL_FLUSHING_ENTRY(DROP_UNICAST_IN_L2_MULTICAST,
2549					      "drop_unicast_in_l2_multicast"),
2550	},
2551};
2552
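/* Clone the template above for one ipv4_devconf instance: re-point the
 * .data/.extra1/.extra2 fields at the given devconf and netns, register
 * the table under "net/ipv4/conf/<dev_name>" and announce the full
 * configuration (NETCONFA_ALL) over rtnetlink.
 */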
2553static int __devinet_sysctl_register(struct net *net, char *dev_name,
2554				     int ifindex, struct ipv4_devconf *p)
2555{
2556	int i;
2557	struct devinet_sysctl_table *t;
2558	char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
2559
2560	t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
2561	if (!t)
2562		goto out;
2563
2564	for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
2565		t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
2566		t->devinet_vars[i].extra1 = p;
2567		t->devinet_vars[i].extra2 = net;
2568	}
2569
2570	snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
2571
2572	t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
2573	if (!t->sysctl_header)
2574		goto free;
2575
2576	p->sysctl = t;
2577
2578	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
2579				    ifindex, p);
2580	return 0;
2581
2582free:
2583	kfree(t);
2584out:
2585	return -ENOBUFS;
2586}
2587
2588static void __devinet_sysctl_unregister(struct net *net,
2589					struct ipv4_devconf *cnf, int ifindex)
2590{
2591	struct devinet_sysctl_table *t = cnf->sysctl;
2592
2593	if (t) {
2594		cnf->sysctl = NULL;
2595		unregister_net_sysctl_table(t->sysctl_header);
2596		kfree(t);
2597	}
2598
2599	inet_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
2600}
2601
2602static int devinet_sysctl_register(struct in_device *idev)
2603{
2604	int err;
2605
2606	if (!sysctl_dev_name_is_allowed(idev->dev->name))
2607		return -EINVAL;
2608
2609	err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
2610	if (err)
2611		return err;
2612	err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
2613					idev->dev->ifindex, &idev->cnf);
2614	if (err)
2615		neigh_sysctl_unregister(idev->arp_parms);
2616	return err;
2617}
2618
2619static void devinet_sysctl_unregister(struct in_device *idev)
2620{
2621	struct net *net = dev_net(idev->dev);
2622
2623	__devinet_sysctl_unregister(net, &idev->cnf, idev->dev->ifindex);
2624	neigh_sysctl_unregister(idev->arp_parms);
2625}
2626
2627static struct ctl_table ctl_forward_entry[] = {
2628	{
2629		.procname	= "ip_forward",
2630		.data		= &ipv4_devconf.data[
2631					IPV4_DEVCONF_FORWARDING - 1],
2632		.maxlen		= sizeof(int),
2633		.mode		= 0644,
2634		.proc_handler	= devinet_sysctl_forward,
2635		.extra1		= &ipv4_devconf,
2636		.extra2		= &init_net,
2637	},
2638	{ },
2639};
2640#endif
2641
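/* Per-namespace setup: allocate this netns' "all" and "default" devconf
 * blocks, optionally inheriting their contents from init_net or from
 * the creating namespace depending on sysctl_devconf_inherit_init_net,
 * then register the conf/ sysctl trees and the global ip_forward knob.
 */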
2642static __net_init int devinet_init_net(struct net *net)
2643{
2644	int err;
2645	struct ipv4_devconf *all, *dflt;
2646#ifdef CONFIG_SYSCTL
2647	struct ctl_table *tbl;
2648	struct ctl_table_header *forw_hdr;
2649#endif
2650
2651	err = -ENOMEM;
2652	all = kmemdup(&ipv4_devconf, sizeof(ipv4_devconf), GFP_KERNEL);
2653	if (!all)
2654		goto err_alloc_all;
2655
2656	dflt = kmemdup(&ipv4_devconf_dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
2657	if (!dflt)
2658		goto err_alloc_dflt;
2659
2660#ifdef CONFIG_SYSCTL
2661	tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
2662	if (!tbl)
2663		goto err_alloc_ctl;
2664
2665	tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
2666	tbl[0].extra1 = all;
2667	tbl[0].extra2 = net;
2668#endif
2669
2670	if (!net_eq(net, &init_net)) {
2671		if (IS_ENABLED(CONFIG_SYSCTL) &&
2672		    sysctl_devconf_inherit_init_net == 3) {
2673			/* copy from the current netns */
2674			memcpy(all, current->nsproxy->net_ns->ipv4.devconf_all,
2675			       sizeof(ipv4_devconf));
2676			memcpy(dflt,
2677			       current->nsproxy->net_ns->ipv4.devconf_dflt,
2678			       sizeof(ipv4_devconf_dflt));
2679		} else if (!IS_ENABLED(CONFIG_SYSCTL) ||
2680			   sysctl_devconf_inherit_init_net != 2) {
2681			/* inherit == 0 or 1: copy from init_net */
2682			memcpy(all, init_net.ipv4.devconf_all,
2683			       sizeof(ipv4_devconf));
2684			memcpy(dflt, init_net.ipv4.devconf_dflt,
2685			       sizeof(ipv4_devconf_dflt));
2686		}
2687		/* else inherit == 2: use compiled values */
2688	}
2689
2690#ifdef CONFIG_SYSCTL
2691	err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
2692	if (err < 0)
2693		goto err_reg_all;
2694
2695	err = __devinet_sysctl_register(net, "default",
2696					NETCONFA_IFINDEX_DEFAULT, dflt);
2697	if (err < 0)
2698		goto err_reg_dflt;
2699
2700	err = -ENOMEM;
2701	forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
2702	if (!forw_hdr)
2703		goto err_reg_ctl;
2704	net->ipv4.forw_hdr = forw_hdr;
2705#endif
2706
2707	net->ipv4.devconf_all = all;
2708	net->ipv4.devconf_dflt = dflt;
2709	return 0;
2710
2711#ifdef CONFIG_SYSCTL
2712err_reg_ctl:
2713	__devinet_sysctl_unregister(net, dflt, NETCONFA_IFINDEX_DEFAULT);
2714err_reg_dflt:
2715	__devinet_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
2716err_reg_all:
2717	kfree(tbl);
2718err_alloc_ctl:
2719#endif
2720	kfree(dflt);
2721err_alloc_dflt:
2722	kfree(all);
2723err_alloc_all:
2724	return err;
2725}
2726
2727static __net_exit void devinet_exit_net(struct net *net)
2728{
2729#ifdef CONFIG_SYSCTL
2730	struct ctl_table *tbl;
2731
2732	tbl = net->ipv4.forw_hdr->ctl_table_arg;
2733	unregister_net_sysctl_table(net->ipv4.forw_hdr);
2734	__devinet_sysctl_unregister(net, net->ipv4.devconf_dflt,
2735				    NETCONFA_IFINDEX_DEFAULT);
2736	__devinet_sysctl_unregister(net, net->ipv4.devconf_all,
2737				    NETCONFA_IFINDEX_ALL);
2738	kfree(tbl);
2739#endif
2740	kfree(net->ipv4.devconf_dflt);
2741	kfree(net->ipv4.devconf_all);
2742}
2743
2744static __net_initdata struct pernet_operations devinet_ops = {
2745	.init = devinet_init_net,
2746	.exit = devinet_exit_net,
2747};
2748
2749static struct rtnl_af_ops inet_af_ops __read_mostly = {
2750	.family		  = AF_INET,
2751	.fill_link_af	  = inet_fill_link_af,
2752	.get_link_af_size = inet_get_link_af_size,
2753	.validate_link_af = inet_validate_link_af,
2754	.set_link_af	  = inet_set_link_af,
2755};
2756
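/* Boot-time registration: the address hash table, the pernet ops above,
 * the PF_INET gifconf and netdevice notifier hooks, the address
 * lifetime worker, the AF_INET link attribute ops and the
 * RTM_{NEW,DEL,GET}ADDR / RTM_GETNETCONF rtnetlink handlers.
 */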
2757void __init devinet_init(void)
2758{
2759	int i;
2760
2761	for (i = 0; i < IN4_ADDR_HSIZE; i++)
2762		INIT_HLIST_HEAD(&inet_addr_lst[i]);
2763
2764	register_pernet_subsys(&devinet_ops);
2765
2766	register_gifconf(PF_INET, inet_gifconf);
2767	register_netdevice_notifier(&ip_netdev_notifier);
2768
2769	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
2770
2771	rtnl_af_register(&inet_af_ops);
2772
2773	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0);
2774	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0);
2775	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, 0);
2776	rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
2777		      inet_netconf_dump_devconf, 0);
2778}
 104	[IFA_TARGET_NETNSID]	= { .type = NLA_S32 },
 105};
 106
 107struct inet_fill_args {
 108	u32 portid;
 109	u32 seq;
 110	int event;
 111	unsigned int flags;
 112	int netnsid;
 113	int ifindex;
 114};
 115
 116#define IN4_ADDR_HSIZE_SHIFT	8
 117#define IN4_ADDR_HSIZE		(1U << IN4_ADDR_HSIZE_SHIFT)
 118
 119static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
 120
 121static u32 inet_addr_hash(const struct net *net, __be32 addr)
 122{
 123	u32 val = (__force u32) addr ^ net_hash_mix(net);
 124
 125	return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
 126}
 127
 128static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
 129{
 130	u32 hash = inet_addr_hash(net, ifa->ifa_local);
 131
 132	ASSERT_RTNL();
 133	hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
 134}
 135
 136static void inet_hash_remove(struct in_ifaddr *ifa)
 137{
 138	ASSERT_RTNL();
 139	hlist_del_init_rcu(&ifa->hash);
 140}
 141
 142/**
 143 * __ip_dev_find - find the first device with a given source address.
 144 * @net: the net namespace
 145 * @addr: the source address
 146 * @devref: if true, take a reference on the found device
 147 *
 148 * If a caller uses devref=false, it should be protected by RCU, or RTNL
 149 */
 150struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
 151{
 152	struct net_device *result = NULL;
 153	struct in_ifaddr *ifa;
 154
 155	rcu_read_lock();
 156	ifa = inet_lookup_ifaddr_rcu(net, addr);
 157	if (!ifa) {
 158		struct flowi4 fl4 = { .daddr = addr };
 159		struct fib_result res = { 0 };
 160		struct fib_table *local;
 161
 162		/* Fallback to FIB local table so that communication
 163		 * over loopback subnets work.
 164		 */
 165		local = fib_get_table(net, RT_TABLE_LOCAL);
 166		if (local &&
 167		    !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
 168		    res.type == RTN_LOCAL)
 169			result = FIB_RES_DEV(res);
 170	} else {
 171		result = ifa->ifa_dev->dev;
 172	}
 173	if (result && devref)
 174		dev_hold(result);
 175	rcu_read_unlock();
 176	return result;
 177}
 178EXPORT_SYMBOL(__ip_dev_find);
 179
 180/* called under RCU lock */
 181struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr)
 182{
 183	u32 hash = inet_addr_hash(net, addr);
 184	struct in_ifaddr *ifa;
 185
 186	hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash)
 187		if (ifa->ifa_local == addr &&
 188		    net_eq(dev_net(ifa->ifa_dev->dev), net))
 189			return ifa;
 190
 191	return NULL;
 192}
 193
 194static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
 195
 196static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
 197static BLOCKING_NOTIFIER_HEAD(inetaddr_validator_chain);
 198static void inet_del_ifa(struct in_device *in_dev,
 199			 struct in_ifaddr __rcu **ifap,
 200			 int destroy);
 201#ifdef CONFIG_SYSCTL
 202static int devinet_sysctl_register(struct in_device *idev);
 203static void devinet_sysctl_unregister(struct in_device *idev);
 204#else
 205static int devinet_sysctl_register(struct in_device *idev)
 206{
 207	return 0;
 208}
 209static void devinet_sysctl_unregister(struct in_device *idev)
 210{
 211}
 212#endif
 213
 214/* Locks all the inet devices. */
 215
 216static struct in_ifaddr *inet_alloc_ifa(void)
 217{
 218	return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL);
 219}
 220
 221static void inet_rcu_free_ifa(struct rcu_head *head)
 222{
 223	struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
 224	if (ifa->ifa_dev)
 225		in_dev_put(ifa->ifa_dev);
 226	kfree(ifa);
 227}
 228
 229static void inet_free_ifa(struct in_ifaddr *ifa)
 230{
 231	call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
 232}
 233
 234void in_dev_finish_destroy(struct in_device *idev)
 235{
 236	struct net_device *dev = idev->dev;
 237
 238	WARN_ON(idev->ifa_list);
 239	WARN_ON(idev->mc_list);
 240	kfree(rcu_dereference_protected(idev->mc_hash, 1));
 241#ifdef NET_REFCNT_DEBUG
 242	pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
 243#endif
 244	dev_put(dev);
 245	if (!idev->dead)
 246		pr_err("Freeing alive in_device %p\n", idev);
 247	else
 248		kfree(idev);
 249}
 250EXPORT_SYMBOL(in_dev_finish_destroy);
 251
 252static struct in_device *inetdev_init(struct net_device *dev)
 253{
 254	struct in_device *in_dev;
 255	int err = -ENOMEM;
 256
 257	ASSERT_RTNL();
 258
 259	in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
 260	if (!in_dev)
 261		goto out;
 262	memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
 263			sizeof(in_dev->cnf));
 264	in_dev->cnf.sysctl = NULL;
 265	in_dev->dev = dev;
 266	in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
 267	if (!in_dev->arp_parms)
 268		goto out_kfree;
 269	if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
 270		dev_disable_lro(dev);
 271	/* Reference in_dev->dev */
 272	dev_hold(dev);
 273	/* Account for reference dev->ip_ptr (below) */
 274	refcount_set(&in_dev->refcnt, 1);
 275
 276	err = devinet_sysctl_register(in_dev);
 277	if (err) {
 278		in_dev->dead = 1;
 
 279		in_dev_put(in_dev);
 280		in_dev = NULL;
 281		goto out;
 282	}
 283	ip_mc_init_dev(in_dev);
 284	if (dev->flags & IFF_UP)
 285		ip_mc_up(in_dev);
 286
 287	/* we can receive as soon as ip_ptr is set -- do this last */
 288	rcu_assign_pointer(dev->ip_ptr, in_dev);
 289out:
 290	return in_dev ?: ERR_PTR(err);
 291out_kfree:
 292	kfree(in_dev);
 293	in_dev = NULL;
 294	goto out;
 295}
 296
 297static void in_dev_rcu_put(struct rcu_head *head)
 298{
 299	struct in_device *idev = container_of(head, struct in_device, rcu_head);
 300	in_dev_put(idev);
 301}
 302
 303static void inetdev_destroy(struct in_device *in_dev)
 304{
 305	struct net_device *dev;
 306	struct in_ifaddr *ifa;
 307
 308	ASSERT_RTNL();
 309
 310	dev = in_dev->dev;
 311
 312	in_dev->dead = 1;
 313
 314	ip_mc_destroy_dev(in_dev);
 315
 316	while ((ifa = rtnl_dereference(in_dev->ifa_list)) != NULL) {
 317		inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
 318		inet_free_ifa(ifa);
 319	}
 320
 321	RCU_INIT_POINTER(dev->ip_ptr, NULL);
 322
 323	devinet_sysctl_unregister(in_dev);
 324	neigh_parms_release(&arp_tbl, in_dev->arp_parms);
 325	arp_ifdown(dev);
 326
 327	call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
 328}
 329
 330int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
 331{
 332	const struct in_ifaddr *ifa;
 333
 334	rcu_read_lock();
 335	in_dev_for_each_ifa_rcu(ifa, in_dev) {
 336		if (inet_ifa_match(a, ifa)) {
 337			if (!b || inet_ifa_match(b, ifa)) {
 338				rcu_read_unlock();
 339				return 1;
 340			}
 341		}
 342	}
 343	rcu_read_unlock();
 344	return 0;
 345}
 346
 347static void __inet_del_ifa(struct in_device *in_dev,
 348			   struct in_ifaddr __rcu **ifap,
 349			   int destroy, struct nlmsghdr *nlh, u32 portid)
 350{
 351	struct in_ifaddr *promote = NULL;
 352	struct in_ifaddr *ifa, *ifa1;
 353	struct in_ifaddr *last_prim;
 354	struct in_ifaddr *prev_prom = NULL;
 355	int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
 356
 357	ASSERT_RTNL();
 358
 359	ifa1 = rtnl_dereference(*ifap);
 360	last_prim = rtnl_dereference(in_dev->ifa_list);
 361	if (in_dev->dead)
 362		goto no_promotions;
 363
 364	/* 1. Deleting primary ifaddr forces deletion all secondaries
 365	 * unless alias promotion is set
 366	 **/
 367
 368	if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
 369		struct in_ifaddr __rcu **ifap1 = &ifa1->ifa_next;
 370
 371		while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
 372			if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
 373			    ifa1->ifa_scope <= ifa->ifa_scope)
 374				last_prim = ifa;
 375
 376			if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
 377			    ifa1->ifa_mask != ifa->ifa_mask ||
 378			    !inet_ifa_match(ifa1->ifa_address, ifa)) {
 379				ifap1 = &ifa->ifa_next;
 380				prev_prom = ifa;
 381				continue;
 382			}
 383
 384			if (!do_promote) {
 385				inet_hash_remove(ifa);
 386				*ifap1 = ifa->ifa_next;
 387
 388				rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
 389				blocking_notifier_call_chain(&inetaddr_chain,
 390						NETDEV_DOWN, ifa);
 391				inet_free_ifa(ifa);
 392			} else {
 393				promote = ifa;
 394				break;
 395			}
 396		}
 397	}
 398
 399	/* On promotion all secondaries from subnet are changing
 400	 * the primary IP, we must remove all their routes silently
 401	 * and later to add them back with new prefsrc. Do this
 402	 * while all addresses are on the device list.
 403	 */
 404	for (ifa = promote; ifa; ifa = rtnl_dereference(ifa->ifa_next)) {
 405		if (ifa1->ifa_mask == ifa->ifa_mask &&
 406		    inet_ifa_match(ifa1->ifa_address, ifa))
 407			fib_del_ifaddr(ifa, ifa1);
 408	}
 409
 410no_promotions:
 411	/* 2. Unlink it */
 412
 413	*ifap = ifa1->ifa_next;
 414	inet_hash_remove(ifa1);
 415
 416	/* 3. Announce address deletion */
 417
 418	/* Send message first, then call notifier.
 419	   At first sight, FIB update triggered by notifier
 420	   will refer to already deleted ifaddr, that could confuse
 421	   netlink listeners. It is not true: look, gated sees
 422	   that route deleted and if it still thinks that ifaddr
 423	   is valid, it will try to restore deleted routes... Grr.
 424	   So that, this order is correct.
 425	 */
 426	rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
 427	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
 428
 429	if (promote) {
 430		struct in_ifaddr *next_sec;
 431
 432		next_sec = rtnl_dereference(promote->ifa_next);
 433		if (prev_prom) {
 434			struct in_ifaddr *last_sec;
 435
 436			rcu_assign_pointer(prev_prom->ifa_next, next_sec);
 437
 438			last_sec = rtnl_dereference(last_prim->ifa_next);
 439			rcu_assign_pointer(promote->ifa_next, last_sec);
 440			rcu_assign_pointer(last_prim->ifa_next, promote);
 441		}
 442
 443		promote->ifa_flags &= ~IFA_F_SECONDARY;
 444		rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
 445		blocking_notifier_call_chain(&inetaddr_chain,
 446				NETDEV_UP, promote);
 447		for (ifa = next_sec; ifa;
 448		     ifa = rtnl_dereference(ifa->ifa_next)) {
 449			if (ifa1->ifa_mask != ifa->ifa_mask ||
 450			    !inet_ifa_match(ifa1->ifa_address, ifa))
 451					continue;
 452			fib_add_ifaddr(ifa);
 453		}
 454
 455	}
 456	if (destroy)
 457		inet_free_ifa(ifa1);
 458}
 459
 460static void inet_del_ifa(struct in_device *in_dev,
 461			 struct in_ifaddr __rcu **ifap,
 462			 int destroy)
 463{
 464	__inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
 465}
 466
 467static void check_lifetime(struct work_struct *work);
 468
 469static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
 470
 471static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
 472			     u32 portid, struct netlink_ext_ack *extack)
 473{
 474	struct in_ifaddr __rcu **last_primary, **ifap;
 475	struct in_device *in_dev = ifa->ifa_dev;
 476	struct in_validator_info ivi;
 477	struct in_ifaddr *ifa1;
 478	int ret;
 479
 480	ASSERT_RTNL();
 481
 482	if (!ifa->ifa_local) {
 483		inet_free_ifa(ifa);
 484		return 0;
 485	}
 486
 487	ifa->ifa_flags &= ~IFA_F_SECONDARY;
 488	last_primary = &in_dev->ifa_list;
 489
 490	/* Don't set IPv6 only flags to IPv4 addresses */
 491	ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
 492
 493	ifap = &in_dev->ifa_list;
 494	ifa1 = rtnl_dereference(*ifap);
 495
 496	while (ifa1) {
 497		if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
 498		    ifa->ifa_scope <= ifa1->ifa_scope)
 499			last_primary = &ifa1->ifa_next;
 500		if (ifa1->ifa_mask == ifa->ifa_mask &&
 501		    inet_ifa_match(ifa1->ifa_address, ifa)) {
 502			if (ifa1->ifa_local == ifa->ifa_local) {
 503				inet_free_ifa(ifa);
 504				return -EEXIST;
 505			}
 506			if (ifa1->ifa_scope != ifa->ifa_scope) {
 507				inet_free_ifa(ifa);
 508				return -EINVAL;
 509			}
 510			ifa->ifa_flags |= IFA_F_SECONDARY;
 511		}
 512
 513		ifap = &ifa1->ifa_next;
 514		ifa1 = rtnl_dereference(*ifap);
 515	}
 516
 517	/* Allow any devices that wish to register ifaddr validtors to weigh
 518	 * in now, before changes are committed.  The rntl lock is serializing
 519	 * access here, so the state should not change between a validator call
 520	 * and a final notify on commit.  This isn't invoked on promotion under
 521	 * the assumption that validators are checking the address itself, and
 522	 * not the flags.
 523	 */
 524	ivi.ivi_addr = ifa->ifa_address;
 525	ivi.ivi_dev = ifa->ifa_dev;
 526	ivi.extack = extack;
 527	ret = blocking_notifier_call_chain(&inetaddr_validator_chain,
 528					   NETDEV_UP, &ivi);
 529	ret = notifier_to_errno(ret);
 530	if (ret) {
 531		inet_free_ifa(ifa);
 532		return ret;
 533	}
 534
 535	if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
 536		prandom_seed((__force u32) ifa->ifa_local);
 537		ifap = last_primary;
 538	}
 539
 540	rcu_assign_pointer(ifa->ifa_next, *ifap);
 541	rcu_assign_pointer(*ifap, ifa);
 542
 543	inet_hash_insert(dev_net(in_dev->dev), ifa);
 544
 545	cancel_delayed_work(&check_lifetime_work);
 546	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
 547
 548	/* Send message first, then call notifier.
 549	   Notifier will trigger FIB update, so that
 550	   listeners of netlink will know about new ifaddr */
 551	rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
 552	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
 553
 554	return 0;
 555}
 556
 557static int inet_insert_ifa(struct in_ifaddr *ifa)
 558{
 559	return __inet_insert_ifa(ifa, NULL, 0, NULL);
 560}
 561
 562static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
 563{
 564	struct in_device *in_dev = __in_dev_get_rtnl(dev);
 565
 566	ASSERT_RTNL();
 567
 568	if (!in_dev) {
 569		inet_free_ifa(ifa);
 570		return -ENOBUFS;
 571	}
 572	ipv4_devconf_setall(in_dev);
 573	neigh_parms_data_state_setall(in_dev->arp_parms);
 574	if (ifa->ifa_dev != in_dev) {
 575		WARN_ON(ifa->ifa_dev);
 576		in_dev_hold(in_dev);
 577		ifa->ifa_dev = in_dev;
 578	}
 579	if (ipv4_is_loopback(ifa->ifa_local))
 580		ifa->ifa_scope = RT_SCOPE_HOST;
 581	return inet_insert_ifa(ifa);
 582}
 583
 584/* Caller must hold RCU or RTNL :
 585 * We dont take a reference on found in_device
 586 */
 587struct in_device *inetdev_by_index(struct net *net, int ifindex)
 588{
 589	struct net_device *dev;
 590	struct in_device *in_dev = NULL;
 591
 592	rcu_read_lock();
 593	dev = dev_get_by_index_rcu(net, ifindex);
 594	if (dev)
 595		in_dev = rcu_dereference_rtnl(dev->ip_ptr);
 596	rcu_read_unlock();
 597	return in_dev;
 598}
 599EXPORT_SYMBOL(inetdev_by_index);
 600
 601/* Called only from RTNL semaphored context. No locks. */
 602
 603struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
 604				    __be32 mask)
 605{
 606	struct in_ifaddr *ifa;
 607
 608	ASSERT_RTNL();
 609
 610	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
 611		if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
 612			return ifa;
 613	}
 614	return NULL;
 615}
 616
 617static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
 
 618{
 
 619	struct ip_mreqn mreq = {
 620		.imr_multiaddr.s_addr = ifa->ifa_address,
 621		.imr_ifindex = ifa->ifa_dev->dev->ifindex,
 622	};
 
 623	int ret;
 624
 625	ASSERT_RTNL();
 626
 627	lock_sock(sk);
 628	if (join)
 629		ret = ip_mc_join_group(sk, &mreq);
 630	else
 631		ret = ip_mc_leave_group(sk, &mreq);
 632	release_sock(sk);
 633
 634	return ret;
 
 
 
 635}
 636
 637static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
 638			    struct netlink_ext_ack *extack)
 639{
 640	struct net *net = sock_net(skb->sk);
 641	struct in_ifaddr __rcu **ifap;
 642	struct nlattr *tb[IFA_MAX+1];
 643	struct in_device *in_dev;
 644	struct ifaddrmsg *ifm;
 645	struct in_ifaddr *ifa;
 646
 647	int err = -EINVAL;
 648
 649	ASSERT_RTNL();
 650
 651	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
 652				     ifa_ipv4_policy, extack);
 653	if (err < 0)
 654		goto errout;
 655
 656	ifm = nlmsg_data(nlh);
 657	in_dev = inetdev_by_index(net, ifm->ifa_index);
 658	if (!in_dev) {
 659		err = -ENODEV;
 660		goto errout;
 661	}
 662
 663	for (ifap = &in_dev->ifa_list; (ifa = rtnl_dereference(*ifap)) != NULL;
 664	     ifap = &ifa->ifa_next) {
 665		if (tb[IFA_LOCAL] &&
 666		    ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
 667			continue;
 668
 669		if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
 670			continue;
 671
 672		if (tb[IFA_ADDRESS] &&
 673		    (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
 674		    !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
 675			continue;
 676
 677		if (ipv4_is_multicast(ifa->ifa_address))
 678			ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
 679		__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
 680		return 0;
 681	}
 682
 683	err = -EADDRNOTAVAIL;
 684errout:
 685	return err;
 686}
 687
 688#define INFINITY_LIFE_TIME	0xFFFFFFFF
 689
 690static void check_lifetime(struct work_struct *work)
 691{
 692	unsigned long now, next, next_sec, next_sched;
 693	struct in_ifaddr *ifa;
 694	struct hlist_node *n;
 695	int i;
 696
 697	now = jiffies;
 698	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
 699
 700	for (i = 0; i < IN4_ADDR_HSIZE; i++) {
 701		bool change_needed = false;
 702
 703		rcu_read_lock();
 704		hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
 705			unsigned long age;
 706
 707			if (ifa->ifa_flags & IFA_F_PERMANENT)
 708				continue;
 709
 710			/* We try to batch several events at once. */
 711			age = (now - ifa->ifa_tstamp +
 712			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
 713
 714			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
 715			    age >= ifa->ifa_valid_lft) {
 716				change_needed = true;
 717			} else if (ifa->ifa_preferred_lft ==
 718				   INFINITY_LIFE_TIME) {
 719				continue;
 720			} else if (age >= ifa->ifa_preferred_lft) {
 721				if (time_before(ifa->ifa_tstamp +
 722						ifa->ifa_valid_lft * HZ, next))
 723					next = ifa->ifa_tstamp +
 724					       ifa->ifa_valid_lft * HZ;
 725
 726				if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
 727					change_needed = true;
 728			} else if (time_before(ifa->ifa_tstamp +
 729					       ifa->ifa_preferred_lft * HZ,
 730					       next)) {
 731				next = ifa->ifa_tstamp +
 732				       ifa->ifa_preferred_lft * HZ;
 733			}
 734		}
 735		rcu_read_unlock();
 736		if (!change_needed)
 737			continue;
 738		rtnl_lock();
 739		hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
 740			unsigned long age;
 741
 742			if (ifa->ifa_flags & IFA_F_PERMANENT)
 743				continue;
 744
 745			/* We try to batch several events at once. */
 746			age = (now - ifa->ifa_tstamp +
 747			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
 748
 749			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
 750			    age >= ifa->ifa_valid_lft) {
 751				struct in_ifaddr __rcu **ifap;
 752				struct in_ifaddr *tmp;
 753
 754				ifap = &ifa->ifa_dev->ifa_list;
 755				tmp = rtnl_dereference(*ifap);
 756				while (tmp) {
 757					if (tmp == ifa) {
 758						inet_del_ifa(ifa->ifa_dev,
 759							     ifap, 1);
 760						break;
 761					}
 762					ifap = &tmp->ifa_next;
 763					tmp = rtnl_dereference(*ifap);
 764				}
 765			} else if (ifa->ifa_preferred_lft !=
 766				   INFINITY_LIFE_TIME &&
 767				   age >= ifa->ifa_preferred_lft &&
 768				   !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
 769				ifa->ifa_flags |= IFA_F_DEPRECATED;
 770				rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
 771			}
 772		}
 773		rtnl_unlock();
 774	}
 775
 776	next_sec = round_jiffies_up(next);
 777	next_sched = next;
 778
 779	/* If rounded timeout is accurate enough, accept it. */
 780	if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
 781		next_sched = next_sec;
 782
 783	now = jiffies;
 784	/* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
 785	if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
 786		next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
 787
 788	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work,
 789			next_sched - now);
 790}
 791
 792static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
 793			     __u32 prefered_lft)
 794{
 795	unsigned long timeout;
 796
 797	ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
 798
 799	timeout = addrconf_timeout_fixup(valid_lft, HZ);
 800	if (addrconf_finite_timeout(timeout))
 801		ifa->ifa_valid_lft = timeout;
 802	else
 803		ifa->ifa_flags |= IFA_F_PERMANENT;
 804
 805	timeout = addrconf_timeout_fixup(prefered_lft, HZ);
 806	if (addrconf_finite_timeout(timeout)) {
 807		if (timeout == 0)
 808			ifa->ifa_flags |= IFA_F_DEPRECATED;
 809		ifa->ifa_preferred_lft = timeout;
 810	}
 811	ifa->ifa_tstamp = jiffies;
 812	if (!ifa->ifa_cstamp)
 813		ifa->ifa_cstamp = ifa->ifa_tstamp;
 814}
 815
 816static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
 817				       __u32 *pvalid_lft, __u32 *pprefered_lft,
 818				       struct netlink_ext_ack *extack)
 819{
 820	struct nlattr *tb[IFA_MAX+1];
 821	struct in_ifaddr *ifa;
 822	struct ifaddrmsg *ifm;
 823	struct net_device *dev;
 824	struct in_device *in_dev;
 825	int err;
 826
 827	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
 828				     ifa_ipv4_policy, extack);
 829	if (err < 0)
 830		goto errout;
 831
 832	ifm = nlmsg_data(nlh);
 833	err = -EINVAL;
 834	if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
 835		goto errout;
 836
 837	dev = __dev_get_by_index(net, ifm->ifa_index);
 838	err = -ENODEV;
 839	if (!dev)
 840		goto errout;
 841
 842	in_dev = __in_dev_get_rtnl(dev);
 843	err = -ENOBUFS;
 844	if (!in_dev)
 845		goto errout;
 846
 847	ifa = inet_alloc_ifa();
 848	if (!ifa)
 849		/*
 850		 * A potential indev allocation can be left alive, it stays
 851		 * assigned to its device and is destroy with it.
 852		 */
 853		goto errout;
 854
 855	ipv4_devconf_setall(in_dev);
 856	neigh_parms_data_state_setall(in_dev->arp_parms);
 857	in_dev_hold(in_dev);
 858
 859	if (!tb[IFA_ADDRESS])
 860		tb[IFA_ADDRESS] = tb[IFA_LOCAL];
 861
 862	INIT_HLIST_NODE(&ifa->hash);
 863	ifa->ifa_prefixlen = ifm->ifa_prefixlen;
 864	ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
 865	ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
 866					 ifm->ifa_flags;
 867	ifa->ifa_scope = ifm->ifa_scope;
 868	ifa->ifa_dev = in_dev;
 869
 870	ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
 871	ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
 872
 873	if (tb[IFA_BROADCAST])
 874		ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
 875
 876	if (tb[IFA_LABEL])
 877		nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
 878	else
 879		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
 880
 881	if (tb[IFA_RT_PRIORITY])
 882		ifa->ifa_rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
 883
 884	if (tb[IFA_CACHEINFO]) {
 885		struct ifa_cacheinfo *ci;
 886
 887		ci = nla_data(tb[IFA_CACHEINFO]);
 888		if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
 889			err = -EINVAL;
 890			goto errout_free;
 891		}
 892		*pvalid_lft = ci->ifa_valid;
 893		*pprefered_lft = ci->ifa_prefered;
 894	}
 895
 896	return ifa;
 897
 898errout_free:
 899	inet_free_ifa(ifa);
 900errout:
 901	return ERR_PTR(err);
 902}
 903
 904static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
 905{
 906	struct in_device *in_dev = ifa->ifa_dev;
 907	struct in_ifaddr *ifa1;
 908
 909	if (!ifa->ifa_local)
 910		return NULL;
 911
 912	in_dev_for_each_ifa_rtnl(ifa1, in_dev) {
 913		if (ifa1->ifa_mask == ifa->ifa_mask &&
 914		    inet_ifa_match(ifa1->ifa_address, ifa) &&
 915		    ifa1->ifa_local == ifa->ifa_local)
 916			return ifa1;
 917	}
 918	return NULL;
 919}
 920
 921static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
 922			    struct netlink_ext_ack *extack)
 923{
 924	struct net *net = sock_net(skb->sk);
 925	struct in_ifaddr *ifa;
 926	struct in_ifaddr *ifa_existing;
 927	__u32 valid_lft = INFINITY_LIFE_TIME;
 928	__u32 prefered_lft = INFINITY_LIFE_TIME;
 929
 930	ASSERT_RTNL();
 931
 932	ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft, extack);
 933	if (IS_ERR(ifa))
 934		return PTR_ERR(ifa);
 935
 936	ifa_existing = find_matching_ifa(ifa);
 937	if (!ifa_existing) {
 938		/* It would be best to check for !NLM_F_CREATE here but
 939		 * userspace already relies on not having to provide this.
 940		 */
 941		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
 942		if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
 943			int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
 944					       true, ifa);
 945
 946			if (ret < 0) {
 947				inet_free_ifa(ifa);
 948				return ret;
 949			}
 950		}
 951		return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid,
 952					 extack);
 953	} else {
 954		u32 new_metric = ifa->ifa_rt_priority;
 955
 956		inet_free_ifa(ifa);
 957
 958		if (nlh->nlmsg_flags & NLM_F_EXCL ||
 959		    !(nlh->nlmsg_flags & NLM_F_REPLACE))
 960			return -EEXIST;
 961		ifa = ifa_existing;
 962
 963		if (ifa->ifa_rt_priority != new_metric) {
 964			fib_modify_prefix_metric(ifa, new_metric);
 965			ifa->ifa_rt_priority = new_metric;
 966		}
 967
 968		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
 969		cancel_delayed_work(&check_lifetime_work);
 970		queue_delayed_work(system_power_efficient_wq,
 971				&check_lifetime_work, 0);
 972		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
 973	}
 974	return 0;
 975}
 976
 977/*
 978 *	Determine a default network mask, based on the IP address.
 979 */
 980
 981static int inet_abc_len(__be32 addr)
 982{
 983	int rc = -1;	/* Something else, probably a multicast. */
 984
 985	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
 986		rc = 0;
 987	else {
 988		__u32 haddr = ntohl(addr);
 989		if (IN_CLASSA(haddr))
 990			rc = 8;
 991		else if (IN_CLASSB(haddr))
 992			rc = 16;
 993		else if (IN_CLASSC(haddr))
 994			rc = 24;
 995		else if (IN_CLASSE(haddr))
 996			rc = 32;
 997	}
 998
 999	return rc;
1000}
1001
1002
1003int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
1004{
1005	struct sockaddr_in sin_orig;
1006	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr->ifr_addr;
1007	struct in_ifaddr __rcu **ifap = NULL;
1008	struct in_device *in_dev;
1009	struct in_ifaddr *ifa = NULL;
1010	struct net_device *dev;
1011	char *colon;
1012	int ret = -EFAULT;
1013	int tryaddrmatch = 0;
1014
1015	ifr->ifr_name[IFNAMSIZ - 1] = 0;
1016
1017	/* save original address for comparison */
1018	memcpy(&sin_orig, sin, sizeof(*sin));
1019
1020	colon = strchr(ifr->ifr_name, ':');
1021	if (colon)
1022		*colon = 0;
1023
1024	dev_load(net, ifr->ifr_name);
1025
1026	switch (cmd) {
1027	case SIOCGIFADDR:	/* Get interface address */
1028	case SIOCGIFBRDADDR:	/* Get the broadcast address */
1029	case SIOCGIFDSTADDR:	/* Get the destination address */
1030	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
1031		/* Note that these ioctls will not sleep,
1032		   so that we do not impose a lock.
1033		   One day we will be forced to put shlock here (I mean SMP)
1034		 */
1035		tryaddrmatch = (sin_orig.sin_family == AF_INET);
1036		memset(sin, 0, sizeof(*sin));
1037		sin->sin_family = AF_INET;
1038		break;
1039
1040	case SIOCSIFFLAGS:
1041		ret = -EPERM;
1042		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1043			goto out;
1044		break;
1045	case SIOCSIFADDR:	/* Set interface address (and family) */
1046	case SIOCSIFBRDADDR:	/* Set the broadcast address */
1047	case SIOCSIFDSTADDR:	/* Set the destination address */
1048	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
1049		ret = -EPERM;
1050		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1051			goto out;
1052		ret = -EINVAL;
1053		if (sin->sin_family != AF_INET)
1054			goto out;
1055		break;
1056	default:
1057		ret = -EINVAL;
1058		goto out;
1059	}
1060
1061	rtnl_lock();
1062
1063	ret = -ENODEV;
1064	dev = __dev_get_by_name(net, ifr->ifr_name);
1065	if (!dev)
1066		goto done;
1067
1068	if (colon)
1069		*colon = ':';
1070
1071	in_dev = __in_dev_get_rtnl(dev);
1072	if (in_dev) {
1073		if (tryaddrmatch) {
1074			/* Matthias Andree */
1075			/* compare label and address (4.4BSD style) */
1076			/* note: we only do this for a limited set of ioctls
1077			   and only if the original address family was AF_INET.
1078			   This is checked above. */
1079
1080			for (ifap = &in_dev->ifa_list;
1081			     (ifa = rtnl_dereference(*ifap)) != NULL;
1082			     ifap = &ifa->ifa_next) {
1083				if (!strcmp(ifr->ifr_name, ifa->ifa_label) &&
1084				    sin_orig.sin_addr.s_addr ==
1085							ifa->ifa_local) {
1086					break; /* found */
1087				}
1088			}
1089		}
1090		/* we didn't get a match, maybe the application is
1091		   4.3BSD-style and passed in junk so we fall back to
1092		   comparing just the label */
1093		if (!ifa) {
1094			for (ifap = &in_dev->ifa_list;
1095			     (ifa = rtnl_dereference(*ifap)) != NULL;
1096			     ifap = &ifa->ifa_next)
1097				if (!strcmp(ifr->ifr_name, ifa->ifa_label))
1098					break;
1099		}
1100	}
1101
1102	ret = -EADDRNOTAVAIL;
1103	if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
1104		goto done;
1105
1106	switch (cmd) {
1107	case SIOCGIFADDR:	/* Get interface address */
1108		ret = 0;
1109		sin->sin_addr.s_addr = ifa->ifa_local;
1110		break;
1111
1112	case SIOCGIFBRDADDR:	/* Get the broadcast address */
1113		ret = 0;
1114		sin->sin_addr.s_addr = ifa->ifa_broadcast;
1115		break;
1116
1117	case SIOCGIFDSTADDR:	/* Get the destination address */
1118		ret = 0;
1119		sin->sin_addr.s_addr = ifa->ifa_address;
1120		break;
1121
1122	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
1123		ret = 0;
1124		sin->sin_addr.s_addr = ifa->ifa_mask;
1125		break;
1126
1127	case SIOCSIFFLAGS:
1128		if (colon) {
1129			ret = -EADDRNOTAVAIL;
1130			if (!ifa)
1131				break;
1132			ret = 0;
1133			if (!(ifr->ifr_flags & IFF_UP))
1134				inet_del_ifa(in_dev, ifap, 1);
1135			break;
1136		}
1137		ret = dev_change_flags(dev, ifr->ifr_flags, NULL);
1138		break;
1139
1140	case SIOCSIFADDR:	/* Set interface address (and family) */
1141		ret = -EINVAL;
1142		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1143			break;
1144
1145		if (!ifa) {
1146			ret = -ENOBUFS;
1147			ifa = inet_alloc_ifa();
1148			if (!ifa)
1149				break;
1150			INIT_HLIST_NODE(&ifa->hash);
1151			if (colon)
1152				memcpy(ifa->ifa_label, ifr->ifr_name, IFNAMSIZ);
1153			else
1154				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1155		} else {
1156			ret = 0;
1157			if (ifa->ifa_local == sin->sin_addr.s_addr)
1158				break;
1159			inet_del_ifa(in_dev, ifap, 0);
1160			ifa->ifa_broadcast = 0;
1161			ifa->ifa_scope = 0;
1162		}
1163
1164		ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
1165
1166		if (!(dev->flags & IFF_POINTOPOINT)) {
1167			ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
1168			ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
1169			if ((dev->flags & IFF_BROADCAST) &&
1170			    ifa->ifa_prefixlen < 31)
1171				ifa->ifa_broadcast = ifa->ifa_address |
1172						     ~ifa->ifa_mask;
1173		} else {
1174			ifa->ifa_prefixlen = 32;
1175			ifa->ifa_mask = inet_make_mask(32);
1176		}
1177		set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
1178		ret = inet_set_ifa(dev, ifa);
1179		break;
1180
1181	case SIOCSIFBRDADDR:	/* Set the broadcast address */
1182		ret = 0;
1183		if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
1184			inet_del_ifa(in_dev, ifap, 0);
1185			ifa->ifa_broadcast = sin->sin_addr.s_addr;
1186			inet_insert_ifa(ifa);
1187		}
1188		break;
1189
1190	case SIOCSIFDSTADDR:	/* Set the destination address */
1191		ret = 0;
1192		if (ifa->ifa_address == sin->sin_addr.s_addr)
1193			break;
1194		ret = -EINVAL;
1195		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1196			break;
1197		ret = 0;
1198		inet_del_ifa(in_dev, ifap, 0);
1199		ifa->ifa_address = sin->sin_addr.s_addr;
1200		inet_insert_ifa(ifa);
1201		break;
1202
1203	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
1204
1205		/*
1206		 *	The mask we set must be legal.
1207		 */
1208		ret = -EINVAL;
1209		if (bad_mask(sin->sin_addr.s_addr, 0))
1210			break;
1211		ret = 0;
1212		if (ifa->ifa_mask != sin->sin_addr.s_addr) {
1213			__be32 old_mask = ifa->ifa_mask;
1214			inet_del_ifa(in_dev, ifap, 0);
1215			ifa->ifa_mask = sin->sin_addr.s_addr;
1216			ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
1217
 1218			/* If the current broadcast address was derived
 1219			 * from the old netmask, recalculate it for the
 1220			 * new one. Otherwise it is a non-standard
 1221			 * address, so don't touch it; the user seems
 1222			 * to know what they are doing...
 1223			 */
1224			if ((dev->flags & IFF_BROADCAST) &&
1225			    (ifa->ifa_prefixlen < 31) &&
1226			    (ifa->ifa_broadcast ==
1227			     (ifa->ifa_local|~old_mask))) {
1228				ifa->ifa_broadcast = (ifa->ifa_local |
1229						      ~sin->sin_addr.s_addr);
1230			}
1231			inet_insert_ifa(ifa);
1232		}
1233		break;
1234	}
1235done:
1236	rtnl_unlock();
1237out:
1238	return ret;
1239}
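/* A minimal userspace sketch of the request path served above, assuming a
 * plain AF_INET datagram socket and an interface named "eth0" (both purely
 * illustrative):
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <netinet/in.h>
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		if (ioctl(fd, SIOCGIFADDR, &ifr) == 0) {
 *			struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
 *
 *			printf("%s\n", inet_ntoa(sin->sin_addr));
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */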
1240
1241static int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
1242{
1243	struct in_device *in_dev = __in_dev_get_rtnl(dev);
1244	const struct in_ifaddr *ifa;
1245	struct ifreq ifr;
1246	int done = 0;
1247
1248	if (WARN_ON(size > sizeof(struct ifreq)))
1249		goto out;
1250
1251	if (!in_dev)
1252		goto out;
1253
1254	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1255		if (!buf) {
1256			done += size;
1257			continue;
1258		}
1259		if (len < size)
1260			break;
1261		memset(&ifr, 0, sizeof(struct ifreq));
1262		strcpy(ifr.ifr_name, ifa->ifa_label);
1263
1264		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
1265		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
1266								ifa->ifa_local;
1267
1268		if (copy_to_user(buf + done, &ifr, size)) {
1269			done = -EFAULT;
1270			break;
1271		}
1272		len  -= size;
1273		done += size;
1274	}
1275out:
1276	return done;
1277}
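/* Illustrative userspace counterpart of the layout filled in above, assuming
 * an already opened AF_INET socket fd (not part of this file):
 *
 *	struct ifreq reqs[16];
 *	struct ifconf ifc = {
 *		.ifc_len = sizeof(reqs),
 *		.ifc_req = reqs,
 *	};
 *
 *	if (ioctl(fd, SIOCGIFCONF, &ifc) == 0) {
 *		int i, n = ifc.ifc_len / sizeof(struct ifreq);
 *
 *		for (i = 0; i < n; i++)
 *			printf("%s\n", reqs[i].ifr_name);
 *	}
 */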
1278
1279static __be32 in_dev_select_addr(const struct in_device *in_dev,
1280				 int scope)
1281{
1282	const struct in_ifaddr *ifa;
1283
1284	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1285		if (ifa->ifa_flags & IFA_F_SECONDARY)
1286			continue;
1287		if (ifa->ifa_scope != RT_SCOPE_LINK &&
1288		    ifa->ifa_scope <= scope)
1289			return ifa->ifa_local;
1290	}
1291
1292	return 0;
1293}
1294
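/* Pick a source address on @dev suitable for reaching @dst within @scope:
 * prefer a primary address whose subnet contains @dst, then any primary
 * address within scope; failing that, fall back to the VRF master device
 * (if any) and finally to the other devices in the namespace, lo first.
 */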
1295__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
1296{
1297	const struct in_ifaddr *ifa;
1298	__be32 addr = 0;
1299	unsigned char localnet_scope = RT_SCOPE_HOST;
1300	struct in_device *in_dev;
1301	struct net *net = dev_net(dev);
1302	int master_idx;
1303
1304	rcu_read_lock();
1305	in_dev = __in_dev_get_rcu(dev);
1306	if (!in_dev)
1307		goto no_in_dev;
1308
1309	if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
1310		localnet_scope = RT_SCOPE_LINK;
1311
1312	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1313		if (ifa->ifa_flags & IFA_F_SECONDARY)
1314			continue;
1315		if (min(ifa->ifa_scope, localnet_scope) > scope)
1316			continue;
1317		if (!dst || inet_ifa_match(dst, ifa)) {
1318			addr = ifa->ifa_local;
1319			break;
1320		}
1321		if (!addr)
1322			addr = ifa->ifa_local;
1323	}
1324
1325	if (addr)
1326		goto out_unlock;
1327no_in_dev:
1328	master_idx = l3mdev_master_ifindex_rcu(dev);
1329
1330	/* For VRFs, the VRF device takes the place of the loopback device,
1331	 * with addresses on it being preferred.  Note in such cases the
1332	 * loopback device will be among the devices that fail the master_idx
1333	 * equality check in the loop below.
1334	 */
1335	if (master_idx &&
1336	    (dev = dev_get_by_index_rcu(net, master_idx)) &&
1337	    (in_dev = __in_dev_get_rcu(dev))) {
1338		addr = in_dev_select_addr(in_dev, scope);
1339		if (addr)
1340			goto out_unlock;
1341	}
1342
 1343	/* Non-loopback addresses configured on the loopback device should
 1344	   be preferred in this case. It is important that lo is the first
 1345	   interface in the dev_base list.
 1346	 */
1347	for_each_netdev_rcu(net, dev) {
1348		if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1349			continue;
1350
1351		in_dev = __in_dev_get_rcu(dev);
1352		if (!in_dev)
1353			continue;
1354
1355		addr = in_dev_select_addr(in_dev, scope);
1356		if (addr)
1357			goto out_unlock;
1358	}
1359out_unlock:
1360	rcu_read_unlock();
1361	return addr;
1362}
1363EXPORT_SYMBOL(inet_select_addr);
1364
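/* Walk @in_dev looking both for an acceptable local address (matching
 * @local, or any address within @scope if @local is 0) and for an address
 * whose subnet covers @local and @dst; only when both conditions can be
 * satisfied is the selected address returned, otherwise 0.
 */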
1365static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
1366			      __be32 local, int scope)
1367{
1368	unsigned char localnet_scope = RT_SCOPE_HOST;
1369	const struct in_ifaddr *ifa;
1370	__be32 addr = 0;
1371	int same = 0;
1372
1373	if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
1374		localnet_scope = RT_SCOPE_LINK;
1375
1376	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1377		unsigned char min_scope = min(ifa->ifa_scope, localnet_scope);
1378
1379		if (!addr &&
1380		    (local == ifa->ifa_local || !local) &&
1381		    min_scope <= scope) {
1382			addr = ifa->ifa_local;
1383			if (same)
1384				break;
1385		}
1386		if (!same) {
1387			same = (!local || inet_ifa_match(local, ifa)) &&
1388				(!dst || inet_ifa_match(dst, ifa));
1389			if (same && addr) {
1390				if (local || !dst)
1391					break;
 1392			/* Is the selected addr in the dst subnet? */
1393				if (inet_ifa_match(addr, ifa))
1394					break;
1395				/* No, then can we use new local src? */
1396				if (min_scope <= scope) {
1397					addr = ifa->ifa_local;
1398					break;
1399				}
 1400				/* keep searching for a dst-matching subnet that also covers addr */
1401				same = 0;
1402			}
1403		}
1404	}
1405
1406	return same ? addr : 0;
1407}
1408
1409/*
1410 * Confirm that local IP address exists using wildcards:
1411 * - net: netns to check, cannot be NULL
1412 * - in_dev: only on this interface, NULL=any interface
1413 * - dst: only in the same subnet as dst, 0=any dst
1414 * - local: address, 0=autoselect the local address
1415 * - scope: maximum allowed scope value for the local address
1416 */
1417__be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
1418			 __be32 dst, __be32 local, int scope)
1419{
1420	__be32 addr = 0;
1421	struct net_device *dev;
1422
1423	if (in_dev)
1424		return confirm_addr_indev(in_dev, dst, local, scope);
1425
1426	rcu_read_lock();
1427	for_each_netdev_rcu(net, dev) {
1428		in_dev = __in_dev_get_rcu(dev);
1429		if (in_dev) {
1430			addr = confirm_addr_indev(in_dev, dst, local, scope);
1431			if (addr)
1432				break;
1433		}
1434	}
1435	rcu_read_unlock();
1436
1437	return addr;
1438}
1439EXPORT_SYMBOL(inet_confirm_addr);
1440
1441/*
1442 *	Device notifier
1443 */
1444
1445int register_inetaddr_notifier(struct notifier_block *nb)
1446{
1447	return blocking_notifier_chain_register(&inetaddr_chain, nb);
1448}
1449EXPORT_SYMBOL(register_inetaddr_notifier);
1450
1451int unregister_inetaddr_notifier(struct notifier_block *nb)
1452{
1453	return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
1454}
1455EXPORT_SYMBOL(unregister_inetaddr_notifier);
1456
1457int register_inetaddr_validator_notifier(struct notifier_block *nb)
1458{
1459	return blocking_notifier_chain_register(&inetaddr_validator_chain, nb);
1460}
1461EXPORT_SYMBOL(register_inetaddr_validator_notifier);
1462
1463int unregister_inetaddr_validator_notifier(struct notifier_block *nb)
1464{
1465	return blocking_notifier_chain_unregister(&inetaddr_validator_chain,
1466	    nb);
1467}
1468EXPORT_SYMBOL(unregister_inetaddr_validator_notifier);
1469
1470/* Rename ifa_labels for a device name change. Make some effort to preserve
1471 * existing alias numbering and to create unique labels if possible.
1472*/
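/* For example, renaming eth0 to eth1 turns labels "eth0" and "eth0:1"
 * into "eth1" and "eth1:1" respectively.
 */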
1473static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1474{
1475	struct in_ifaddr *ifa;
1476	int named = 0;
1477
1478	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1479		char old[IFNAMSIZ], *dot;
1480
1481		memcpy(old, ifa->ifa_label, IFNAMSIZ);
1482		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1483		if (named++ == 0)
1484			goto skip;
1485		dot = strchr(old, ':');
1486		if (!dot) {
1487			sprintf(old, ":%d", named);
1488			dot = old;
1489		}
1490		if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
1491			strcat(ifa->ifa_label, dot);
1492		else
1493			strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1494skip:
1495		rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
1496	}
1497}
1498
1499static bool inetdev_valid_mtu(unsigned int mtu)
1500{
1501	return mtu >= IPV4_MIN_MTU;
1502}
1503
1504static void inetdev_send_gratuitous_arp(struct net_device *dev,
1505					struct in_device *in_dev)
1507{
1508	const struct in_ifaddr *ifa;
1509
1510	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1511		arp_send(ARPOP_REQUEST, ETH_P_ARP,
1512			 ifa->ifa_local, dev,
1513			 ifa->ifa_local, NULL,
1514			 dev->dev_addr, NULL);
1515	}
1516}
1517
1518/* Called only under RTNL semaphore */
1519
1520static int inetdev_event(struct notifier_block *this, unsigned long event,
1521			 void *ptr)
1522{
1523	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1524	struct in_device *in_dev = __in_dev_get_rtnl(dev);
1525
1526	ASSERT_RTNL();
1527
1528	if (!in_dev) {
1529		if (event == NETDEV_REGISTER) {
1530			in_dev = inetdev_init(dev);
1531			if (IS_ERR(in_dev))
1532				return notifier_from_errno(PTR_ERR(in_dev));
1533			if (dev->flags & IFF_LOOPBACK) {
1534				IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
1535				IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
1536			}
1537		} else if (event == NETDEV_CHANGEMTU) {
1538			/* Re-enabling IP */
1539			if (inetdev_valid_mtu(dev->mtu))
1540				in_dev = inetdev_init(dev);
1541		}
1542		goto out;
1543	}
1544
1545	switch (event) {
1546	case NETDEV_REGISTER:
1547		pr_debug("%s: bug\n", __func__);
1548		RCU_INIT_POINTER(dev->ip_ptr, NULL);
1549		break;
1550	case NETDEV_UP:
1551		if (!inetdev_valid_mtu(dev->mtu))
1552			break;
1553		if (dev->flags & IFF_LOOPBACK) {
1554			struct in_ifaddr *ifa = inet_alloc_ifa();
1555
1556			if (ifa) {
1557				INIT_HLIST_NODE(&ifa->hash);
1558				ifa->ifa_local =
1559				  ifa->ifa_address = htonl(INADDR_LOOPBACK);
1560				ifa->ifa_prefixlen = 8;
1561				ifa->ifa_mask = inet_make_mask(8);
1562				in_dev_hold(in_dev);
1563				ifa->ifa_dev = in_dev;
1564				ifa->ifa_scope = RT_SCOPE_HOST;
1565				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1566				set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
1567						 INFINITY_LIFE_TIME);
1568				ipv4_devconf_setall(in_dev);
1569				neigh_parms_data_state_setall(in_dev->arp_parms);
1570				inet_insert_ifa(ifa);
1571			}
1572		}
1573		ip_mc_up(in_dev);
1574		/* fall through */
1575	case NETDEV_CHANGEADDR:
1576		if (!IN_DEV_ARP_NOTIFY(in_dev))
1577			break;
1578		/* fall through */
1579	case NETDEV_NOTIFY_PEERS:
1580		/* Send gratuitous ARP to notify of link change */
1581		inetdev_send_gratuitous_arp(dev, in_dev);
1582		break;
1583	case NETDEV_DOWN:
1584		ip_mc_down(in_dev);
1585		break;
1586	case NETDEV_PRE_TYPE_CHANGE:
1587		ip_mc_unmap(in_dev);
1588		break;
1589	case NETDEV_POST_TYPE_CHANGE:
1590		ip_mc_remap(in_dev);
1591		break;
1592	case NETDEV_CHANGEMTU:
1593		if (inetdev_valid_mtu(dev->mtu))
1594			break;
 1595		/* disable IP when the MTU is too small */
1596		/* fall through */
1597	case NETDEV_UNREGISTER:
1598		inetdev_destroy(in_dev);
1599		break;
1600	case NETDEV_CHANGENAME:
 1601		/* Do not notify about the label change; this event is
 1602		 * not interesting to applications using netlink.
 1603		 */
1604		inetdev_changename(dev, in_dev);
1605
1606		devinet_sysctl_unregister(in_dev);
1607		devinet_sysctl_register(in_dev);
1608		break;
1609	}
1610out:
1611	return NOTIFY_DONE;
1612}
1613
1614static struct notifier_block ip_netdev_notifier = {
1615	.notifier_call = inetdev_event,
1616};
1617
1618static size_t inet_nlmsg_size(void)
1619{
1620	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
1621	       + nla_total_size(4) /* IFA_ADDRESS */
1622	       + nla_total_size(4) /* IFA_LOCAL */
1623	       + nla_total_size(4) /* IFA_BROADCAST */
1624	       + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
1625	       + nla_total_size(4)  /* IFA_FLAGS */
1626	       + nla_total_size(4)  /* IFA_RT_PRIORITY */
1627	       + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
1628}
1629
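/* Convert a timestamp in jiffies (measured from INITIAL_JIFFIES) to the
 * hundredths of a second used by struct ifa_cacheinfo; e.g. with HZ == 1000
 * a delta of 2500 jiffies becomes 250 (2.5 s).
 */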
1630static inline u32 cstamp_delta(unsigned long cstamp)
1631{
1632	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
1633}
1634
1635static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
1636			 unsigned long tstamp, u32 preferred, u32 valid)
1637{
1638	struct ifa_cacheinfo ci;
1639
1640	ci.cstamp = cstamp_delta(cstamp);
1641	ci.tstamp = cstamp_delta(tstamp);
1642	ci.ifa_prefered = preferred;
1643	ci.ifa_valid = valid;
1644
1645	return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
1646}
1647
1648static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1649			    struct inet_fill_args *args)
1650{
1651	struct ifaddrmsg *ifm;
1652	struct nlmsghdr  *nlh;
1653	u32 preferred, valid;
1654
1655	nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
1656			args->flags);
1657	if (!nlh)
1658		return -EMSGSIZE;
1659
1660	ifm = nlmsg_data(nlh);
1661	ifm->ifa_family = AF_INET;
1662	ifm->ifa_prefixlen = ifa->ifa_prefixlen;
1663	ifm->ifa_flags = ifa->ifa_flags;
1664	ifm->ifa_scope = ifa->ifa_scope;
1665	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
1666
1667	if (args->netnsid >= 0 &&
1668	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
1669		goto nla_put_failure;
1670
1671	if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
1672		preferred = ifa->ifa_preferred_lft;
1673		valid = ifa->ifa_valid_lft;
1674		if (preferred != INFINITY_LIFE_TIME) {
1675			long tval = (jiffies - ifa->ifa_tstamp) / HZ;
1676
1677			if (preferred > tval)
1678				preferred -= tval;
1679			else
1680				preferred = 0;
1681			if (valid != INFINITY_LIFE_TIME) {
1682				if (valid > tval)
1683					valid -= tval;
1684				else
1685					valid = 0;
1686			}
1687		}
1688	} else {
1689		preferred = INFINITY_LIFE_TIME;
1690		valid = INFINITY_LIFE_TIME;
1691	}
1692	if ((ifa->ifa_address &&
1693	     nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
1694	    (ifa->ifa_local &&
1695	     nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
1696	    (ifa->ifa_broadcast &&
1697	     nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
1698	    (ifa->ifa_label[0] &&
1699	     nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
1700	    nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
1701	    (ifa->ifa_rt_priority &&
1702	     nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) ||
1703	    put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
1704			  preferred, valid))
1705		goto nla_put_failure;
1706
1707	nlmsg_end(skb, nlh);
1708	return 0;
1709
1710nla_put_failure:
1711	nlmsg_cancel(skb, nlh);
1712	return -EMSGSIZE;
1713}
1714
1715static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
1716				      struct inet_fill_args *fillargs,
1717				      struct net **tgt_net, struct sock *sk,
1718				      struct netlink_callback *cb)
1719{
1720	struct netlink_ext_ack *extack = cb->extack;
1721	struct nlattr *tb[IFA_MAX+1];
1722	struct ifaddrmsg *ifm;
1723	int err, i;
1724
1725	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
1726		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for address dump request");
1727		return -EINVAL;
1728	}
1729
1730	ifm = nlmsg_data(nlh);
1731	if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
1732		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for address dump request");
1733		return -EINVAL;
1734	}
1735
1736	fillargs->ifindex = ifm->ifa_index;
1737	if (fillargs->ifindex) {
1738		cb->answer_flags |= NLM_F_DUMP_FILTERED;
1739		fillargs->flags |= NLM_F_DUMP_FILTERED;
1740	}
1741
1742	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
1743					    ifa_ipv4_policy, extack);
1744	if (err < 0)
1745		return err;
1746
1747	for (i = 0; i <= IFA_MAX; ++i) {
1748		if (!tb[i])
1749			continue;
1750
1751		if (i == IFA_TARGET_NETNSID) {
1752			struct net *net;
1753
1754			fillargs->netnsid = nla_get_s32(tb[i]);
1755
1756			net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
1757			if (IS_ERR(net)) {
1758				fillargs->netnsid = -1;
1759				NL_SET_ERR_MSG(extack, "ipv4: Invalid target network namespace id");
1760				return PTR_ERR(net);
1761			}
1762			*tgt_net = net;
1763		} else {
1764			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in dump request");
1765			return -EINVAL;
1766		}
1767	}
1768
1769	return 0;
1770}
1771
1772static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
1773			    struct netlink_callback *cb, int s_ip_idx,
1774			    struct inet_fill_args *fillargs)
1775{
1776	struct in_ifaddr *ifa;
1777	int ip_idx = 0;
1778	int err;
1779
1780	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1781		if (ip_idx < s_ip_idx) {
1782			ip_idx++;
1783			continue;
1784		}
1785		err = inet_fill_ifaddr(skb, ifa, fillargs);
1786		if (err < 0)
1787			goto done;
1788
1789		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1790		ip_idx++;
1791	}
1792	err = 0;
1793
1794done:
1795	cb->args[2] = ip_idx;
1796
1797	return err;
1798}
1799
1800static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1801{
1802	const struct nlmsghdr *nlh = cb->nlh;
1803	struct inet_fill_args fillargs = {
1804		.portid = NETLINK_CB(cb->skb).portid,
1805		.seq = nlh->nlmsg_seq,
1806		.event = RTM_NEWADDR,
1807		.flags = NLM_F_MULTI,
1808		.netnsid = -1,
1809	};
1810	struct net *net = sock_net(skb->sk);
1811	struct net *tgt_net = net;
1812	int h, s_h;
1813	int idx, s_idx;
1814	int s_ip_idx;
1815	struct net_device *dev;
1816	struct in_device *in_dev;
1817	struct hlist_head *head;
1818	int err = 0;
1819
1820	s_h = cb->args[0];
1821	s_idx = idx = cb->args[1];
1822	s_ip_idx = cb->args[2];
1823
1824	if (cb->strict_check) {
1825		err = inet_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
1826						 skb->sk, cb);
1827		if (err < 0)
1828			goto put_tgt_net;
1829
1830		err = 0;
1831		if (fillargs.ifindex) {
1832			dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
1833			if (!dev) {
1834				err = -ENODEV;
1835				goto put_tgt_net;
1836			}
1837
1838			in_dev = __in_dev_get_rtnl(dev);
1839			if (in_dev) {
1840				err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
1841						       &fillargs);
1842			}
1843			goto put_tgt_net;
1844		}
1845	}
1846
1847	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1848		idx = 0;
1849		head = &tgt_net->dev_index_head[h];
1850		rcu_read_lock();
1851		cb->seq = atomic_read(&tgt_net->ipv4.dev_addr_genid) ^
1852			  tgt_net->dev_base_seq;
1853		hlist_for_each_entry_rcu(dev, head, index_hlist) {
1854			if (idx < s_idx)
1855				goto cont;
1856			if (h > s_h || idx > s_idx)
1857				s_ip_idx = 0;
1858			in_dev = __in_dev_get_rcu(dev);
1859			if (!in_dev)
1860				goto cont;
1861
1862			err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
1863					       &fillargs);
1864			if (err < 0) {
1865				rcu_read_unlock();
1866				goto done;
1867			}
1868cont:
1869			idx++;
1870		}
1871		rcu_read_unlock();
1872	}
1873
1874done:
1875	cb->args[0] = h;
1876	cb->args[1] = idx;
1877put_tgt_net:
1878	if (fillargs.netnsid >= 0)
1879		put_net(tgt_net);
1880
1881	return skb->len ? : err;
1882}
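/* An illustrative sketch of the netlink request that reaches the dump
 * handler above, assuming a raw NETLINK_ROUTE socket nl_fd (not part of
 * this file):
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct ifaddrmsg ifm;
 *	} req = {
 *		.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
 *		.nlh.nlmsg_type = RTM_GETADDR,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		.ifm.ifa_family = AF_INET,
 *	};
 *
 *	send(nl_fd, &req, req.nlh.nlmsg_len, 0);
 *	(then recv() the RTM_NEWADDR multipart replies until NLMSG_DONE)
 */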
1883
1884static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
1885		      u32 portid)
1886{
1887	struct inet_fill_args fillargs = {
1888		.portid = portid,
1889		.seq = nlh ? nlh->nlmsg_seq : 0,
1890		.event = event,
1891		.flags = 0,
1892		.netnsid = -1,
1893	};
1894	struct sk_buff *skb;
1895	int err = -ENOBUFS;
1896	struct net *net;
1897
1898	net = dev_net(ifa->ifa_dev->dev);
1899	skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
1900	if (!skb)
1901		goto errout;
1902
1903	err = inet_fill_ifaddr(skb, ifa, &fillargs);
1904	if (err < 0) {
1905		/* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1906		WARN_ON(err == -EMSGSIZE);
1907		kfree_skb(skb);
1908		goto errout;
1909	}
1910	rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
1911	return;
1912errout:
1913	if (err < 0)
1914		rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
1915}
1916
1917static size_t inet_get_link_af_size(const struct net_device *dev,
1918				    u32 ext_filter_mask)
1919{
1920	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1921
1922	if (!in_dev)
1923		return 0;
1924
1925	return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
1926}
1927
1928static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
1929			     u32 ext_filter_mask)
1930{
1931	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1932	struct nlattr *nla;
1933	int i;
1934
1935	if (!in_dev)
1936		return -ENODATA;
1937
1938	nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
1939	if (!nla)
1940		return -EMSGSIZE;
1941
1942	for (i = 0; i < IPV4_DEVCONF_MAX; i++)
1943		((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
1944
1945	return 0;
1946}
1947
1948static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
1949	[IFLA_INET_CONF]	= { .type = NLA_NESTED },
1950};
1951
1952static int inet_validate_link_af(const struct net_device *dev,
1953				 const struct nlattr *nla)
1954{
1955	struct nlattr *a, *tb[IFLA_INET_MAX+1];
1956	int err, rem;
1957
1958	if (dev && !__in_dev_get_rcu(dev))
1959		return -EAFNOSUPPORT;
1960
1961	err = nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla,
1962					  inet_af_policy, NULL);
1963	if (err < 0)
1964		return err;
1965
1966	if (tb[IFLA_INET_CONF]) {
1967		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
1968			int cfgid = nla_type(a);
1969
1970			if (nla_len(a) < 4)
1971				return -EINVAL;
1972
1973			if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
1974				return -EINVAL;
1975		}
1976	}
1977
1978	return 0;
1979}
1980
1981static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
1982{
1983	struct in_device *in_dev = __in_dev_get_rcu(dev);
1984	struct nlattr *a, *tb[IFLA_INET_MAX+1];
1985	int rem;
1986
1987	if (!in_dev)
1988		return -EAFNOSUPPORT;
1989
1990	if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
1991		BUG();
1992
1993	if (tb[IFLA_INET_CONF]) {
1994		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
1995			ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
1996	}
1997
1998	return 0;
1999}
2000
2001static int inet_netconf_msgsize_devconf(int type)
2002{
2003	int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
2004		   + nla_total_size(4);	/* NETCONFA_IFINDEX */
2005	bool all = false;
2006
2007	if (type == NETCONFA_ALL)
2008		all = true;
2009
2010	if (all || type == NETCONFA_FORWARDING)
2011		size += nla_total_size(4);
2012	if (all || type == NETCONFA_RP_FILTER)
2013		size += nla_total_size(4);
2014	if (all || type == NETCONFA_MC_FORWARDING)
2015		size += nla_total_size(4);
2016	if (all || type == NETCONFA_BC_FORWARDING)
2017		size += nla_total_size(4);
2018	if (all || type == NETCONFA_PROXY_NEIGH)
2019		size += nla_total_size(4);
2020	if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
2021		size += nla_total_size(4);
2022
2023	return size;
2024}
2025
2026static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
2027				     struct ipv4_devconf *devconf, u32 portid,
2028				     u32 seq, int event, unsigned int flags,
2029				     int type)
2030{
2031	struct nlmsghdr  *nlh;
2032	struct netconfmsg *ncm;
2033	bool all = false;
2034
2035	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
2036			flags);
2037	if (!nlh)
2038		return -EMSGSIZE;
2039
2040	if (type == NETCONFA_ALL)
2041		all = true;
2042
2043	ncm = nlmsg_data(nlh);
2044	ncm->ncm_family = AF_INET;
2045
2046	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
2047		goto nla_put_failure;
2048
2049	if (!devconf)
2050		goto out;
2051
2052	if ((all || type == NETCONFA_FORWARDING) &&
2053	    nla_put_s32(skb, NETCONFA_FORWARDING,
2054			IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
2055		goto nla_put_failure;
2056	if ((all || type == NETCONFA_RP_FILTER) &&
2057	    nla_put_s32(skb, NETCONFA_RP_FILTER,
2058			IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
2059		goto nla_put_failure;
2060	if ((all || type == NETCONFA_MC_FORWARDING) &&
2061	    nla_put_s32(skb, NETCONFA_MC_FORWARDING,
2062			IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
2063		goto nla_put_failure;
2064	if ((all || type == NETCONFA_BC_FORWARDING) &&
2065	    nla_put_s32(skb, NETCONFA_BC_FORWARDING,
2066			IPV4_DEVCONF(*devconf, BC_FORWARDING)) < 0)
2067		goto nla_put_failure;
2068	if ((all || type == NETCONFA_PROXY_NEIGH) &&
2069	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
2070			IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
2071		goto nla_put_failure;
2072	if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
2073	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
2074			IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
2075		goto nla_put_failure;
2076
2077out:
2078	nlmsg_end(skb, nlh);
2079	return 0;
2080
2081nla_put_failure:
2082	nlmsg_cancel(skb, nlh);
2083	return -EMSGSIZE;
2084}
2085
2086void inet_netconf_notify_devconf(struct net *net, int event, int type,
2087				 int ifindex, struct ipv4_devconf *devconf)
2088{
2089	struct sk_buff *skb;
2090	int err = -ENOBUFS;
2091
2092	skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_KERNEL);
2093	if (!skb)
2094		goto errout;
2095
2096	err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
2097					event, 0, type);
2098	if (err < 0) {
2099		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
2100		WARN_ON(err == -EMSGSIZE);
2101		kfree_skb(skb);
2102		goto errout;
2103	}
2104	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_KERNEL);
2105	return;
2106errout:
2107	if (err < 0)
2108		rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
2109}
2110
2111static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
2112	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
2113	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
2114	[NETCONFA_RP_FILTER]	= { .len = sizeof(int) },
2115	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
2116	[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]	= { .len = sizeof(int) },
2117};
2118
2119static int inet_netconf_valid_get_req(struct sk_buff *skb,
2120				      const struct nlmsghdr *nlh,
2121				      struct nlattr **tb,
2122				      struct netlink_ext_ack *extack)
2123{
2124	int i, err;
2125
2126	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
2127		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf get request");
2128		return -EINVAL;
2129	}
2130
2131	if (!netlink_strict_get_check(skb))
2132		return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
2133					      tb, NETCONFA_MAX,
2134					      devconf_ipv4_policy, extack);
2135
2136	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
2137					    tb, NETCONFA_MAX,
2138					    devconf_ipv4_policy, extack);
2139	if (err)
2140		return err;
2141
2142	for (i = 0; i <= NETCONFA_MAX; i++) {
2143		if (!tb[i])
2144			continue;
2145
2146		switch (i) {
2147		case NETCONFA_IFINDEX:
2148			break;
2149		default:
2150			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in netconf get request");
2151			return -EINVAL;
2152		}
2153	}
2154
2155	return 0;
2156}
2157
2158static int inet_netconf_get_devconf(struct sk_buff *in_skb,
2159				    struct nlmsghdr *nlh,
2160				    struct netlink_ext_ack *extack)
2161{
2162	struct net *net = sock_net(in_skb->sk);
2163	struct nlattr *tb[NETCONFA_MAX+1];
2164	struct sk_buff *skb;
2165	struct ipv4_devconf *devconf;
2166	struct in_device *in_dev;
2167	struct net_device *dev;
2168	int ifindex;
2169	int err;
2170
2171	err = inet_netconf_valid_get_req(in_skb, nlh, tb, extack);
2172	if (err)
2173		goto errout;
2174
2175	err = -EINVAL;
2176	if (!tb[NETCONFA_IFINDEX])
2177		goto errout;
2178
2179	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
2180	switch (ifindex) {
2181	case NETCONFA_IFINDEX_ALL:
2182		devconf = net->ipv4.devconf_all;
2183		break;
2184	case NETCONFA_IFINDEX_DEFAULT:
2185		devconf = net->ipv4.devconf_dflt;
2186		break;
2187	default:
2188		dev = __dev_get_by_index(net, ifindex);
2189		if (!dev)
2190			goto errout;
2191		in_dev = __in_dev_get_rtnl(dev);
2192		if (!in_dev)
2193			goto errout;
2194		devconf = &in_dev->cnf;
2195		break;
2196	}
2197
2198	err = -ENOBUFS;
2199	skb = nlmsg_new(inet_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
2200	if (!skb)
2201		goto errout;
2202
2203	err = inet_netconf_fill_devconf(skb, ifindex, devconf,
2204					NETLINK_CB(in_skb).portid,
2205					nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
2206					NETCONFA_ALL);
2207	if (err < 0) {
2208		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
2209		WARN_ON(err == -EMSGSIZE);
2210		kfree_skb(skb);
2211		goto errout;
2212	}
2213	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2214errout:
2215	return err;
2216}
2217
2218static int inet_netconf_dump_devconf(struct sk_buff *skb,
2219				     struct netlink_callback *cb)
2220{
2221	const struct nlmsghdr *nlh = cb->nlh;
2222	struct net *net = sock_net(skb->sk);
2223	int h, s_h;
2224	int idx, s_idx;
2225	struct net_device *dev;
2226	struct in_device *in_dev;
2227	struct hlist_head *head;
2228
2229	if (cb->strict_check) {
2230		struct netlink_ext_ack *extack = cb->extack;
2231		struct netconfmsg *ncm;
2232
2233		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
2234			NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf dump request");
2235			return -EINVAL;
2236		}
2237
2238		if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
2239			NL_SET_ERR_MSG(extack, "ipv4: Invalid data after header in netconf dump request");
2240			return -EINVAL;
2241		}
2242	}
2243
2244	s_h = cb->args[0];
2245	s_idx = idx = cb->args[1];
2246
2247	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
2248		idx = 0;
2249		head = &net->dev_index_head[h];
2250		rcu_read_lock();
2251		cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
2252			  net->dev_base_seq;
2253		hlist_for_each_entry_rcu(dev, head, index_hlist) {
2254			if (idx < s_idx)
2255				goto cont;
2256			in_dev = __in_dev_get_rcu(dev);
2257			if (!in_dev)
2258				goto cont;
2259
2260			if (inet_netconf_fill_devconf(skb, dev->ifindex,
2261						      &in_dev->cnf,
2262						      NETLINK_CB(cb->skb).portid,
2263						      nlh->nlmsg_seq,
2264						      RTM_NEWNETCONF,
2265						      NLM_F_MULTI,
2266						      NETCONFA_ALL) < 0) {
2267				rcu_read_unlock();
2268				goto done;
2269			}
2270			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2271cont:
2272			idx++;
2273		}
2274		rcu_read_unlock();
2275	}
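	/* After the per-device walk, also report the "all" and "default"
	 * pseudo-configurations, reusing h as the resume cursor so an
	 * interrupted dump continues where it left off.
	 */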
2276	if (h == NETDEV_HASHENTRIES) {
2277		if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
2278					      net->ipv4.devconf_all,
2279					      NETLINK_CB(cb->skb).portid,
2280					      nlh->nlmsg_seq,
2281					      RTM_NEWNETCONF, NLM_F_MULTI,
2282					      NETCONFA_ALL) < 0)
2283			goto done;
2284		else
2285			h++;
2286	}
2287	if (h == NETDEV_HASHENTRIES + 1) {
2288		if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
2289					      net->ipv4.devconf_dflt,
2290					      NETLINK_CB(cb->skb).portid,
2291					      nlh->nlmsg_seq,
2292					      RTM_NEWNETCONF, NLM_F_MULTI,
2293					      NETCONFA_ALL) < 0)
2294			goto done;
2295		else
2296			h++;
2297	}
2298done:
2299	cb->args[0] = h;
2300	cb->args[1] = idx;
2301
2302	return skb->len;
2303}
2304
2305#ifdef CONFIG_SYSCTL
2306
2307static void devinet_copy_dflt_conf(struct net *net, int i)
2308{
2309	struct net_device *dev;
2310
2311	rcu_read_lock();
2312	for_each_netdev_rcu(net, dev) {
2313		struct in_device *in_dev;
2314
2315		in_dev = __in_dev_get_rcu(dev);
2316		if (in_dev && !test_bit(i, in_dev->cnf.state))
2317			in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
2318	}
2319	rcu_read_unlock();
2320}
2321
2322/* called with RTNL locked */
2323static void inet_forward_change(struct net *net)
2324{
2325	struct net_device *dev;
2326	int on = IPV4_DEVCONF_ALL(net, FORWARDING);
2327
2328	IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
2329	IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
2330	inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2331				    NETCONFA_FORWARDING,
2332				    NETCONFA_IFINDEX_ALL,
2333				    net->ipv4.devconf_all);
2334	inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2335				    NETCONFA_FORWARDING,
2336				    NETCONFA_IFINDEX_DEFAULT,
2337				    net->ipv4.devconf_dflt);
2338
2339	for_each_netdev(net, dev) {
2340		struct in_device *in_dev;
2341
2342		if (on)
2343			dev_disable_lro(dev);
2344
2345		in_dev = __in_dev_get_rtnl(dev);
2346		if (in_dev) {
2347			IN_DEV_CONF_SET(in_dev, FORWARDING, on);
2348			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2349						    NETCONFA_FORWARDING,
2350						    dev->ifindex, &in_dev->cnf);
2351		}
2352	}
2353}
2354
2355static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
2356{
2357	if (cnf == net->ipv4.devconf_dflt)
2358		return NETCONFA_IFINDEX_DEFAULT;
2359	else if (cnf == net->ipv4.devconf_all)
2360		return NETCONFA_IFINDEX_ALL;
2361	else {
2362		struct in_device *idev
2363			= container_of(cnf, struct in_device, cnf);
2364		return idev->dev->ifindex;
2365	}
2366}
2367
2368static int devinet_conf_proc(struct ctl_table *ctl, int write,
2369			     void __user *buffer,
2370			     size_t *lenp, loff_t *ppos)
2371{
2372	int old_value = *(int *)ctl->data;
2373	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2374	int new_value = *(int *)ctl->data;
2375
2376	if (write) {
2377		struct ipv4_devconf *cnf = ctl->extra1;
2378		struct net *net = ctl->extra2;
2379		int i = (int *)ctl->data - cnf->data;
2380		int ifindex;
2381
2382		set_bit(i, cnf->state);
2383
2384		if (cnf == net->ipv4.devconf_dflt)
2385			devinet_copy_dflt_conf(net, i);
2386		if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
2387		    i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
2388			if ((new_value == 0) && (old_value != 0))
2389				rt_cache_flush(net);
2390
2391		if (i == IPV4_DEVCONF_BC_FORWARDING - 1 &&
2392		    new_value != old_value)
2393			rt_cache_flush(net);
2394
2395		if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
2396		    new_value != old_value) {
2397			ifindex = devinet_conf_ifindex(net, cnf);
2398			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2399						    NETCONFA_RP_FILTER,
2400						    ifindex, cnf);
2401		}
2402		if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
2403		    new_value != old_value) {
2404			ifindex = devinet_conf_ifindex(net, cnf);
2405			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2406						    NETCONFA_PROXY_NEIGH,
2407						    ifindex, cnf);
2408		}
2409		if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
2410		    new_value != old_value) {
2411			ifindex = devinet_conf_ifindex(net, cnf);
2412			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2413						    NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
2414						    ifindex, cnf);
2415		}
2416	}
2417
2418	return ret;
2419}
2420
2421static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
2422				  void __user *buffer,
2423				  size_t *lenp, loff_t *ppos)
2424{
2425	int *valp = ctl->data;
2426	int val = *valp;
2427	loff_t pos = *ppos;
2428	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2429
2430	if (write && *valp != val) {
2431		struct net *net = ctl->extra2;
2432
2433		if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
2434			if (!rtnl_trylock()) {
2435				/* Restore the original values before restarting */
2436				*valp = val;
2437				*ppos = pos;
2438				return restart_syscall();
2439			}
2440			if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
2441				inet_forward_change(net);
2442			} else {
2443				struct ipv4_devconf *cnf = ctl->extra1;
2444				struct in_device *idev =
2445					container_of(cnf, struct in_device, cnf);
2446				if (*valp)
2447					dev_disable_lro(idev->dev);
2448				inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2449							    NETCONFA_FORWARDING,
2450							    idev->dev->ifindex,
2451							    cnf);
2452			}
2453			rtnl_unlock();
2454			rt_cache_flush(net);
2455		} else
2456			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2457						    NETCONFA_FORWARDING,
2458						    NETCONFA_IFINDEX_DEFAULT,
2459						    net->ipv4.devconf_dflt);
2460	}
2461
2462	return ret;
2463}
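/* For illustration, a write such as
 *	sysctl -w net.ipv4.conf.all.forwarding=1
 * lands in the handler above and, for the "all" entry, fans out via
 * inet_forward_change() to every device in the namespace.
 */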
2464
2465static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
2466				void __user *buffer,
2467				size_t *lenp, loff_t *ppos)
2468{
2469	int *valp = ctl->data;
2470	int val = *valp;
2471	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2472	struct net *net = ctl->extra2;
2473
2474	if (write && *valp != val)
2475		rt_cache_flush(net);
2476
2477	return ret;
2478}
2479
2480#define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
2481	{ \
2482		.procname	= name, \
2483		.data		= ipv4_devconf.data + \
2484				  IPV4_DEVCONF_ ## attr - 1, \
2485		.maxlen		= sizeof(int), \
2486		.mode		= mval, \
2487		.proc_handler	= proc, \
2488		.extra1		= &ipv4_devconf, \
2489	}
2490
2491#define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
2492	DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)
2493
2494#define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
2495	DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)
2496
2497#define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
2498	DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)
2499
2500#define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
2501	DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
2502
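/* Each entry below becomes a per-device knob under
 * /proc/sys/net/ipv4/conf/<dev>/ (and under .../all/ and .../default/),
 * registered via __devinet_sysctl_register() further down.
 */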
2503static struct devinet_sysctl_table {
2504	struct ctl_table_header *sysctl_header;
2505	struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
2506} devinet_sysctl = {
2507	.devinet_vars = {
2508		DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
2509					     devinet_sysctl_forward),
2510		DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
2511		DEVINET_SYSCTL_RW_ENTRY(BC_FORWARDING, "bc_forwarding"),
2512
2513		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
2514		DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
2515		DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
2516		DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
2517		DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
2518		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
2519					"accept_source_route"),
2520		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
2521		DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
2522		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
2523		DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
2524		DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
2525		DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
2526		DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
2527		DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
2528		DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
2529		DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
2530		DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
2531		DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
2532		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
2533		DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
2534					"force_igmp_version"),
2535		DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
2536					"igmpv2_unsolicited_report_interval"),
2537		DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
2538					"igmpv3_unsolicited_report_interval"),
2539		DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
2540					"ignore_routes_with_linkdown"),
2541		DEVINET_SYSCTL_RW_ENTRY(DROP_GRATUITOUS_ARP,
2542					"drop_gratuitous_arp"),
2543
2544		DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
2545		DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
2546		DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
2547					      "promote_secondaries"),
2548		DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
2549					      "route_localnet"),
2550		DEVINET_SYSCTL_FLUSHING_ENTRY(DROP_UNICAST_IN_L2_MULTICAST,
2551					      "drop_unicast_in_l2_multicast"),
2552	},
2553};
2554
2555static int __devinet_sysctl_register(struct net *net, char *dev_name,
2556				     int ifindex, struct ipv4_devconf *p)
2557{
2558	int i;
2559	struct devinet_sysctl_table *t;
2560	char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
2561
2562	t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
2563	if (!t)
2564		goto out;
2565
2566	for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
2567		t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
2568		t->devinet_vars[i].extra1 = p;
2569		t->devinet_vars[i].extra2 = net;
2570	}
2571
2572	snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
2573
2574	t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
2575	if (!t->sysctl_header)
2576		goto free;
2577
2578	p->sysctl = t;
2579
2580	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
2581				    ifindex, p);
2582	return 0;
2583
2584free:
2585	kfree(t);
2586out:
2587	return -ENOBUFS;
2588}
2589
2590static void __devinet_sysctl_unregister(struct net *net,
2591					struct ipv4_devconf *cnf, int ifindex)
2592{
2593	struct devinet_sysctl_table *t = cnf->sysctl;
2594
2595	if (t) {
2596		cnf->sysctl = NULL;
2597		unregister_net_sysctl_table(t->sysctl_header);
2598		kfree(t);
2599	}
2600
2601	inet_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
2602}
2603
2604static int devinet_sysctl_register(struct in_device *idev)
2605{
2606	int err;
2607
2608	if (!sysctl_dev_name_is_allowed(idev->dev->name))
2609		return -EINVAL;
2610
2611	err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
2612	if (err)
2613		return err;
2614	err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
2615					idev->dev->ifindex, &idev->cnf);
2616	if (err)
2617		neigh_sysctl_unregister(idev->arp_parms);
2618	return err;
2619}
2620
2621static void devinet_sysctl_unregister(struct in_device *idev)
2622{
2623	struct net *net = dev_net(idev->dev);
2624
2625	__devinet_sysctl_unregister(net, &idev->cnf, idev->dev->ifindex);
2626	neigh_sysctl_unregister(idev->arp_parms);
2627}
2628
2629static struct ctl_table ctl_forward_entry[] = {
2630	{
2631		.procname	= "ip_forward",
2632		.data		= &ipv4_devconf.data[
2633					IPV4_DEVCONF_FORWARDING - 1],
2634		.maxlen		= sizeof(int),
2635		.mode		= 0644,
2636		.proc_handler	= devinet_sysctl_forward,
2637		.extra1		= &ipv4_devconf,
2638		.extra2		= &init_net,
2639	},
2640	{ },
2641};
2642#endif
2643
2644static __net_init int devinet_init_net(struct net *net)
2645{
2646	int err;
2647	struct ipv4_devconf *all, *dflt;
2648#ifdef CONFIG_SYSCTL
2649	struct ctl_table *tbl;
2650	struct ctl_table_header *forw_hdr;
2651#endif
2652
2653	err = -ENOMEM;
2654	all = kmemdup(&ipv4_devconf, sizeof(ipv4_devconf), GFP_KERNEL);
2655	if (!all)
2656		goto err_alloc_all;
2657
2658	dflt = kmemdup(&ipv4_devconf_dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
2659	if (!dflt)
2660		goto err_alloc_dflt;
2661
2662#ifdef CONFIG_SYSCTL
2663	tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
2664	if (!tbl)
2665		goto err_alloc_ctl;
2666
2667	tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
2668	tbl[0].extra1 = all;
2669	tbl[0].extra2 = net;
2670#endif
2671
2672	if ((!IS_ENABLED(CONFIG_SYSCTL) ||
2673	     sysctl_devconf_inherit_init_net != 2) &&
2674	    !net_eq(net, &init_net)) {
2675		memcpy(all, init_net.ipv4.devconf_all, sizeof(ipv4_devconf));
2676		memcpy(dflt, init_net.ipv4.devconf_dflt, sizeof(ipv4_devconf_dflt));
2677	}
2678
2679#ifdef CONFIG_SYSCTL
2680	err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
2681	if (err < 0)
2682		goto err_reg_all;
2683
2684	err = __devinet_sysctl_register(net, "default",
2685					NETCONFA_IFINDEX_DEFAULT, dflt);
2686	if (err < 0)
2687		goto err_reg_dflt;
2688
2689	err = -ENOMEM;
2690	forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
2691	if (!forw_hdr)
2692		goto err_reg_ctl;
2693	net->ipv4.forw_hdr = forw_hdr;
2694#endif
2695
2696	net->ipv4.devconf_all = all;
2697	net->ipv4.devconf_dflt = dflt;
2698	return 0;
2699
2700#ifdef CONFIG_SYSCTL
2701err_reg_ctl:
2702	__devinet_sysctl_unregister(net, dflt, NETCONFA_IFINDEX_DEFAULT);
2703err_reg_dflt:
2704	__devinet_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
2705err_reg_all:
2706	kfree(tbl);
2707err_alloc_ctl:
2708#endif
2709	kfree(dflt);
2710err_alloc_dflt:
2711	kfree(all);
2712err_alloc_all:
2713	return err;
2714}
2715
2716static __net_exit void devinet_exit_net(struct net *net)
2717{
2718#ifdef CONFIG_SYSCTL
2719	struct ctl_table *tbl;
2720
2721	tbl = net->ipv4.forw_hdr->ctl_table_arg;
2722	unregister_net_sysctl_table(net->ipv4.forw_hdr);
2723	__devinet_sysctl_unregister(net, net->ipv4.devconf_dflt,
2724				    NETCONFA_IFINDEX_DEFAULT);
2725	__devinet_sysctl_unregister(net, net->ipv4.devconf_all,
2726				    NETCONFA_IFINDEX_ALL);
2727	kfree(tbl);
2728#endif
2729	kfree(net->ipv4.devconf_dflt);
2730	kfree(net->ipv4.devconf_all);
2731}
2732
2733static __net_initdata struct pernet_operations devinet_ops = {
2734	.init = devinet_init_net,
2735	.exit = devinet_exit_net,
2736};
2737
2738static struct rtnl_af_ops inet_af_ops __read_mostly = {
2739	.family		  = AF_INET,
2740	.fill_link_af	  = inet_fill_link_af,
2741	.get_link_af_size = inet_get_link_af_size,
2742	.validate_link_af = inet_validate_link_af,
2743	.set_link_af	  = inet_set_link_af,
2744};
2745
2746void __init devinet_init(void)
2747{
2748	int i;
2749
2750	for (i = 0; i < IN4_ADDR_HSIZE; i++)
2751		INIT_HLIST_HEAD(&inet_addr_lst[i]);
2752
2753	register_pernet_subsys(&devinet_ops);
2754
2755	register_gifconf(PF_INET, inet_gifconf);
2756	register_netdevice_notifier(&ip_netdev_notifier);
2757
2758	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
2759
2760	rtnl_af_register(&inet_af_ops);
2761
2762	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0);
2763	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0);
2764	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, 0);
2765	rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
2766		      inet_netconf_dump_devconf, 0);
2767}