   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	NET3	IP device support routines.
   4 *
   5 *	Derived from the IP parts of dev.c 1.0.19
   6 * 		Authors:	Ross Biro
   7 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
   8 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
   9 *
  10 *	Additional Authors:
  11 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  12 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  13 *
  14 *	Changes:
  15 *		Alexey Kuznetsov:	pa_* fields are replaced with ifaddr
  16 *					lists.
  17 *		Cyrus Durgin:		updated for kmod
  18 *		Matthias Andree:	in devinet_ioctl, compare label and
  19 *					address (4.4BSD alias style support),
  20 *					fall back to comparing just the label
  21 *					if no match found.
  22 */
  23
  24
  25#include <linux/uaccess.h>
  26#include <linux/bitops.h>
  27#include <linux/capability.h>
  28#include <linux/module.h>
  29#include <linux/types.h>
  30#include <linux/kernel.h>
  31#include <linux/sched/signal.h>
  32#include <linux/string.h>
  33#include <linux/mm.h>
  34#include <linux/socket.h>
  35#include <linux/sockios.h>
  36#include <linux/in.h>
  37#include <linux/errno.h>
  38#include <linux/interrupt.h>
  39#include <linux/if_addr.h>
  40#include <linux/if_ether.h>
  41#include <linux/inet.h>
  42#include <linux/netdevice.h>
  43#include <linux/etherdevice.h>
  44#include <linux/skbuff.h>
  45#include <linux/init.h>
  46#include <linux/notifier.h>
  47#include <linux/inetdevice.h>
  48#include <linux/igmp.h>
  49#include <linux/slab.h>
  50#include <linux/hash.h>
  51#ifdef CONFIG_SYSCTL
  52#include <linux/sysctl.h>
  53#endif
  54#include <linux/kmod.h>
  55#include <linux/netconf.h>
  56
  57#include <net/arp.h>
  58#include <net/ip.h>
  59#include <net/route.h>
  60#include <net/ip_fib.h>
  61#include <net/rtnetlink.h>
  62#include <net/net_namespace.h>
  63#include <net/addrconf.h>
  64
  65#define IPV6ONLY_FLAGS	\
  66		(IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
  67		 IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
  68		 IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
  69
  70static struct ipv4_devconf ipv4_devconf = {
  71	.data = {
  72		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
  73		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
  74		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
  75		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
  76		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
  77		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] =  1000 /*ms*/,
  78		[IPV4_DEVCONF_ARP_EVICT_NOCARRIER - 1] = 1,
  79	},
  80};
  81
  82static struct ipv4_devconf ipv4_devconf_dflt = {
  83	.data = {
  84		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
  85		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
  86		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
  87		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
  88		[IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
  89		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
  90		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] =  1000 /*ms*/,
  91		[IPV4_DEVCONF_ARP_EVICT_NOCARRIER - 1] = 1,
  92	},
  93};
  94
  95#define IPV4_DEVCONF_DFLT(net, attr) \
  96	IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
  97
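/* Netlink attribute policy shared by the RTM_NEWADDR, RTM_DELADDR and
 * RTM_GETADDR handlers below.
 */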
  98static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
  99	[IFA_LOCAL]     	= { .type = NLA_U32 },
 100	[IFA_ADDRESS]   	= { .type = NLA_U32 },
 101	[IFA_BROADCAST] 	= { .type = NLA_U32 },
 102	[IFA_LABEL]     	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
 103	[IFA_CACHEINFO]		= { .len = sizeof(struct ifa_cacheinfo) },
 104	[IFA_FLAGS]		= { .type = NLA_U32 },
 105	[IFA_RT_PRIORITY]	= { .type = NLA_U32 },
 106	[IFA_TARGET_NETNSID]	= { .type = NLA_S32 },
 107	[IFA_PROTO]		= { .type = NLA_U8 },
 108};
 109
 110struct inet_fill_args {
 111	u32 portid;
 112	u32 seq;
 113	int event;
 114	unsigned int flags;
 115	int netnsid;
 116	int ifindex;
 117};
 118
 119#define IN4_ADDR_HSIZE_SHIFT	8
 120#define IN4_ADDR_HSIZE		(1U << IN4_ADDR_HSIZE_SHIFT)
 121
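/* Global hash of all IPv4 addresses, keyed by (netns, ifa_local).  Entries
 * are added and removed under the RTNL and looked up under RCU, see
 * inet_lookup_ifaddr_rcu().
 */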
 122static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
 123
 124static u32 inet_addr_hash(const struct net *net, __be32 addr)
 125{
 126	u32 val = (__force u32) addr ^ net_hash_mix(net);
 127
 128	return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
 129}
 130
 131static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
 132{
 133	u32 hash = inet_addr_hash(net, ifa->ifa_local);
 134
 135	ASSERT_RTNL();
 136	hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
 137}
 138
 139static void inet_hash_remove(struct in_ifaddr *ifa)
 140{
 141	ASSERT_RTNL();
 142	hlist_del_init_rcu(&ifa->hash);
 143}
 144
 145/**
 146 * __ip_dev_find - find the first device with a given source address.
 147 * @net: the net namespace
 148 * @addr: the source address
 149 * @devref: if true, take a reference on the found device
 150 *
  151 * If a caller uses devref=false, it should be protected by RCU or RTNL.
 152 */
 153struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
 154{
 155	struct net_device *result = NULL;
 156	struct in_ifaddr *ifa;
 157
 158	rcu_read_lock();
 159	ifa = inet_lookup_ifaddr_rcu(net, addr);
 160	if (!ifa) {
 161		struct flowi4 fl4 = { .daddr = addr };
 162		struct fib_result res = { 0 };
 163		struct fib_table *local;
 164
  165		/* Fall back to the FIB local table so that communication
  166		 * over loopback subnets works.
 167		 */
 168		local = fib_get_table(net, RT_TABLE_LOCAL);
 169		if (local &&
 170		    !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
 171		    res.type == RTN_LOCAL)
 172			result = FIB_RES_DEV(res);
 173	} else {
 174		result = ifa->ifa_dev->dev;
 175	}
 176	if (result && devref)
 177		dev_hold(result);
 178	rcu_read_unlock();
 179	return result;
 180}
 181EXPORT_SYMBOL(__ip_dev_find);
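/* A minimal usage sketch (names such as "my_addr" are hypothetical):
 *
 *	struct net_device *dev = __ip_dev_find(net, my_addr, true);
 *
 *	if (dev) {
 *		...				use the device
 *		dev_put(dev);			drop the reference taken above
 *	}
 *
 * With devref == false no reference is taken, so the caller must stay inside
 * rcu_read_lock() or hold the RTNL for as long as it uses the pointer.
 */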
 182
 183/* called under RCU lock */
 184struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr)
 185{
 186	u32 hash = inet_addr_hash(net, addr);
 187	struct in_ifaddr *ifa;
 188
 189	hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash)
 190		if (ifa->ifa_local == addr &&
 191		    net_eq(dev_net(ifa->ifa_dev->dev), net))
 192			return ifa;
 193
 194	return NULL;
 195}
 196
 197static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
 198
 199static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
 200static BLOCKING_NOTIFIER_HEAD(inetaddr_validator_chain);
 201static void inet_del_ifa(struct in_device *in_dev,
 202			 struct in_ifaddr __rcu **ifap,
 203			 int destroy);
 204#ifdef CONFIG_SYSCTL
 205static int devinet_sysctl_register(struct in_device *idev);
 206static void devinet_sysctl_unregister(struct in_device *idev);
 207#else
 208static int devinet_sysctl_register(struct in_device *idev)
 209{
 210	return 0;
 211}
 212static void devinet_sysctl_unregister(struct in_device *idev)
 213{
 214}
 215#endif
 216
 217/* Locks all the inet devices. */
 218
 219static struct in_ifaddr *inet_alloc_ifa(void)
 220{
 221	return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL_ACCOUNT);
 222}
 223
 224static void inet_rcu_free_ifa(struct rcu_head *head)
 225{
 226	struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
 227	if (ifa->ifa_dev)
 228		in_dev_put(ifa->ifa_dev);
 229	kfree(ifa);
 230}
 231
 232static void inet_free_ifa(struct in_ifaddr *ifa)
 233{
 234	call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
 235}
 236
 237static void in_dev_free_rcu(struct rcu_head *head)
 238{
 239	struct in_device *idev = container_of(head, struct in_device, rcu_head);
 240
 241	kfree(rcu_dereference_protected(idev->mc_hash, 1));
 242	kfree(idev);
 243}
 244
 245void in_dev_finish_destroy(struct in_device *idev)
 246{
 247	struct net_device *dev = idev->dev;
 248
 249	WARN_ON(idev->ifa_list);
 250	WARN_ON(idev->mc_list);
 251#ifdef NET_REFCNT_DEBUG
 252	pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
 253#endif
 254	netdev_put(dev, &idev->dev_tracker);
 255	if (!idev->dead)
 256		pr_err("Freeing alive in_device %p\n", idev);
 257	else
 258		call_rcu(&idev->rcu_head, in_dev_free_rcu);
 259}
 260EXPORT_SYMBOL(in_dev_finish_destroy);
 261
 262static struct in_device *inetdev_init(struct net_device *dev)
 263{
 264	struct in_device *in_dev;
 265	int err = -ENOMEM;
 266
 267	ASSERT_RTNL();
 268
 269	in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
 270	if (!in_dev)
 271		goto out;
 272	memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
 273			sizeof(in_dev->cnf));
 274	in_dev->cnf.sysctl = NULL;
 275	in_dev->dev = dev;
 276	in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
 277	if (!in_dev->arp_parms)
 278		goto out_kfree;
 279	if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
 280		dev_disable_lro(dev);
 281	/* Reference in_dev->dev */
 282	netdev_hold(dev, &in_dev->dev_tracker, GFP_KERNEL);
 283	/* Account for reference dev->ip_ptr (below) */
 284	refcount_set(&in_dev->refcnt, 1);
 285
 286	err = devinet_sysctl_register(in_dev);
 287	if (err) {
 288		in_dev->dead = 1;
 289		neigh_parms_release(&arp_tbl, in_dev->arp_parms);
 290		in_dev_put(in_dev);
 291		in_dev = NULL;
 292		goto out;
 293	}
 294	ip_mc_init_dev(in_dev);
 295	if (dev->flags & IFF_UP)
 296		ip_mc_up(in_dev);
 297
 298	/* we can receive as soon as ip_ptr is set -- do this last */
 299	rcu_assign_pointer(dev->ip_ptr, in_dev);
 300out:
 301	return in_dev ?: ERR_PTR(err);
 302out_kfree:
 303	kfree(in_dev);
 304	in_dev = NULL;
 305	goto out;
 306}
 307
 308static void inetdev_destroy(struct in_device *in_dev)
 309{
 310	struct net_device *dev;
 311	struct in_ifaddr *ifa;
 312
 313	ASSERT_RTNL();
 314
 315	dev = in_dev->dev;
 316
 317	in_dev->dead = 1;
 318
 319	ip_mc_destroy_dev(in_dev);
 320
 321	while ((ifa = rtnl_dereference(in_dev->ifa_list)) != NULL) {
 322		inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
 323		inet_free_ifa(ifa);
 324	}
 325
 326	RCU_INIT_POINTER(dev->ip_ptr, NULL);
 327
 328	devinet_sysctl_unregister(in_dev);
 329	neigh_parms_release(&arp_tbl, in_dev->arp_parms);
 330	arp_ifdown(dev);
 331
 332	in_dev_put(in_dev);
 333}
 334
 335int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
 336{
 337	const struct in_ifaddr *ifa;
 338
 339	rcu_read_lock();
 340	in_dev_for_each_ifa_rcu(ifa, in_dev) {
 341		if (inet_ifa_match(a, ifa)) {
 342			if (!b || inet_ifa_match(b, ifa)) {
 343				rcu_read_unlock();
 344				return 1;
 345			}
 346		}
 347	}
 348	rcu_read_unlock();
 349	return 0;
 350}
 351
 352static void __inet_del_ifa(struct in_device *in_dev,
 353			   struct in_ifaddr __rcu **ifap,
 354			   int destroy, struct nlmsghdr *nlh, u32 portid)
 355{
 356	struct in_ifaddr *promote = NULL;
 357	struct in_ifaddr *ifa, *ifa1;
 358	struct in_ifaddr __rcu **last_prim;
 359	struct in_ifaddr *prev_prom = NULL;
 360	int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
 361
 362	ASSERT_RTNL();
 363
 364	ifa1 = rtnl_dereference(*ifap);
 365	last_prim = ifap;
 366	if (in_dev->dead)
 367		goto no_promotions;
 368
  369	/* 1. Deleting the primary ifaddr forces deletion of all secondaries
  370	 * unless alias promotion is set.
  371	 */
 372
 373	if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
 374		struct in_ifaddr __rcu **ifap1 = &ifa1->ifa_next;
 375
 376		while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
 377			if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
 378			    ifa1->ifa_scope <= ifa->ifa_scope)
 379				last_prim = &ifa->ifa_next;
 380
 381			if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
 382			    ifa1->ifa_mask != ifa->ifa_mask ||
 383			    !inet_ifa_match(ifa1->ifa_address, ifa)) {
 384				ifap1 = &ifa->ifa_next;
 385				prev_prom = ifa;
 386				continue;
 387			}
 388
 389			if (!do_promote) {
 390				inet_hash_remove(ifa);
 391				*ifap1 = ifa->ifa_next;
 392
 393				rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
 394				blocking_notifier_call_chain(&inetaddr_chain,
 395						NETDEV_DOWN, ifa);
 396				inet_free_ifa(ifa);
 397			} else {
 398				promote = ifa;
 399				break;
 400			}
 401		}
 402	}
 403
  404	/* On promotion all secondaries from the subnet are changing
  405	 * the primary IP; we must remove all their routes silently
  406	 * and later add them back with the new prefsrc. Do this
 407	 * while all addresses are on the device list.
 408	 */
 409	for (ifa = promote; ifa; ifa = rtnl_dereference(ifa->ifa_next)) {
 410		if (ifa1->ifa_mask == ifa->ifa_mask &&
 411		    inet_ifa_match(ifa1->ifa_address, ifa))
 412			fib_del_ifaddr(ifa, ifa1);
 413	}
 414
 415no_promotions:
 416	/* 2. Unlink it */
 417
 418	*ifap = ifa1->ifa_next;
 419	inet_hash_remove(ifa1);
 420
 421	/* 3. Announce address deletion */
 422
 423	/* Send message first, then call notifier.
  424	   At first sight, the FIB update triggered by the notifier
  425	   will refer to an already deleted ifaddr, which could confuse
  426	   netlink listeners. In fact it is not a problem: gated sees
  427	   that the route was deleted and, if it still thinks the ifaddr
  428	   is valid, it will try to restore the deleted routes... Grr.
  429	   So this order is correct.
 430	 */
 431	rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
 432	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
 433
 434	if (promote) {
 435		struct in_ifaddr *next_sec;
 436
 437		next_sec = rtnl_dereference(promote->ifa_next);
 438		if (prev_prom) {
 439			struct in_ifaddr *last_sec;
 440
 441			rcu_assign_pointer(prev_prom->ifa_next, next_sec);
 442
 443			last_sec = rtnl_dereference(*last_prim);
 444			rcu_assign_pointer(promote->ifa_next, last_sec);
 445			rcu_assign_pointer(*last_prim, promote);
 446		}
 447
 448		promote->ifa_flags &= ~IFA_F_SECONDARY;
 449		rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
 450		blocking_notifier_call_chain(&inetaddr_chain,
 451				NETDEV_UP, promote);
 452		for (ifa = next_sec; ifa;
 453		     ifa = rtnl_dereference(ifa->ifa_next)) {
 454			if (ifa1->ifa_mask != ifa->ifa_mask ||
 455			    !inet_ifa_match(ifa1->ifa_address, ifa))
 456					continue;
 457			fib_add_ifaddr(ifa);
 458		}
 459
 460	}
 461	if (destroy)
 462		inet_free_ifa(ifa1);
 463}
 464
 465static void inet_del_ifa(struct in_device *in_dev,
 466			 struct in_ifaddr __rcu **ifap,
 467			 int destroy)
 468{
 469	__inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
 470}
 471
 472static void check_lifetime(struct work_struct *work);
 473
 474static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
 475
 476static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
 477			     u32 portid, struct netlink_ext_ack *extack)
 478{
 479	struct in_ifaddr __rcu **last_primary, **ifap;
 480	struct in_device *in_dev = ifa->ifa_dev;
 481	struct in_validator_info ivi;
 482	struct in_ifaddr *ifa1;
 483	int ret;
 484
 485	ASSERT_RTNL();
 486
 487	if (!ifa->ifa_local) {
 488		inet_free_ifa(ifa);
 489		return 0;
 490	}
 491
 492	ifa->ifa_flags &= ~IFA_F_SECONDARY;
 493	last_primary = &in_dev->ifa_list;
 494
  495	/* Don't set IPv6-only flags on IPv4 addresses */
 496	ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
 497
 498	ifap = &in_dev->ifa_list;
 499	ifa1 = rtnl_dereference(*ifap);
 500
 501	while (ifa1) {
 502		if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
 503		    ifa->ifa_scope <= ifa1->ifa_scope)
 504			last_primary = &ifa1->ifa_next;
 505		if (ifa1->ifa_mask == ifa->ifa_mask &&
 506		    inet_ifa_match(ifa1->ifa_address, ifa)) {
 507			if (ifa1->ifa_local == ifa->ifa_local) {
 508				inet_free_ifa(ifa);
 509				return -EEXIST;
 510			}
 511			if (ifa1->ifa_scope != ifa->ifa_scope) {
 512				NL_SET_ERR_MSG(extack, "ipv4: Invalid scope value");
 513				inet_free_ifa(ifa);
 514				return -EINVAL;
 515			}
 516			ifa->ifa_flags |= IFA_F_SECONDARY;
 517		}
 518
 519		ifap = &ifa1->ifa_next;
 520		ifa1 = rtnl_dereference(*ifap);
 521	}
 522
  523	/* Allow any devices that wish to register ifaddr validators to weigh
  524	 * in now, before changes are committed.  The rtnl lock is serializing
 525	 * access here, so the state should not change between a validator call
 526	 * and a final notify on commit.  This isn't invoked on promotion under
 527	 * the assumption that validators are checking the address itself, and
 528	 * not the flags.
 529	 */
 530	ivi.ivi_addr = ifa->ifa_address;
 531	ivi.ivi_dev = ifa->ifa_dev;
 532	ivi.extack = extack;
 533	ret = blocking_notifier_call_chain(&inetaddr_validator_chain,
 534					   NETDEV_UP, &ivi);
 535	ret = notifier_to_errno(ret);
 536	if (ret) {
 537		inet_free_ifa(ifa);
 538		return ret;
 539	}
 540
 541	if (!(ifa->ifa_flags & IFA_F_SECONDARY))
 542		ifap = last_primary;
 543
 544	rcu_assign_pointer(ifa->ifa_next, *ifap);
 545	rcu_assign_pointer(*ifap, ifa);
 546
 547	inet_hash_insert(dev_net(in_dev->dev), ifa);
 548
 549	cancel_delayed_work(&check_lifetime_work);
 550	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
 551
 552	/* Send message first, then call notifier.
  553	   The notifier will trigger a FIB update, so that
  554	   netlink listeners will know about the new ifaddr */
 555	rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
 556	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
 557
 558	return 0;
 559}
 560
 561static int inet_insert_ifa(struct in_ifaddr *ifa)
 562{
 563	return __inet_insert_ifa(ifa, NULL, 0, NULL);
 564}
 565
 566static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
 567{
 568	struct in_device *in_dev = __in_dev_get_rtnl(dev);
 569
 570	ASSERT_RTNL();
 571
 572	if (!in_dev) {
 573		inet_free_ifa(ifa);
 574		return -ENOBUFS;
 575	}
 576	ipv4_devconf_setall(in_dev);
 577	neigh_parms_data_state_setall(in_dev->arp_parms);
 578	if (ifa->ifa_dev != in_dev) {
 579		WARN_ON(ifa->ifa_dev);
 580		in_dev_hold(in_dev);
 581		ifa->ifa_dev = in_dev;
 582	}
 583	if (ipv4_is_loopback(ifa->ifa_local))
 584		ifa->ifa_scope = RT_SCOPE_HOST;
 585	return inet_insert_ifa(ifa);
 586}
 587
  588/* Caller must hold RCU or RTNL:
  589 * We don't take a reference on the found in_device
 590 */
 591struct in_device *inetdev_by_index(struct net *net, int ifindex)
 592{
 593	struct net_device *dev;
 594	struct in_device *in_dev = NULL;
 595
 596	rcu_read_lock();
 597	dev = dev_get_by_index_rcu(net, ifindex);
 598	if (dev)
 599		in_dev = rcu_dereference_rtnl(dev->ip_ptr);
 600	rcu_read_unlock();
 601	return in_dev;
 602}
 603EXPORT_SYMBOL(inetdev_by_index);
 604
  605/* Called only with the RTNL semaphore held. No locks. */
 606
 607struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
 608				    __be32 mask)
 609{
 610	struct in_ifaddr *ifa;
 611
 612	ASSERT_RTNL();
 613
 614	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
 615		if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
 616			return ifa;
 617	}
 618	return NULL;
 619}
 620
 621static int ip_mc_autojoin_config(struct net *net, bool join,
 622				 const struct in_ifaddr *ifa)
 623{
 624#if defined(CONFIG_IP_MULTICAST)
 625	struct ip_mreqn mreq = {
 626		.imr_multiaddr.s_addr = ifa->ifa_address,
 627		.imr_ifindex = ifa->ifa_dev->dev->ifindex,
 628	};
 629	struct sock *sk = net->ipv4.mc_autojoin_sk;
 630	int ret;
 631
 632	ASSERT_RTNL();
 633
 634	lock_sock(sk);
 635	if (join)
 636		ret = ip_mc_join_group(sk, &mreq);
 637	else
 638		ret = ip_mc_leave_group(sk, &mreq);
 639	release_sock(sk);
 640
 641	return ret;
 642#else
 643	return -EOPNOTSUPP;
 644#endif
 645}
 646
 647static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
 648			    struct netlink_ext_ack *extack)
 649{
 650	struct net *net = sock_net(skb->sk);
 651	struct in_ifaddr __rcu **ifap;
 652	struct nlattr *tb[IFA_MAX+1];
 653	struct in_device *in_dev;
 654	struct ifaddrmsg *ifm;
 655	struct in_ifaddr *ifa;
 656	int err;
 657
 658	ASSERT_RTNL();
 659
 660	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
 661				     ifa_ipv4_policy, extack);
 662	if (err < 0)
 663		goto errout;
 664
 665	ifm = nlmsg_data(nlh);
 666	in_dev = inetdev_by_index(net, ifm->ifa_index);
 667	if (!in_dev) {
 668		NL_SET_ERR_MSG(extack, "ipv4: Device not found");
 669		err = -ENODEV;
 670		goto errout;
 671	}
 672
 673	for (ifap = &in_dev->ifa_list; (ifa = rtnl_dereference(*ifap)) != NULL;
 674	     ifap = &ifa->ifa_next) {
 675		if (tb[IFA_LOCAL] &&
 676		    ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
 677			continue;
 678
 679		if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
 680			continue;
 681
 682		if (tb[IFA_ADDRESS] &&
 683		    (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
 684		    !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
 685			continue;
 686
 687		if (ipv4_is_multicast(ifa->ifa_address))
 688			ip_mc_autojoin_config(net, false, ifa);
 689		__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
 690		return 0;
 691	}
 692
 693	NL_SET_ERR_MSG(extack, "ipv4: Address not found");
 694	err = -EADDRNOTAVAIL;
 695errout:
 696	return err;
 697}
 698
 699#define INFINITY_LIFE_TIME	0xFFFFFFFF
 700
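/* Periodic worker that ages out addresses with finite lifetimes.  Each hash
 * bucket is first scanned under RCU to see whether anything expired at all;
 * only buckets that need a change are rescanned under the RTNL, where
 * expired addresses are deleted and addresses past their preferred lifetime
 * are marked IFA_F_DEPRECATED.
 */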
 701static void check_lifetime(struct work_struct *work)
 702{
 703	unsigned long now, next, next_sec, next_sched;
 704	struct in_ifaddr *ifa;
 705	struct hlist_node *n;
 706	int i;
 707
 708	now = jiffies;
 709	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
 710
 711	for (i = 0; i < IN4_ADDR_HSIZE; i++) {
 712		bool change_needed = false;
 713
 714		rcu_read_lock();
 715		hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
 716			unsigned long age, tstamp;
 717			u32 preferred_lft;
 718			u32 valid_lft;
 719			u32 flags;
 720
 721			flags = READ_ONCE(ifa->ifa_flags);
 722			if (flags & IFA_F_PERMANENT)
 723				continue;
 724
 725			preferred_lft = READ_ONCE(ifa->ifa_preferred_lft);
 726			valid_lft = READ_ONCE(ifa->ifa_valid_lft);
 727			tstamp = READ_ONCE(ifa->ifa_tstamp);
 728			/* We try to batch several events at once. */
 729			age = (now - tstamp +
 730			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
 731
 732			if (valid_lft != INFINITY_LIFE_TIME &&
 733			    age >= valid_lft) {
 734				change_needed = true;
 735			} else if (preferred_lft ==
 736				   INFINITY_LIFE_TIME) {
 737				continue;
 738			} else if (age >= preferred_lft) {
 739				if (time_before(tstamp + valid_lft * HZ, next))
 740					next = tstamp + valid_lft * HZ;
 741
 742				if (!(flags & IFA_F_DEPRECATED))
 743					change_needed = true;
 744			} else if (time_before(tstamp + preferred_lft * HZ,
 745					       next)) {
 746				next = tstamp + preferred_lft * HZ;
 747			}
 748		}
 749		rcu_read_unlock();
 750		if (!change_needed)
 751			continue;
 752		rtnl_lock();
 753		hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
 754			unsigned long age;
 755
 756			if (ifa->ifa_flags & IFA_F_PERMANENT)
 757				continue;
 758
 759			/* We try to batch several events at once. */
 760			age = (now - ifa->ifa_tstamp +
 761			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
 762
 763			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
 764			    age >= ifa->ifa_valid_lft) {
 765				struct in_ifaddr __rcu **ifap;
 766				struct in_ifaddr *tmp;
 767
 768				ifap = &ifa->ifa_dev->ifa_list;
 769				tmp = rtnl_dereference(*ifap);
 770				while (tmp) {
 771					if (tmp == ifa) {
 772						inet_del_ifa(ifa->ifa_dev,
 773							     ifap, 1);
 774						break;
 775					}
 776					ifap = &tmp->ifa_next;
 777					tmp = rtnl_dereference(*ifap);
 778				}
 779			} else if (ifa->ifa_preferred_lft !=
 780				   INFINITY_LIFE_TIME &&
 781				   age >= ifa->ifa_preferred_lft &&
 782				   !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
 783				ifa->ifa_flags |= IFA_F_DEPRECATED;
 784				rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
 785			}
 786		}
 787		rtnl_unlock();
 788	}
 789
 790	next_sec = round_jiffies_up(next);
 791	next_sched = next;
 792
 793	/* If rounded timeout is accurate enough, accept it. */
 794	if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
 795		next_sched = next_sec;
 796
 797	now = jiffies;
 798	/* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
 799	if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
 800		next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
 801
 802	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work,
 803			next_sched - now);
 804}
 805
 806static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
 807			     __u32 prefered_lft)
 808{
 809	unsigned long timeout;
 810	u32 flags;
 811
 812	flags = ifa->ifa_flags & ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
 813
 814	timeout = addrconf_timeout_fixup(valid_lft, HZ);
 815	if (addrconf_finite_timeout(timeout))
 816		WRITE_ONCE(ifa->ifa_valid_lft, timeout);
 817	else
 818		flags |= IFA_F_PERMANENT;
 819
 820	timeout = addrconf_timeout_fixup(prefered_lft, HZ);
 821	if (addrconf_finite_timeout(timeout)) {
 822		if (timeout == 0)
 823			flags |= IFA_F_DEPRECATED;
 824		WRITE_ONCE(ifa->ifa_preferred_lft, timeout);
 825	}
 826	WRITE_ONCE(ifa->ifa_flags, flags);
 827	WRITE_ONCE(ifa->ifa_tstamp, jiffies);
 828	if (!ifa->ifa_cstamp)
 829		WRITE_ONCE(ifa->ifa_cstamp, ifa->ifa_tstamp);
 830}
 831
 832static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
 833				       __u32 *pvalid_lft, __u32 *pprefered_lft,
 834				       struct netlink_ext_ack *extack)
 835{
 836	struct nlattr *tb[IFA_MAX+1];
 837	struct in_ifaddr *ifa;
 838	struct ifaddrmsg *ifm;
 839	struct net_device *dev;
 840	struct in_device *in_dev;
 841	int err;
 842
 843	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
 844				     ifa_ipv4_policy, extack);
 845	if (err < 0)
 846		goto errout;
 847
 848	ifm = nlmsg_data(nlh);
 849	err = -EINVAL;
 850
 851	if (ifm->ifa_prefixlen > 32) {
 852		NL_SET_ERR_MSG(extack, "ipv4: Invalid prefix length");
 853		goto errout;
 854	}
 855
 856	if (!tb[IFA_LOCAL]) {
 857		NL_SET_ERR_MSG(extack, "ipv4: Local address is not supplied");
 858		goto errout;
 859	}
 860
 861	dev = __dev_get_by_index(net, ifm->ifa_index);
 862	err = -ENODEV;
 863	if (!dev) {
 864		NL_SET_ERR_MSG(extack, "ipv4: Device not found");
 865		goto errout;
 866	}
 867
 868	in_dev = __in_dev_get_rtnl(dev);
 869	err = -ENOBUFS;
 870	if (!in_dev)
 871		goto errout;
 872
 873	ifa = inet_alloc_ifa();
 874	if (!ifa)
 875		/*
  876		 * A potential indev allocation can be left alive; it stays
  877		 * assigned to its device and is destroyed with it.
 878		 */
 879		goto errout;
 880
 881	ipv4_devconf_setall(in_dev);
 882	neigh_parms_data_state_setall(in_dev->arp_parms);
 883	in_dev_hold(in_dev);
 884
 885	if (!tb[IFA_ADDRESS])
 886		tb[IFA_ADDRESS] = tb[IFA_LOCAL];
 887
 888	INIT_HLIST_NODE(&ifa->hash);
 889	ifa->ifa_prefixlen = ifm->ifa_prefixlen;
 890	ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
 891	ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
 892					 ifm->ifa_flags;
 893	ifa->ifa_scope = ifm->ifa_scope;
 894	ifa->ifa_dev = in_dev;
 895
 896	ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
 897	ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
 898
 899	if (tb[IFA_BROADCAST])
 900		ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
 901
 902	if (tb[IFA_LABEL])
 903		nla_strscpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
 904	else
 905		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
 906
 907	if (tb[IFA_RT_PRIORITY])
 908		ifa->ifa_rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
 909
 910	if (tb[IFA_PROTO])
 911		ifa->ifa_proto = nla_get_u8(tb[IFA_PROTO]);
 912
 913	if (tb[IFA_CACHEINFO]) {
 914		struct ifa_cacheinfo *ci;
 915
 916		ci = nla_data(tb[IFA_CACHEINFO]);
 917		if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
 918			NL_SET_ERR_MSG(extack, "ipv4: address lifetime invalid");
 919			err = -EINVAL;
 920			goto errout_free;
 921		}
 922		*pvalid_lft = ci->ifa_valid;
 923		*pprefered_lft = ci->ifa_prefered;
 924	}
 925
 926	return ifa;
 927
 928errout_free:
 929	inet_free_ifa(ifa);
 930errout:
 931	return ERR_PTR(err);
 932}
 933
 934static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
 935{
 936	struct in_device *in_dev = ifa->ifa_dev;
 937	struct in_ifaddr *ifa1;
 938
 939	if (!ifa->ifa_local)
 940		return NULL;
 941
 942	in_dev_for_each_ifa_rtnl(ifa1, in_dev) {
 943		if (ifa1->ifa_mask == ifa->ifa_mask &&
 944		    inet_ifa_match(ifa1->ifa_address, ifa) &&
 945		    ifa1->ifa_local == ifa->ifa_local)
 946			return ifa1;
 947	}
 948	return NULL;
 949}
 950
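/* RTM_NEWADDR handler, e.g. what an "ip addr add 192.0.2.1/24 dev eth0"
 * request arrives as.  With NLM_F_REPLACE an already configured address has
 * its metric, proto and lifetimes updated in place instead of failing with
 * EEXIST.
 */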
 951static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
 952			    struct netlink_ext_ack *extack)
 953{
 954	struct net *net = sock_net(skb->sk);
 955	struct in_ifaddr *ifa;
 956	struct in_ifaddr *ifa_existing;
 957	__u32 valid_lft = INFINITY_LIFE_TIME;
 958	__u32 prefered_lft = INFINITY_LIFE_TIME;
 959
 960	ASSERT_RTNL();
 961
 962	ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft, extack);
 963	if (IS_ERR(ifa))
 964		return PTR_ERR(ifa);
 965
 966	ifa_existing = find_matching_ifa(ifa);
 967	if (!ifa_existing) {
 968		/* It would be best to check for !NLM_F_CREATE here but
 969		 * userspace already relies on not having to provide this.
 970		 */
 971		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
 972		if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
 973			int ret = ip_mc_autojoin_config(net, true, ifa);
 974
 975			if (ret < 0) {
 976				NL_SET_ERR_MSG(extack, "ipv4: Multicast auto join failed");
 977				inet_free_ifa(ifa);
 978				return ret;
 979			}
 980		}
 981		return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid,
 982					 extack);
 983	} else {
 984		u32 new_metric = ifa->ifa_rt_priority;
 985		u8 new_proto = ifa->ifa_proto;
 986
 987		inet_free_ifa(ifa);
 988
 989		if (nlh->nlmsg_flags & NLM_F_EXCL ||
 990		    !(nlh->nlmsg_flags & NLM_F_REPLACE)) {
 991			NL_SET_ERR_MSG(extack, "ipv4: Address already assigned");
 992			return -EEXIST;
 993		}
 994		ifa = ifa_existing;
 995
 996		if (ifa->ifa_rt_priority != new_metric) {
 997			fib_modify_prefix_metric(ifa, new_metric);
 998			ifa->ifa_rt_priority = new_metric;
 999		}
1000
1001		ifa->ifa_proto = new_proto;
1002
1003		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
1004		cancel_delayed_work(&check_lifetime_work);
1005		queue_delayed_work(system_power_efficient_wq,
1006				&check_lifetime_work, 0);
1007		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
1008	}
1009	return 0;
1010}
1011
1012/*
1013 *	Determine a default network mask, based on the IP address.
1014 */
1015
1016static int inet_abc_len(__be32 addr)
1017{
1018	int rc = -1;	/* Something else, probably a multicast. */
1019
1020	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
1021		rc = 0;
1022	else {
1023		__u32 haddr = ntohl(addr);
1024		if (IN_CLASSA(haddr))
1025			rc = 8;
1026		else if (IN_CLASSB(haddr))
1027			rc = 16;
1028		else if (IN_CLASSC(haddr))
1029			rc = 24;
1030		else if (IN_CLASSE(haddr))
1031			rc = 32;
1032	}
1033
1034	return rc;
1035}
1036
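/* A quick sketch of the classful defaults the helper above produces:
 *
 *	10.0.0.1		->  8	(class A)
 *	172.16.0.1		-> 16	(class B)
 *	192.168.1.1		-> 24	(class C)
 *	240.0.0.1		-> 32	(class E)
 *	224.0.0.1		-> -1	(multicast: no sensible default)
 *	0.x.x.x, 255.255.255.255 ->  0
 *
 * The result is used by the legacy SIOCSIFADDR/SIOCSIFDSTADDR paths below,
 * both to reject multicast addresses and as the default prefix length when
 * userspace never supplied a netmask.
 */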
1037
1038int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
1039{
1040	struct sockaddr_in sin_orig;
1041	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr->ifr_addr;
1042	struct in_ifaddr __rcu **ifap = NULL;
1043	struct in_device *in_dev;
1044	struct in_ifaddr *ifa = NULL;
1045	struct net_device *dev;
1046	char *colon;
1047	int ret = -EFAULT;
1048	int tryaddrmatch = 0;
1049
1050	ifr->ifr_name[IFNAMSIZ - 1] = 0;
1051
1052	/* save original address for comparison */
1053	memcpy(&sin_orig, sin, sizeof(*sin));
1054
1055	colon = strchr(ifr->ifr_name, ':');
1056	if (colon)
1057		*colon = 0;
1058
1059	dev_load(net, ifr->ifr_name);
1060
1061	switch (cmd) {
1062	case SIOCGIFADDR:	/* Get interface address */
1063	case SIOCGIFBRDADDR:	/* Get the broadcast address */
1064	case SIOCGIFDSTADDR:	/* Get the destination address */
1065	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
1066		/* Note that these ioctls will not sleep,
1067		   so that we do not impose a lock.
1068		   One day we will be forced to put shlock here (I mean SMP)
1069		 */
1070		tryaddrmatch = (sin_orig.sin_family == AF_INET);
1071		memset(sin, 0, sizeof(*sin));
1072		sin->sin_family = AF_INET;
1073		break;
1074
1075	case SIOCSIFFLAGS:
1076		ret = -EPERM;
1077		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1078			goto out;
1079		break;
1080	case SIOCSIFADDR:	/* Set interface address (and family) */
1081	case SIOCSIFBRDADDR:	/* Set the broadcast address */
1082	case SIOCSIFDSTADDR:	/* Set the destination address */
1083	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
1084		ret = -EPERM;
1085		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1086			goto out;
1087		ret = -EINVAL;
1088		if (sin->sin_family != AF_INET)
1089			goto out;
1090		break;
1091	default:
1092		ret = -EINVAL;
1093		goto out;
1094	}
1095
1096	rtnl_lock();
1097
1098	ret = -ENODEV;
1099	dev = __dev_get_by_name(net, ifr->ifr_name);
1100	if (!dev)
1101		goto done;
1102
1103	if (colon)
1104		*colon = ':';
1105
1106	in_dev = __in_dev_get_rtnl(dev);
1107	if (in_dev) {
1108		if (tryaddrmatch) {
1109			/* Matthias Andree */
1110			/* compare label and address (4.4BSD style) */
1111			/* note: we only do this for a limited set of ioctls
1112			   and only if the original address family was AF_INET.
1113			   This is checked above. */
1114
1115			for (ifap = &in_dev->ifa_list;
1116			     (ifa = rtnl_dereference(*ifap)) != NULL;
1117			     ifap = &ifa->ifa_next) {
1118				if (!strcmp(ifr->ifr_name, ifa->ifa_label) &&
1119				    sin_orig.sin_addr.s_addr ==
1120							ifa->ifa_local) {
1121					break; /* found */
1122				}
1123			}
1124		}
1125		/* we didn't get a match, maybe the application is
1126		   4.3BSD-style and passed in junk so we fall back to
1127		   comparing just the label */
1128		if (!ifa) {
1129			for (ifap = &in_dev->ifa_list;
1130			     (ifa = rtnl_dereference(*ifap)) != NULL;
1131			     ifap = &ifa->ifa_next)
1132				if (!strcmp(ifr->ifr_name, ifa->ifa_label))
1133					break;
1134		}
1135	}
1136
1137	ret = -EADDRNOTAVAIL;
1138	if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
1139		goto done;
1140
1141	switch (cmd) {
1142	case SIOCGIFADDR:	/* Get interface address */
1143		ret = 0;
1144		sin->sin_addr.s_addr = ifa->ifa_local;
1145		break;
1146
1147	case SIOCGIFBRDADDR:	/* Get the broadcast address */
1148		ret = 0;
1149		sin->sin_addr.s_addr = ifa->ifa_broadcast;
1150		break;
1151
1152	case SIOCGIFDSTADDR:	/* Get the destination address */
1153		ret = 0;
1154		sin->sin_addr.s_addr = ifa->ifa_address;
1155		break;
1156
1157	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
1158		ret = 0;
1159		sin->sin_addr.s_addr = ifa->ifa_mask;
1160		break;
1161
1162	case SIOCSIFFLAGS:
1163		if (colon) {
1164			ret = -EADDRNOTAVAIL;
1165			if (!ifa)
1166				break;
1167			ret = 0;
1168			if (!(ifr->ifr_flags & IFF_UP))
1169				inet_del_ifa(in_dev, ifap, 1);
1170			break;
1171		}
1172		ret = dev_change_flags(dev, ifr->ifr_flags, NULL);
1173		break;
1174
1175	case SIOCSIFADDR:	/* Set interface address (and family) */
1176		ret = -EINVAL;
1177		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1178			break;
1179
1180		if (!ifa) {
1181			ret = -ENOBUFS;
1182			ifa = inet_alloc_ifa();
1183			if (!ifa)
1184				break;
1185			INIT_HLIST_NODE(&ifa->hash);
1186			if (colon)
1187				memcpy(ifa->ifa_label, ifr->ifr_name, IFNAMSIZ);
1188			else
1189				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1190		} else {
1191			ret = 0;
1192			if (ifa->ifa_local == sin->sin_addr.s_addr)
1193				break;
1194			inet_del_ifa(in_dev, ifap, 0);
1195			ifa->ifa_broadcast = 0;
1196			ifa->ifa_scope = 0;
1197		}
1198
1199		ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
1200
1201		if (!(dev->flags & IFF_POINTOPOINT)) {
1202			ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
1203			ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
1204			if ((dev->flags & IFF_BROADCAST) &&
1205			    ifa->ifa_prefixlen < 31)
1206				ifa->ifa_broadcast = ifa->ifa_address |
1207						     ~ifa->ifa_mask;
1208		} else {
1209			ifa->ifa_prefixlen = 32;
1210			ifa->ifa_mask = inet_make_mask(32);
1211		}
1212		set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
1213		ret = inet_set_ifa(dev, ifa);
1214		break;
1215
1216	case SIOCSIFBRDADDR:	/* Set the broadcast address */
1217		ret = 0;
1218		if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
1219			inet_del_ifa(in_dev, ifap, 0);
1220			ifa->ifa_broadcast = sin->sin_addr.s_addr;
1221			inet_insert_ifa(ifa);
1222		}
1223		break;
1224
1225	case SIOCSIFDSTADDR:	/* Set the destination address */
1226		ret = 0;
1227		if (ifa->ifa_address == sin->sin_addr.s_addr)
1228			break;
1229		ret = -EINVAL;
1230		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1231			break;
1232		ret = 0;
1233		inet_del_ifa(in_dev, ifap, 0);
1234		ifa->ifa_address = sin->sin_addr.s_addr;
1235		inet_insert_ifa(ifa);
1236		break;
1237
1238	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
1239
1240		/*
1241		 *	The mask we set must be legal.
1242		 */
1243		ret = -EINVAL;
1244		if (bad_mask(sin->sin_addr.s_addr, 0))
1245			break;
1246		ret = 0;
1247		if (ifa->ifa_mask != sin->sin_addr.s_addr) {
1248			__be32 old_mask = ifa->ifa_mask;
1249			inet_del_ifa(in_dev, ifap, 0);
1250			ifa->ifa_mask = sin->sin_addr.s_addr;
1251			ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
1252
 1253			/* See if the current broadcast address matches
 1254			 * the current netmask, then recalculate
1255			 * the broadcast address. Otherwise it's a
1256			 * funny address, so don't touch it since
1257			 * the user seems to know what (s)he's doing...
1258			 */
1259			if ((dev->flags & IFF_BROADCAST) &&
1260			    (ifa->ifa_prefixlen < 31) &&
1261			    (ifa->ifa_broadcast ==
1262			     (ifa->ifa_local|~old_mask))) {
1263				ifa->ifa_broadcast = (ifa->ifa_local |
1264						      ~sin->sin_addr.s_addr);
1265			}
1266			inet_insert_ifa(ifa);
1267		}
1268		break;
1269	}
1270done:
1271	rtnl_unlock();
1272out:
1273	return ret;
1274}
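/* A rough userspace sketch of how this handler is typically driven (headers
 * and error handling omitted; "eth0" and the address are placeholders, and
 * the set operations need CAP_NET_ADMIN):
 *
 *	struct ifreq ifr = { 0 };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *	ioctl(fd, SIOCSIFADDR, &ifr);		set the primary address
 *	ioctl(fd, SIOCGIFADDR, &ifr);		read it back into ifr_addr
 *
 * A label such as "eth0:1" in ifr_name selects a 4.4BSD style alias, as
 * described in the changelog at the top of this file.
 */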
1275
1276int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
1277{
1278	struct in_device *in_dev = __in_dev_get_rtnl(dev);
1279	const struct in_ifaddr *ifa;
1280	struct ifreq ifr;
1281	int done = 0;
1282
1283	if (WARN_ON(size > sizeof(struct ifreq)))
1284		goto out;
1285
1286	if (!in_dev)
1287		goto out;
1288
1289	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1290		if (!buf) {
1291			done += size;
1292			continue;
1293		}
1294		if (len < size)
1295			break;
1296		memset(&ifr, 0, sizeof(struct ifreq));
1297		strcpy(ifr.ifr_name, ifa->ifa_label);
1298
1299		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
1300		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
1301								ifa->ifa_local;
1302
1303		if (copy_to_user(buf + done, &ifr, size)) {
1304			done = -EFAULT;
1305			break;
1306		}
1307		len  -= size;
1308		done += size;
1309	}
1310out:
1311	return done;
1312}
1313
1314static __be32 in_dev_select_addr(const struct in_device *in_dev,
1315				 int scope)
1316{
1317	const struct in_ifaddr *ifa;
1318
1319	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1320		if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY)
1321			continue;
1322		if (ifa->ifa_scope != RT_SCOPE_LINK &&
1323		    ifa->ifa_scope <= scope)
1324			return ifa->ifa_local;
1325	}
1326
1327	return 0;
1328}
1329
1330__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
1331{
1332	const struct in_ifaddr *ifa;
1333	__be32 addr = 0;
1334	unsigned char localnet_scope = RT_SCOPE_HOST;
1335	struct in_device *in_dev;
1336	struct net *net = dev_net(dev);
1337	int master_idx;
1338
1339	rcu_read_lock();
1340	in_dev = __in_dev_get_rcu(dev);
1341	if (!in_dev)
1342		goto no_in_dev;
1343
1344	if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
1345		localnet_scope = RT_SCOPE_LINK;
1346
1347	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1348		if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY)
1349			continue;
1350		if (min(ifa->ifa_scope, localnet_scope) > scope)
1351			continue;
1352		if (!dst || inet_ifa_match(dst, ifa)) {
1353			addr = ifa->ifa_local;
1354			break;
1355		}
1356		if (!addr)
1357			addr = ifa->ifa_local;
1358	}
1359
1360	if (addr)
1361		goto out_unlock;
1362no_in_dev:
1363	master_idx = l3mdev_master_ifindex_rcu(dev);
1364
1365	/* For VRFs, the VRF device takes the place of the loopback device,
1366	 * with addresses on it being preferred.  Note in such cases the
1367	 * loopback device will be among the devices that fail the master_idx
1368	 * equality check in the loop below.
1369	 */
1370	if (master_idx &&
1371	    (dev = dev_get_by_index_rcu(net, master_idx)) &&
1372	    (in_dev = __in_dev_get_rcu(dev))) {
1373		addr = in_dev_select_addr(in_dev, scope);
1374		if (addr)
1375			goto out_unlock;
1376	}
1377
 1378	/* Non-loopback addresses on the loopback device should be preferred
 1379	   in this case. It is important that lo is the first interface
 1380	   in the dev_base list.
1381	 */
1382	for_each_netdev_rcu(net, dev) {
1383		if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1384			continue;
1385
1386		in_dev = __in_dev_get_rcu(dev);
1387		if (!in_dev)
1388			continue;
1389
1390		addr = in_dev_select_addr(in_dev, scope);
1391		if (addr)
1392			goto out_unlock;
1393	}
1394out_unlock:
1395	rcu_read_unlock();
1396	return addr;
1397}
1398EXPORT_SYMBOL(inet_select_addr);
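/* A minimal sketch of a typical caller, picking a source address for a reply
 * sent to "daddr" through "dev" (both placeholders); the function takes
 * rcu_read_lock() itself, so no extra locking is needed:
 *
 *	__be32 saddr = inet_select_addr(dev, daddr, RT_SCOPE_UNIVERSE);
 *
 *	if (!saddr)
 *		...				no usable address in this scope
 */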
1399
1400static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
1401			      __be32 local, int scope)
1402{
1403	unsigned char localnet_scope = RT_SCOPE_HOST;
1404	const struct in_ifaddr *ifa;
1405	__be32 addr = 0;
1406	int same = 0;
1407
1408	if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
1409		localnet_scope = RT_SCOPE_LINK;
1410
1411	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1412		unsigned char min_scope = min(ifa->ifa_scope, localnet_scope);
1413
1414		if (!addr &&
1415		    (local == ifa->ifa_local || !local) &&
1416		    min_scope <= scope) {
1417			addr = ifa->ifa_local;
1418			if (same)
1419				break;
1420		}
1421		if (!same) {
1422			same = (!local || inet_ifa_match(local, ifa)) &&
1423				(!dst || inet_ifa_match(dst, ifa));
1424			if (same && addr) {
1425				if (local || !dst)
1426					break;
 1427				/* Is the selected addr in the dst subnet? */
1428				if (inet_ifa_match(addr, ifa))
1429					break;
1430				/* No, then can we use new local src? */
1431				if (min_scope <= scope) {
1432					addr = ifa->ifa_local;
1433					break;
1434				}
1435				/* search for large dst subnet for addr */
1436				same = 0;
1437			}
1438		}
1439	}
1440
1441	return same ? addr : 0;
1442}
1443
1444/*
1445 * Confirm that local IP address exists using wildcards:
1446 * - net: netns to check, cannot be NULL
1447 * - in_dev: only on this interface, NULL=any interface
1448 * - dst: only in the same subnet as dst, 0=any dst
1449 * - local: address, 0=autoselect the local address
1450 * - scope: maximum allowed scope value for the local address
1451 */
1452__be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
1453			 __be32 dst, __be32 local, int scope)
1454{
1455	__be32 addr = 0;
1456	struct net_device *dev;
1457
1458	if (in_dev)
1459		return confirm_addr_indev(in_dev, dst, local, scope);
1460
1461	rcu_read_lock();
1462	for_each_netdev_rcu(net, dev) {
1463		in_dev = __in_dev_get_rcu(dev);
1464		if (in_dev) {
1465			addr = confirm_addr_indev(in_dev, dst, local, scope);
1466			if (addr)
1467				break;
1468		}
1469	}
1470	rcu_read_unlock();
1471
1472	return addr;
1473}
1474EXPORT_SYMBOL(inet_confirm_addr);
1475
1476/*
1477 *	Device notifier
1478 */
1479
1480int register_inetaddr_notifier(struct notifier_block *nb)
1481{
1482	return blocking_notifier_chain_register(&inetaddr_chain, nb);
1483}
1484EXPORT_SYMBOL(register_inetaddr_notifier);
1485
1486int unregister_inetaddr_notifier(struct notifier_block *nb)
1487{
1488	return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
1489}
1490EXPORT_SYMBOL(unregister_inetaddr_notifier);
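/* A small subscriber sketch (all names hypothetical).  The chain is called
 * with NETDEV_UP/NETDEV_DOWN and the affected struct in_ifaddr as data:
 *
 *	static int my_inetaddr_event(struct notifier_block *nb,
 *				     unsigned long event, void *ptr)
 *	{
 *		struct in_ifaddr *ifa = ptr;
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s: added %pI4\n", ifa->ifa_label,
 *				&ifa->ifa_local);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_inetaddr_event,
 *	};
 *
 *	register_inetaddr_notifier(&my_nb);
 */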
1491
1492int register_inetaddr_validator_notifier(struct notifier_block *nb)
1493{
1494	return blocking_notifier_chain_register(&inetaddr_validator_chain, nb);
1495}
1496EXPORT_SYMBOL(register_inetaddr_validator_notifier);
1497
1498int unregister_inetaddr_validator_notifier(struct notifier_block *nb)
1499{
1500	return blocking_notifier_chain_unregister(&inetaddr_validator_chain,
1501	    nb);
1502}
1503EXPORT_SYMBOL(unregister_inetaddr_validator_notifier);
1504
1505/* Rename ifa_labels for a device name change. Make some effort to preserve
1506 * existing alias numbering and to create unique labels if possible.
1507*/
1508static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1509{
1510	struct in_ifaddr *ifa;
1511	int named = 0;
1512
1513	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1514		char old[IFNAMSIZ], *dot;
1515
1516		memcpy(old, ifa->ifa_label, IFNAMSIZ);
1517		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1518		if (named++ == 0)
1519			goto skip;
1520		dot = strchr(old, ':');
1521		if (!dot) {
1522			sprintf(old, ":%d", named);
1523			dot = old;
1524		}
1525		if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
1526			strcat(ifa->ifa_label, dot);
1527		else
1528			strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1529skip:
1530		rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
1531	}
1532}
1533
1534static void inetdev_send_gratuitous_arp(struct net_device *dev,
1535					struct in_device *in_dev)
1536
1537{
1538	const struct in_ifaddr *ifa;
1539
1540	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1541		arp_send(ARPOP_REQUEST, ETH_P_ARP,
1542			 ifa->ifa_local, dev,
1543			 ifa->ifa_local, NULL,
1544			 dev->dev_addr, NULL);
1545	}
1546}
1547
1548/* Called only under RTNL semaphore */
1549
1550static int inetdev_event(struct notifier_block *this, unsigned long event,
1551			 void *ptr)
1552{
1553	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1554	struct in_device *in_dev = __in_dev_get_rtnl(dev);
1555
1556	ASSERT_RTNL();
1557
1558	if (!in_dev) {
1559		if (event == NETDEV_REGISTER) {
1560			in_dev = inetdev_init(dev);
1561			if (IS_ERR(in_dev))
1562				return notifier_from_errno(PTR_ERR(in_dev));
1563			if (dev->flags & IFF_LOOPBACK) {
1564				IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
1565				IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
1566			}
1567		} else if (event == NETDEV_CHANGEMTU) {
1568			/* Re-enabling IP */
1569			if (inetdev_valid_mtu(dev->mtu))
1570				in_dev = inetdev_init(dev);
1571		}
1572		goto out;
1573	}
1574
1575	switch (event) {
1576	case NETDEV_REGISTER:
1577		pr_debug("%s: bug\n", __func__);
1578		RCU_INIT_POINTER(dev->ip_ptr, NULL);
1579		break;
1580	case NETDEV_UP:
1581		if (!inetdev_valid_mtu(dev->mtu))
1582			break;
1583		if (dev->flags & IFF_LOOPBACK) {
1584			struct in_ifaddr *ifa = inet_alloc_ifa();
1585
1586			if (ifa) {
1587				INIT_HLIST_NODE(&ifa->hash);
1588				ifa->ifa_local =
1589				  ifa->ifa_address = htonl(INADDR_LOOPBACK);
1590				ifa->ifa_prefixlen = 8;
1591				ifa->ifa_mask = inet_make_mask(8);
1592				in_dev_hold(in_dev);
1593				ifa->ifa_dev = in_dev;
1594				ifa->ifa_scope = RT_SCOPE_HOST;
1595				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1596				set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
1597						 INFINITY_LIFE_TIME);
1598				ipv4_devconf_setall(in_dev);
1599				neigh_parms_data_state_setall(in_dev->arp_parms);
1600				inet_insert_ifa(ifa);
1601			}
1602		}
1603		ip_mc_up(in_dev);
1604		fallthrough;
1605	case NETDEV_CHANGEADDR:
1606		if (!IN_DEV_ARP_NOTIFY(in_dev))
1607			break;
1608		fallthrough;
1609	case NETDEV_NOTIFY_PEERS:
1610		/* Send gratuitous ARP to notify of link change */
1611		inetdev_send_gratuitous_arp(dev, in_dev);
1612		break;
1613	case NETDEV_DOWN:
1614		ip_mc_down(in_dev);
1615		break;
1616	case NETDEV_PRE_TYPE_CHANGE:
1617		ip_mc_unmap(in_dev);
1618		break;
1619	case NETDEV_POST_TYPE_CHANGE:
1620		ip_mc_remap(in_dev);
1621		break;
1622	case NETDEV_CHANGEMTU:
1623		if (inetdev_valid_mtu(dev->mtu))
1624			break;
1625		/* disable IP when MTU is not enough */
1626		fallthrough;
1627	case NETDEV_UNREGISTER:
1628		inetdev_destroy(in_dev);
1629		break;
1630	case NETDEV_CHANGENAME:
1631		/* Do not notify about label change, this event is
1632		 * not interesting to applications using netlink.
1633		 */
1634		inetdev_changename(dev, in_dev);
1635
1636		devinet_sysctl_unregister(in_dev);
1637		devinet_sysctl_register(in_dev);
1638		break;
1639	}
1640out:
1641	return NOTIFY_DONE;
1642}
1643
1644static struct notifier_block ip_netdev_notifier = {
1645	.notifier_call = inetdev_event,
1646};
1647
1648static size_t inet_nlmsg_size(void)
1649{
1650	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
1651	       + nla_total_size(4) /* IFA_ADDRESS */
1652	       + nla_total_size(4) /* IFA_LOCAL */
1653	       + nla_total_size(4) /* IFA_BROADCAST */
1654	       + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
1655	       + nla_total_size(4)  /* IFA_FLAGS */
1656	       + nla_total_size(1)  /* IFA_PROTO */
1657	       + nla_total_size(4)  /* IFA_RT_PRIORITY */
1658	       + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
1659}
1660
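/* Convert a jiffies timestamp into the hundredths of a second since boot
 * expected in struct ifa_cacheinfo.
 */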
1661static inline u32 cstamp_delta(unsigned long cstamp)
1662{
1663	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
1664}
1665
1666static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
1667			 unsigned long tstamp, u32 preferred, u32 valid)
1668{
1669	struct ifa_cacheinfo ci;
1670
1671	ci.cstamp = cstamp_delta(cstamp);
1672	ci.tstamp = cstamp_delta(tstamp);
1673	ci.ifa_prefered = preferred;
1674	ci.ifa_valid = valid;
1675
1676	return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
1677}
1678
1679static int inet_fill_ifaddr(struct sk_buff *skb, const struct in_ifaddr *ifa,
1680			    struct inet_fill_args *args)
1681{
1682	struct ifaddrmsg *ifm;
1683	struct nlmsghdr  *nlh;
1684	unsigned long tstamp;
1685	u32 preferred, valid;
1686	u32 flags;
1687
1688	nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
1689			args->flags);
1690	if (!nlh)
1691		return -EMSGSIZE;
1692
1693	ifm = nlmsg_data(nlh);
1694	ifm->ifa_family = AF_INET;
1695	ifm->ifa_prefixlen = ifa->ifa_prefixlen;
1696
1697	flags = READ_ONCE(ifa->ifa_flags);
1698	/* Warning : ifm->ifa_flags is an __u8, it holds only 8 bits.
1699	 * The 32bit value is given in IFA_FLAGS attribute.
1700	 */
1701	ifm->ifa_flags = (__u8)flags;
1702
1703	ifm->ifa_scope = ifa->ifa_scope;
1704	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
1705
1706	if (args->netnsid >= 0 &&
1707	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
1708		goto nla_put_failure;
1709
1710	tstamp = READ_ONCE(ifa->ifa_tstamp);
1711	if (!(flags & IFA_F_PERMANENT)) {
1712		preferred = READ_ONCE(ifa->ifa_preferred_lft);
1713		valid = READ_ONCE(ifa->ifa_valid_lft);
1714		if (preferred != INFINITY_LIFE_TIME) {
1715			long tval = (jiffies - tstamp) / HZ;
1716
1717			if (preferred > tval)
1718				preferred -= tval;
1719			else
1720				preferred = 0;
1721			if (valid != INFINITY_LIFE_TIME) {
1722				if (valid > tval)
1723					valid -= tval;
1724				else
1725					valid = 0;
1726			}
1727		}
1728	} else {
1729		preferred = INFINITY_LIFE_TIME;
1730		valid = INFINITY_LIFE_TIME;
1731	}
1732	if ((ifa->ifa_address &&
1733	     nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
1734	    (ifa->ifa_local &&
1735	     nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
1736	    (ifa->ifa_broadcast &&
1737	     nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
1738	    (ifa->ifa_label[0] &&
1739	     nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
1740	    (ifa->ifa_proto &&
1741	     nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto)) ||
1742	    nla_put_u32(skb, IFA_FLAGS, flags) ||
1743	    (ifa->ifa_rt_priority &&
1744	     nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) ||
1745	    put_cacheinfo(skb, READ_ONCE(ifa->ifa_cstamp), tstamp,
1746			  preferred, valid))
1747		goto nla_put_failure;
1748
1749	nlmsg_end(skb, nlh);
1750	return 0;
1751
1752nla_put_failure:
1753	nlmsg_cancel(skb, nlh);
1754	return -EMSGSIZE;
1755}
1756
1757static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
1758				      struct inet_fill_args *fillargs,
1759				      struct net **tgt_net, struct sock *sk,
1760				      struct netlink_callback *cb)
1761{
1762	struct netlink_ext_ack *extack = cb->extack;
1763	struct nlattr *tb[IFA_MAX+1];
1764	struct ifaddrmsg *ifm;
1765	int err, i;
1766
1767	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
1768		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for address dump request");
1769		return -EINVAL;
1770	}
1771
1772	ifm = nlmsg_data(nlh);
1773	if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
1774		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for address dump request");
1775		return -EINVAL;
1776	}
1777
1778	fillargs->ifindex = ifm->ifa_index;
1779	if (fillargs->ifindex) {
1780		cb->answer_flags |= NLM_F_DUMP_FILTERED;
1781		fillargs->flags |= NLM_F_DUMP_FILTERED;
1782	}
1783
1784	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
1785					    ifa_ipv4_policy, extack);
1786	if (err < 0)
1787		return err;
1788
1789	for (i = 0; i <= IFA_MAX; ++i) {
1790		if (!tb[i])
1791			continue;
1792
1793		if (i == IFA_TARGET_NETNSID) {
1794			struct net *net;
1795
1796			fillargs->netnsid = nla_get_s32(tb[i]);
1797
1798			net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
1799			if (IS_ERR(net)) {
1800				fillargs->netnsid = -1;
1801				NL_SET_ERR_MSG(extack, "ipv4: Invalid target network namespace id");
1802				return PTR_ERR(net);
1803			}
1804			*tgt_net = net;
1805		} else {
1806			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in dump request");
1807			return -EINVAL;
1808		}
1809	}
1810
1811	return 0;
1812}
1813
1814static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
1815			    struct netlink_callback *cb, int *s_ip_idx,
1816			    struct inet_fill_args *fillargs)
1817{
1818	struct in_ifaddr *ifa;
1819	int ip_idx = 0;
1820	int err;
1821
1822	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1823		if (ip_idx < *s_ip_idx) {
1824			ip_idx++;
1825			continue;
1826		}
1827		err = inet_fill_ifaddr(skb, ifa, fillargs);
1828		if (err < 0)
1829			goto done;
1830
1831		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1832		ip_idx++;
1833	}
1834	err = 0;
1835	ip_idx = 0;
1836done:
1837	*s_ip_idx = ip_idx;
1838
1839	return err;
1840}
1841
1842/* Combine dev_addr_genid and dev_base_seq to detect changes.
1843 */
1844static u32 inet_base_seq(const struct net *net)
1845{
1846	u32 res = atomic_read(&net->ipv4.dev_addr_genid) +
1847		  READ_ONCE(net->dev_base_seq);
1848
1849	/* Must not return 0 (see nl_dump_check_consistent()).
 1850	 * Choose a value far away from 0.
1851	 */
1852	if (!res)
1853		res = 0x80000000;
1854	return res;
1855}
1856
1857static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1858{
1859	const struct nlmsghdr *nlh = cb->nlh;
1860	struct inet_fill_args fillargs = {
1861		.portid = NETLINK_CB(cb->skb).portid,
1862		.seq = nlh->nlmsg_seq,
1863		.event = RTM_NEWADDR,
1864		.flags = NLM_F_MULTI,
1865		.netnsid = -1,
1866	};
1867	struct net *net = sock_net(skb->sk);
1868	struct net *tgt_net = net;
1869	struct {
1870		unsigned long ifindex;
1871		int ip_idx;
1872	} *ctx = (void *)cb->ctx;
1873	struct in_device *in_dev;
1874	struct net_device *dev;
1875	int err = 0;
1876
1877	rcu_read_lock();
1878	if (cb->strict_check) {
1879		err = inet_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
1880						 skb->sk, cb);
1881		if (err < 0)
1882			goto done;
1883
1884		if (fillargs.ifindex) {
1885			dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
1886			if (!dev) {
1887				err = -ENODEV;
1888				goto done;
1889			}
1890			in_dev = __in_dev_get_rcu(dev);
1891			if (!in_dev)
1892				goto done;
1893			err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx,
1894					       &fillargs);
1895			goto done;
1896		}
1897	}
1898
1899	cb->seq = inet_base_seq(tgt_net);
1900
1901	for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
1902		in_dev = __in_dev_get_rcu(dev);
1903		if (!in_dev)
1904			continue;
1905		err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx,
1906				       &fillargs);
1907		if (err < 0)
1908			goto done;
1909	}
1910done:
1911	if (fillargs.netnsid >= 0)
1912		put_net(tgt_net);
1913	rcu_read_unlock();
1914	return err;
1915}
1916
1917static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
1918		      u32 portid)
1919{
1920	struct inet_fill_args fillargs = {
1921		.portid = portid,
1922		.seq = nlh ? nlh->nlmsg_seq : 0,
1923		.event = event,
1924		.flags = 0,
1925		.netnsid = -1,
1926	};
1927	struct sk_buff *skb;
1928	int err = -ENOBUFS;
1929	struct net *net;
1930
1931	net = dev_net(ifa->ifa_dev->dev);
1932	skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
1933	if (!skb)
1934		goto errout;
1935
1936	err = inet_fill_ifaddr(skb, ifa, &fillargs);
1937	if (err < 0) {
1938		/* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1939		WARN_ON(err == -EMSGSIZE);
1940		kfree_skb(skb);
1941		goto errout;
1942	}
1943	rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
1944	return;
1945errout:
1946	if (err < 0)
1947		rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
1948}
1949
1950static size_t inet_get_link_af_size(const struct net_device *dev,
1951				    u32 ext_filter_mask)
1952{
1953	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1954
1955	if (!in_dev)
1956		return 0;
1957
1958	return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
1959}
1960
1961static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
1962			     u32 ext_filter_mask)
1963{
1964	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1965	struct nlattr *nla;
1966	int i;
1967
1968	if (!in_dev)
1969		return -ENODATA;
1970
1971	nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
1972	if (!nla)
1973		return -EMSGSIZE;
1974
1975	for (i = 0; i < IPV4_DEVCONF_MAX; i++)
1976		((u32 *) nla_data(nla))[i] = READ_ONCE(in_dev->cnf.data[i]);
1977
1978	return 0;
1979}
1980
1981static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
1982	[IFLA_INET_CONF]	= { .type = NLA_NESTED },
1983};
1984
1985static int inet_validate_link_af(const struct net_device *dev,
1986				 const struct nlattr *nla,
1987				 struct netlink_ext_ack *extack)
1988{
1989	struct nlattr *a, *tb[IFLA_INET_MAX+1];
1990	int err, rem;
1991
1992	if (dev && !__in_dev_get_rtnl(dev))
1993		return -EAFNOSUPPORT;
1994
1995	err = nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla,
1996					  inet_af_policy, extack);
1997	if (err < 0)
1998		return err;
1999
2000	if (tb[IFLA_INET_CONF]) {
2001		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
2002			int cfgid = nla_type(a);
2003
2004			if (nla_len(a) < 4)
2005				return -EINVAL;
2006
2007			if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
2008				return -EINVAL;
2009		}
2010	}
2011
2012	return 0;
2013}
2014
2015static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
2016			    struct netlink_ext_ack *extack)
2017{
2018	struct in_device *in_dev = __in_dev_get_rtnl(dev);
2019	struct nlattr *a, *tb[IFLA_INET_MAX+1];
2020	int rem;
2021
2022	if (!in_dev)
2023		return -EAFNOSUPPORT;
2024
2025	if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
2026		return -EINVAL;
2027
2028	if (tb[IFLA_INET_CONF]) {
2029		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
2030			ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
2031	}
2032
2033	return 0;
2034}
2035
2036static int inet_netconf_msgsize_devconf(int type)
2037{
2038	int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
2039		   + nla_total_size(4);	/* NETCONFA_IFINDEX */
2040	bool all = false;
2041
2042	if (type == NETCONFA_ALL)
2043		all = true;
2044
2045	if (all || type == NETCONFA_FORWARDING)
2046		size += nla_total_size(4);
2047	if (all || type == NETCONFA_RP_FILTER)
2048		size += nla_total_size(4);
2049	if (all || type == NETCONFA_MC_FORWARDING)
2050		size += nla_total_size(4);
2051	if (all || type == NETCONFA_BC_FORWARDING)
2052		size += nla_total_size(4);
2053	if (all || type == NETCONFA_PROXY_NEIGH)
2054		size += nla_total_size(4);
2055	if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
2056		size += nla_total_size(4);
2057
2058	return size;
2059}
2060
2061static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
2062				     const struct ipv4_devconf *devconf,
2063				     u32 portid, u32 seq, int event,
2064				     unsigned int flags, int type)
2065{
2066	struct nlmsghdr  *nlh;
2067	struct netconfmsg *ncm;
2068	bool all = false;
2069
2070	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
2071			flags);
2072	if (!nlh)
2073		return -EMSGSIZE;
2074
2075	if (type == NETCONFA_ALL)
2076		all = true;
2077
2078	ncm = nlmsg_data(nlh);
2079	ncm->ncm_family = AF_INET;
2080
2081	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
2082		goto nla_put_failure;
2083
2084	if (!devconf)
2085		goto out;
2086
2087	if ((all || type == NETCONFA_FORWARDING) &&
2088	    nla_put_s32(skb, NETCONFA_FORWARDING,
2089			IPV4_DEVCONF_RO(*devconf, FORWARDING)) < 0)
2090		goto nla_put_failure;
2091	if ((all || type == NETCONFA_RP_FILTER) &&
2092	    nla_put_s32(skb, NETCONFA_RP_FILTER,
2093			IPV4_DEVCONF_RO(*devconf, RP_FILTER)) < 0)
2094		goto nla_put_failure;
2095	if ((all || type == NETCONFA_MC_FORWARDING) &&
2096	    nla_put_s32(skb, NETCONFA_MC_FORWARDING,
2097			IPV4_DEVCONF_RO(*devconf, MC_FORWARDING)) < 0)
2098		goto nla_put_failure;
2099	if ((all || type == NETCONFA_BC_FORWARDING) &&
2100	    nla_put_s32(skb, NETCONFA_BC_FORWARDING,
2101			IPV4_DEVCONF_RO(*devconf, BC_FORWARDING)) < 0)
2102		goto nla_put_failure;
2103	if ((all || type == NETCONFA_PROXY_NEIGH) &&
2104	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
2105			IPV4_DEVCONF_RO(*devconf, PROXY_ARP)) < 0)
2106		goto nla_put_failure;
2107	if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
2108	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
2109			IPV4_DEVCONF_RO(*devconf,
2110					IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
2111		goto nla_put_failure;
2112
2113out:
2114	nlmsg_end(skb, nlh);
2115	return 0;
2116
2117nla_put_failure:
2118	nlmsg_cancel(skb, nlh);
2119	return -EMSGSIZE;
2120}
2121
2122void inet_netconf_notify_devconf(struct net *net, int event, int type,
2123				 int ifindex, struct ipv4_devconf *devconf)
2124{
2125	struct sk_buff *skb;
2126	int err = -ENOBUFS;
2127
2128	skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_KERNEL);
2129	if (!skb)
2130		goto errout;
2131
2132	err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
2133					event, 0, type);
2134	if (err < 0) {
2135		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
2136		WARN_ON(err == -EMSGSIZE);
2137		kfree_skb(skb);
2138		goto errout;
2139	}
2140	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_KERNEL);
2141	return;
2142errout:
2143	if (err < 0)
2144		rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
2145}
2146
2147static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
2148	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
2149	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
2150	[NETCONFA_RP_FILTER]	= { .len = sizeof(int) },
2151	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
2152	[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]	= { .len = sizeof(int) },
2153};
2154
2155static int inet_netconf_valid_get_req(struct sk_buff *skb,
2156				      const struct nlmsghdr *nlh,
2157				      struct nlattr **tb,
2158				      struct netlink_ext_ack *extack)
2159{
2160	int i, err;
2161
2162	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
2163		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf get request");
2164		return -EINVAL;
2165	}
2166
2167	if (!netlink_strict_get_check(skb))
2168		return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
2169					      tb, NETCONFA_MAX,
2170					      devconf_ipv4_policy, extack);
2171
2172	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
2173					    tb, NETCONFA_MAX,
2174					    devconf_ipv4_policy, extack);
2175	if (err)
2176		return err;
2177
2178	for (i = 0; i <= NETCONFA_MAX; i++) {
2179		if (!tb[i])
2180			continue;
2181
2182		switch (i) {
2183		case NETCONFA_IFINDEX:
2184			break;
2185		default:
2186			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in netconf get request");
2187			return -EINVAL;
2188		}
2189	}
2190
2191	return 0;
2192}
2193
2194static int inet_netconf_get_devconf(struct sk_buff *in_skb,
2195				    struct nlmsghdr *nlh,
2196				    struct netlink_ext_ack *extack)
2197{
2198	struct net *net = sock_net(in_skb->sk);
2199	struct nlattr *tb[NETCONFA_MAX + 1];
2200	const struct ipv4_devconf *devconf;
2201	struct in_device *in_dev = NULL;
2202	struct net_device *dev = NULL;
2203	struct sk_buff *skb;
2204	int ifindex;
2205	int err;
2206
2207	err = inet_netconf_valid_get_req(in_skb, nlh, tb, extack);
2208	if (err)
2209		return err;
2210
2211	if (!tb[NETCONFA_IFINDEX])
2212		return -EINVAL;
2213
2214	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
2215	switch (ifindex) {
2216	case NETCONFA_IFINDEX_ALL:
2217		devconf = net->ipv4.devconf_all;
2218		break;
2219	case NETCONFA_IFINDEX_DEFAULT:
2220		devconf = net->ipv4.devconf_dflt;
2221		break;
2222	default:
2223		err = -ENODEV;
2224		dev = dev_get_by_index(net, ifindex);
2225		if (dev)
2226			in_dev = in_dev_get(dev);
2227		if (!in_dev)
2228			goto errout;
2229		devconf = &in_dev->cnf;
2230		break;
2231	}
2232
2233	err = -ENOBUFS;
2234	skb = nlmsg_new(inet_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
2235	if (!skb)
2236		goto errout;
2237
2238	err = inet_netconf_fill_devconf(skb, ifindex, devconf,
2239					NETLINK_CB(in_skb).portid,
2240					nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
2241					NETCONFA_ALL);
2242	if (err < 0) {
2243		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
2244		WARN_ON(err == -EMSGSIZE);
2245		kfree_skb(skb);
2246		goto errout;
2247	}
2248	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2249errout:
2250	if (in_dev)
2251		in_dev_put(in_dev);
2252	dev_put(dev);
2253	return err;
2254}
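/* Example trigger for the doit handler above: iproute2's "ip netconf show
 * dev eth0" ("eth0" being only an example name) sends RTM_GETNETCONF with a
 * NETCONFA_IFINDEX attribute; NETCONFA_IFINDEX_ALL and
 * NETCONFA_IFINDEX_DEFAULT select the "all"/"default" pseudo entries instead
 * of a real device.
 */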
2255
2256static int inet_netconf_dump_devconf(struct sk_buff *skb,
2257				     struct netlink_callback *cb)
2258{
2259	const struct nlmsghdr *nlh = cb->nlh;
2260	struct net *net = sock_net(skb->sk);
2261	struct {
2262		unsigned long ifindex;
2263		unsigned int all_default;
2264	} *ctx = (void *)cb->ctx;
2265	const struct in_device *in_dev;
2266	struct net_device *dev;
2267	int err = 0;
2268
2269	if (cb->strict_check) {
2270		struct netlink_ext_ack *extack = cb->extack;
2271		struct netconfmsg *ncm;
2272
2273		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
2274			NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf dump request");
2275			return -EINVAL;
2276		}
2277
2278		if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
2279			NL_SET_ERR_MSG(extack, "ipv4: Invalid data after header in netconf dump request");
2280			return -EINVAL;
2281		}
2282	}
2283
2284	rcu_read_lock();
2285	for_each_netdev_dump(net, dev, ctx->ifindex) {
2286		in_dev = __in_dev_get_rcu(dev);
2287		if (!in_dev)
2288			continue;
2289		err = inet_netconf_fill_devconf(skb, dev->ifindex,
2290						&in_dev->cnf,
2291						NETLINK_CB(cb->skb).portid,
2292						nlh->nlmsg_seq,
2293						RTM_NEWNETCONF, NLM_F_MULTI,
2294						NETCONFA_ALL);
2295		if (err < 0)
2296			goto done;
2297	}
2298	if (ctx->all_default == 0) {
2299		err = inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
2300						net->ipv4.devconf_all,
2301						NETLINK_CB(cb->skb).portid,
2302						nlh->nlmsg_seq,
2303						RTM_NEWNETCONF, NLM_F_MULTI,
2304						NETCONFA_ALL);
2305		if (err < 0)
2306			goto done;
2307		ctx->all_default++;
2308	}
2309	if (ctx->all_default == 1) {
2310		err = inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
2311						net->ipv4.devconf_dflt,
2312						NETLINK_CB(cb->skb).portid,
2313						nlh->nlmsg_seq,
2314						RTM_NEWNETCONF, NLM_F_MULTI,
2315						NETCONFA_ALL);
2316		if (err < 0)
2317			goto done;
2318		ctx->all_default++;
2319	}
2320done:
2321	rcu_read_unlock();
2322	return err;
2323}
2324
2325#ifdef CONFIG_SYSCTL
2326
2327static void devinet_copy_dflt_conf(struct net *net, int i)
2328{
2329	struct net_device *dev;
2330
2331	rcu_read_lock();
2332	for_each_netdev_rcu(net, dev) {
2333		struct in_device *in_dev;
2334
2335		in_dev = __in_dev_get_rcu(dev);
2336		if (in_dev && !test_bit(i, in_dev->cnf.state))
2337			in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
2338	}
2339	rcu_read_unlock();
2340}
2341
2342/* called with RTNL locked */
2343static void inet_forward_change(struct net *net)
2344{
2345	struct net_device *dev;
2346	int on = IPV4_DEVCONF_ALL(net, FORWARDING);
2347
2348	IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
2349	IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
2350	inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2351				    NETCONFA_FORWARDING,
2352				    NETCONFA_IFINDEX_ALL,
2353				    net->ipv4.devconf_all);
2354	inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2355				    NETCONFA_FORWARDING,
2356				    NETCONFA_IFINDEX_DEFAULT,
2357				    net->ipv4.devconf_dflt);
2358
2359	for_each_netdev(net, dev) {
2360		struct in_device *in_dev;
2361
2362		if (on)
2363			dev_disable_lro(dev);
2364
2365		in_dev = __in_dev_get_rtnl(dev);
2366		if (in_dev) {
2367			IN_DEV_CONF_SET(in_dev, FORWARDING, on);
2368			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2369						    NETCONFA_FORWARDING,
2370						    dev->ifindex, &in_dev->cnf);
2371		}
2372	}
2373}
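/* Context: inet_forward_change() is called from devinet_sysctl_forward() when
 * the "all" forwarding value changes (e.g. via net.ipv4.ip_forward, whose
 * per-netns table entry is pointed at devconf_all below). It propagates the
 * new setting to every in_device, disables LRO when forwarding is switched
 * on, and flips accept_redirects for the "all" entry to the opposite value.
 */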
2374
2375static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
2376{
2377	if (cnf == net->ipv4.devconf_dflt)
2378		return NETCONFA_IFINDEX_DEFAULT;
2379	else if (cnf == net->ipv4.devconf_all)
2380		return NETCONFA_IFINDEX_ALL;
2381	else {
2382		struct in_device *idev
2383			= container_of(cnf, struct in_device, cnf);
2384		return idev->dev->ifindex;
2385	}
2386}
2387
2388static int devinet_conf_proc(struct ctl_table *ctl, int write,
2389			     void *buffer, size_t *lenp, loff_t *ppos)
2390{
2391	int old_value = *(int *)ctl->data;
2392	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2393	int new_value = *(int *)ctl->data;
2394
2395	if (write) {
2396		struct ipv4_devconf *cnf = ctl->extra1;
2397		struct net *net = ctl->extra2;
2398		int i = (int *)ctl->data - cnf->data;
2399		int ifindex;
2400
2401		set_bit(i, cnf->state);
2402
2403		if (cnf == net->ipv4.devconf_dflt)
2404			devinet_copy_dflt_conf(net, i);
2405		if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
2406		    i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
2407			if ((new_value == 0) && (old_value != 0))
2408				rt_cache_flush(net);
2409
2410		if (i == IPV4_DEVCONF_BC_FORWARDING - 1 &&
2411		    new_value != old_value)
2412			rt_cache_flush(net);
2413
2414		if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
2415		    new_value != old_value) {
2416			ifindex = devinet_conf_ifindex(net, cnf);
2417			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2418						    NETCONFA_RP_FILTER,
2419						    ifindex, cnf);
2420		}
2421		if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
2422		    new_value != old_value) {
2423			ifindex = devinet_conf_ifindex(net, cnf);
2424			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2425						    NETCONFA_PROXY_NEIGH,
2426						    ifindex, cnf);
2427		}
2428		if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
2429		    new_value != old_value) {
2430			ifindex = devinet_conf_ifindex(net, cnf);
2431			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2432						    NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
2433						    ifindex, cnf);
2434		}
2435	}
2436
2437	return ret;
2438}
2439
2440static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
2441				  void *buffer, size_t *lenp, loff_t *ppos)
2442{
2443	int *valp = ctl->data;
2444	int val = *valp;
2445	loff_t pos = *ppos;
2446	struct net *net = ctl->extra2;
2447	int ret;
2448
2449	if (write && !ns_capable(net->user_ns, CAP_NET_ADMIN))
2450		return -EPERM;
2451
2452	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2453
2454	if (write && *valp != val) {
2455		if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
2456			if (!rtnl_trylock()) {
2457				/* Restore the original values before restarting */
2458				*valp = val;
2459				*ppos = pos;
2460				return restart_syscall();
2461			}
2462			if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
2463				inet_forward_change(net);
2464			} else {
2465				struct ipv4_devconf *cnf = ctl->extra1;
2466				struct in_device *idev =
2467					container_of(cnf, struct in_device, cnf);
2468				if (*valp)
2469					dev_disable_lro(idev->dev);
2470				inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2471							    NETCONFA_FORWARDING,
2472							    idev->dev->ifindex,
2473							    cnf);
2474			}
2475			rtnl_unlock();
2476			rt_cache_flush(net);
2477		} else
2478			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2479						    NETCONFA_FORWARDING,
2480						    NETCONFA_IFINDEX_DEFAULT,
2481						    net->ipv4.devconf_dflt);
2482	}
2483
2484	return ret;
2485}
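/* Design note on the handler above: it needs RTNL to walk the net devices,
 * but a sysctl write can race with an existing RTNL holder, so it uses
 * rtnl_trylock(); on contention it restores the old value and file position
 * and returns via restart_syscall() so the write is transparently retried.
 */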
2486
2487static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
2488				void *buffer, size_t *lenp, loff_t *ppos)
2489{
2490	int *valp = ctl->data;
2491	int val = *valp;
2492	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2493	struct net *net = ctl->extra2;
2494
2495	if (write && *valp != val)
2496		rt_cache_flush(net);
2497
2498	return ret;
2499}
2500
2501#define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
2502	{ \
2503		.procname	= name, \
2504		.data		= ipv4_devconf.data + \
2505				  IPV4_DEVCONF_ ## attr - 1, \
2506		.maxlen		= sizeof(int), \
2507		.mode		= mval, \
2508		.proc_handler	= proc, \
2509		.extra1		= &ipv4_devconf, \
2510	}
2511
2512#define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
2513	DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)
2514
2515#define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
2516	DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)
2517
2518#define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
2519	DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)
2520
2521#define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
2522	DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
2523
2524static struct devinet_sysctl_table {
2525	struct ctl_table_header *sysctl_header;
2526	struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
2527} devinet_sysctl = {
2528	.devinet_vars = {
2529		DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
2530					     devinet_sysctl_forward),
2531		DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
2532		DEVINET_SYSCTL_RW_ENTRY(BC_FORWARDING, "bc_forwarding"),
2533
2534		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
2535		DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
2536		DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
2537		DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
2538		DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
2539		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
2540					"accept_source_route"),
2541		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
2542		DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
2543		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
2544		DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
2545		DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
2546		DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
2547		DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
2548		DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
2549		DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
2550		DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
2551		DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
2552		DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
2553		DEVINET_SYSCTL_RW_ENTRY(ARP_EVICT_NOCARRIER,
2554					"arp_evict_nocarrier"),
2555		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
2556		DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
2557					"force_igmp_version"),
2558		DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
2559					"igmpv2_unsolicited_report_interval"),
2560		DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
2561					"igmpv3_unsolicited_report_interval"),
2562		DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
2563					"ignore_routes_with_linkdown"),
2564		DEVINET_SYSCTL_RW_ENTRY(DROP_GRATUITOUS_ARP,
2565					"drop_gratuitous_arp"),
2566
2567		DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
2568		DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
2569		DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
2570					      "promote_secondaries"),
2571		DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
2572					      "route_localnet"),
2573		DEVINET_SYSCTL_FLUSHING_ENTRY(DROP_UNICAST_IN_L2_MULTICAST,
2574					      "drop_unicast_in_l2_multicast"),
2575	},
2576};
2577
2578static int __devinet_sysctl_register(struct net *net, char *dev_name,
2579				     int ifindex, struct ipv4_devconf *p)
2580{
2581	int i;
2582	struct devinet_sysctl_table *t;
2583	char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
2584
2585	t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL_ACCOUNT);
2586	if (!t)
2587		goto out;
2588
2589	for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
2590		t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
2591		t->devinet_vars[i].extra1 = p;
2592		t->devinet_vars[i].extra2 = net;
2593	}
2594
2595	snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
2596
2597	t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
2598	if (!t->sysctl_header)
2599		goto free;
2600
2601	p->sysctl = t;
2602
2603	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
2604				    ifindex, p);
2605	return 0;
2606
2607free:
2608	kfree(t);
2609out:
2610	return -ENOMEM;
2611}
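/* The table registered above shows up as /proc/sys/net/ipv4/conf/<dev_name>/
 * (e.g. net/ipv4/conf/eth0/rp_filter, "eth0" being only an example name),
 * alongside the "all" and "default" trees registered from devinet_init_net().
 */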
2612
2613static void __devinet_sysctl_unregister(struct net *net,
2614					struct ipv4_devconf *cnf, int ifindex)
2615{
2616	struct devinet_sysctl_table *t = cnf->sysctl;
2617
2618	if (t) {
2619		cnf->sysctl = NULL;
2620		unregister_net_sysctl_table(t->sysctl_header);
2621		kfree(t);
2622	}
2623
2624	inet_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
2625}
2626
2627static int devinet_sysctl_register(struct in_device *idev)
2628{
2629	int err;
2630
2631	if (!sysctl_dev_name_is_allowed(idev->dev->name))
2632		return -EINVAL;
2633
2634	err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
2635	if (err)
2636		return err;
2637	err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
2638					idev->dev->ifindex, &idev->cnf);
2639	if (err)
2640		neigh_sysctl_unregister(idev->arp_parms);
2641	return err;
2642}
2643
2644static void devinet_sysctl_unregister(struct in_device *idev)
2645{
2646	struct net *net = dev_net(idev->dev);
2647
2648	__devinet_sysctl_unregister(net, &idev->cnf, idev->dev->ifindex);
2649	neigh_sysctl_unregister(idev->arp_parms);
2650}
2651
2652static struct ctl_table ctl_forward_entry[] = {
2653	{
2654		.procname	= "ip_forward",
2655		.data		= &ipv4_devconf.data[
2656					IPV4_DEVCONF_FORWARDING - 1],
2657		.maxlen		= sizeof(int),
2658		.mode		= 0644,
2659		.proc_handler	= devinet_sysctl_forward,
2660		.extra1		= &ipv4_devconf,
2661		.extra2		= &init_net,
2662	},
2663	{ },
2664};
2665#endif
2666
2667static __net_init int devinet_init_net(struct net *net)
2668{
2669	int err;
2670	struct ipv4_devconf *all, *dflt;
2671#ifdef CONFIG_SYSCTL
2672	struct ctl_table *tbl;
2673	struct ctl_table_header *forw_hdr;
2674#endif
2675
2676	err = -ENOMEM;
2677	all = kmemdup(&ipv4_devconf, sizeof(ipv4_devconf), GFP_KERNEL);
2678	if (!all)
2679		goto err_alloc_all;
2680
2681	dflt = kmemdup(&ipv4_devconf_dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
2682	if (!dflt)
2683		goto err_alloc_dflt;
2684
2685#ifdef CONFIG_SYSCTL
2686	tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
2687	if (!tbl)
2688		goto err_alloc_ctl;
2689
2690	tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
2691	tbl[0].extra1 = all;
2692	tbl[0].extra2 = net;
2693#endif
2694
2695	if (!net_eq(net, &init_net)) {
2696		switch (net_inherit_devconf()) {
2697		case 3:
2698			/* copy from the current netns */
2699			memcpy(all, current->nsproxy->net_ns->ipv4.devconf_all,
2700			       sizeof(ipv4_devconf));
2701			memcpy(dflt,
2702			       current->nsproxy->net_ns->ipv4.devconf_dflt,
2703			       sizeof(ipv4_devconf_dflt));
2704			break;
2705		case 0:
2706		case 1:
2707			/* copy from init_net */
2708			memcpy(all, init_net.ipv4.devconf_all,
2709			       sizeof(ipv4_devconf));
2710			memcpy(dflt, init_net.ipv4.devconf_dflt,
2711			       sizeof(ipv4_devconf_dflt));
2712			break;
2713		case 2:
2714			/* use compiled values */
2715			break;
2716		}
2717	}
2718
2719#ifdef CONFIG_SYSCTL
2720	err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
2721	if (err < 0)
2722		goto err_reg_all;
2723
2724	err = __devinet_sysctl_register(net, "default",
2725					NETCONFA_IFINDEX_DEFAULT, dflt);
2726	if (err < 0)
2727		goto err_reg_dflt;
2728
2729	err = -ENOMEM;
2730	forw_hdr = register_net_sysctl_sz(net, "net/ipv4", tbl,
2731					  ARRAY_SIZE(ctl_forward_entry));
2732	if (!forw_hdr)
2733		goto err_reg_ctl;
2734	net->ipv4.forw_hdr = forw_hdr;
2735#endif
2736
2737	net->ipv4.devconf_all = all;
2738	net->ipv4.devconf_dflt = dflt;
2739	return 0;
2740
2741#ifdef CONFIG_SYSCTL
2742err_reg_ctl:
2743	__devinet_sysctl_unregister(net, dflt, NETCONFA_IFINDEX_DEFAULT);
2744err_reg_dflt:
2745	__devinet_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
2746err_reg_all:
2747	kfree(tbl);
2748err_alloc_ctl:
2749#endif
2750	kfree(dflt);
2751err_alloc_dflt:
2752	kfree(all);
2753err_alloc_all:
2754	return err;
2755}
2756
2757static __net_exit void devinet_exit_net(struct net *net)
2758{
2759#ifdef CONFIG_SYSCTL
2760	struct ctl_table *tbl;
2761
2762	tbl = net->ipv4.forw_hdr->ctl_table_arg;
2763	unregister_net_sysctl_table(net->ipv4.forw_hdr);
2764	__devinet_sysctl_unregister(net, net->ipv4.devconf_dflt,
2765				    NETCONFA_IFINDEX_DEFAULT);
2766	__devinet_sysctl_unregister(net, net->ipv4.devconf_all,
2767				    NETCONFA_IFINDEX_ALL);
2768	kfree(tbl);
2769#endif
2770	kfree(net->ipv4.devconf_dflt);
2771	kfree(net->ipv4.devconf_all);
2772}
2773
2774static __net_initdata struct pernet_operations devinet_ops = {
2775	.init = devinet_init_net,
2776	.exit = devinet_exit_net,
2777};
2778
2779static struct rtnl_af_ops inet_af_ops __read_mostly = {
2780	.family		  = AF_INET,
2781	.fill_link_af	  = inet_fill_link_af,
2782	.get_link_af_size = inet_get_link_af_size,
2783	.validate_link_af = inet_validate_link_af,
2784	.set_link_af	  = inet_set_link_af,
2785};
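/* These hooks are driven by the rtnetlink core: fill_link_af/get_link_af_size
 * emit the AF_INET nest of IFLA_AF_SPEC in link dumps, while for RTM_NEWLINK/
 * RTM_SETLINK requests carrying such a nest validate_link_af runs first and
 * set_link_af then applies the IFLA_INET_CONF values to the device's devconf.
 */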
2786
2787void __init devinet_init(void)
2788{
2789	int i;
2790
2791	for (i = 0; i < IN4_ADDR_HSIZE; i++)
2792		INIT_HLIST_HEAD(&inet_addr_lst[i]);
2793
2794	register_pernet_subsys(&devinet_ops);
2795	register_netdevice_notifier(&ip_netdev_notifier);
2796
2797	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
2798
2799	rtnl_af_register(&inet_af_ops);
2800
2801	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0);
2802	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0);
2803	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr,
2804		      RTNL_FLAG_DUMP_UNLOCKED);
2805	rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
2806		      inet_netconf_dump_devconf,
2807		      RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED);
2808}
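/*
 * Illustrative userspace sketch of the RTM_GETADDR dump path registered just
 * above (handled by inet_dump_ifaddr()). This is an example client, not part
 * of this file; error handling is trimmed for brevity.
 */
#if 0	/* example only */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_addr.h>

int main(void)
{
	struct {
		struct nlmsghdr  nlh;
		struct ifaddrmsg ifm;
	} req = {
		.nlh.nlmsg_len	 = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
		.nlh.nlmsg_type	 = RTM_GETADDR,
		.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		.ifm.ifa_family	 = AF_INET,
	};
	char buf[16384];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	ssize_t n;

	/* dump request: one RTM_NEWADDR reply per address, ending in NLMSG_DONE */
	send(fd, &req, req.nlh.nlmsg_len, 0);
	while ((n = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		for (; NLMSG_OK(nlh, n); nlh = NLMSG_NEXT(nlh, n)) {
			struct ifaddrmsg *ifm = NLMSG_DATA(nlh);

			if (nlh->nlmsg_type == NLMSG_DONE)
				goto out;
			if (nlh->nlmsg_type != RTM_NEWADDR)
				continue;
			/* header filled by inet_fill_ifaddr(); IFA_* attrs follow */
			printf("ifindex %u prefixlen %u\n",
			       ifm->ifa_index, ifm->ifa_prefixlen);
		}
	}
out:
	close(fd);
	return 0;
}
#endif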
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	NET3	IP device support routines.
   4 *
   5 *	Derived from the IP parts of dev.c 1.0.19
   6 * 		Authors:	Ross Biro
   7 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
   8 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
   9 *
  10 *	Additional Authors:
  11 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  12 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  13 *
  14 *	Changes:
  15 *		Alexey Kuznetsov:	pa_* fields are replaced with ifaddr
  16 *					lists.
  17 *		Cyrus Durgin:		updated for kmod
  18 *		Matthias Andree:	in devinet_ioctl, compare label and
  19 *					address (4.4BSD alias style support),
  20 *					fall back to comparing just the label
  21 *					if no match found.
  22 */
  23
  24
  25#include <linux/uaccess.h>
  26#include <linux/bitops.h>
  27#include <linux/capability.h>
  28#include <linux/module.h>
  29#include <linux/types.h>
  30#include <linux/kernel.h>
  31#include <linux/sched/signal.h>
  32#include <linux/string.h>
  33#include <linux/mm.h>
  34#include <linux/socket.h>
  35#include <linux/sockios.h>
  36#include <linux/in.h>
  37#include <linux/errno.h>
  38#include <linux/interrupt.h>
  39#include <linux/if_addr.h>
  40#include <linux/if_ether.h>
  41#include <linux/inet.h>
  42#include <linux/netdevice.h>
  43#include <linux/etherdevice.h>
  44#include <linux/skbuff.h>
  45#include <linux/init.h>
  46#include <linux/notifier.h>
  47#include <linux/inetdevice.h>
  48#include <linux/igmp.h>
  49#include <linux/slab.h>
  50#include <linux/hash.h>
  51#ifdef CONFIG_SYSCTL
  52#include <linux/sysctl.h>
  53#endif
  54#include <linux/kmod.h>
  55#include <linux/netconf.h>
  56
  57#include <net/arp.h>
  58#include <net/ip.h>
  59#include <net/route.h>
  60#include <net/ip_fib.h>
  61#include <net/rtnetlink.h>
  62#include <net/net_namespace.h>
  63#include <net/addrconf.h>
  64
  65#define IPV6ONLY_FLAGS	\
  66		(IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
  67		 IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
  68		 IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
  69
  70static struct ipv4_devconf ipv4_devconf = {
  71	.data = {
  72		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
  73		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
  74		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
  75		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
  76		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
  77		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] =  1000 /*ms*/,
  78	},
  79};
  80
  81static struct ipv4_devconf ipv4_devconf_dflt = {
  82	.data = {
  83		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
  84		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
  85		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
  86		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
  87		[IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
  88		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
  89		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] =  1000 /*ms*/,
  90	},
  91};
  92
  93#define IPV4_DEVCONF_DFLT(net, attr) \
  94	IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
  95
  96static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
  97	[IFA_LOCAL]     	= { .type = NLA_U32 },
  98	[IFA_ADDRESS]   	= { .type = NLA_U32 },
  99	[IFA_BROADCAST] 	= { .type = NLA_U32 },
 100	[IFA_LABEL]     	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
 101	[IFA_CACHEINFO]		= { .len = sizeof(struct ifa_cacheinfo) },
 102	[IFA_FLAGS]		= { .type = NLA_U32 },
 103	[IFA_RT_PRIORITY]	= { .type = NLA_U32 },
 104	[IFA_TARGET_NETNSID]	= { .type = NLA_S32 },
 105};
 106
 107struct inet_fill_args {
 108	u32 portid;
 109	u32 seq;
 110	int event;
 111	unsigned int flags;
 112	int netnsid;
 113	int ifindex;
 114};
 115
 116#define IN4_ADDR_HSIZE_SHIFT	8
 117#define IN4_ADDR_HSIZE		(1U << IN4_ADDR_HSIZE_SHIFT)
 118
 119static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
 120
 121static u32 inet_addr_hash(const struct net *net, __be32 addr)
 122{
 123	u32 val = (__force u32) addr ^ net_hash_mix(net);
 124
 125	return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
 126}
 127
 128static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
 129{
 130	u32 hash = inet_addr_hash(net, ifa->ifa_local);
 131
 132	ASSERT_RTNL();
 133	hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
 134}
 135
 136static void inet_hash_remove(struct in_ifaddr *ifa)
 137{
 138	ASSERT_RTNL();
 139	hlist_del_init_rcu(&ifa->hash);
 140}
 141
 142/**
 143 * __ip_dev_find - find the first device with a given source address.
 144 * @net: the net namespace
 145 * @addr: the source address
 146 * @devref: if true, take a reference on the found device
 147 *
 148 * If a caller uses devref=false, it should be protected by RCU, or RTNL
 149 */
 150struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
 151{
 152	struct net_device *result = NULL;
 153	struct in_ifaddr *ifa;
 154
 155	rcu_read_lock();
 156	ifa = inet_lookup_ifaddr_rcu(net, addr);
 157	if (!ifa) {
 158		struct flowi4 fl4 = { .daddr = addr };
 159		struct fib_result res = { 0 };
 160		struct fib_table *local;
 161
 162		/* Fall back to the FIB local table so that communication
 163		 * over loopback subnets works.
 164		 */
 165		local = fib_get_table(net, RT_TABLE_LOCAL);
 166		if (local &&
 167		    !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
 168		    res.type == RTN_LOCAL)
 169			result = FIB_RES_DEV(res);
 170	} else {
 171		result = ifa->ifa_dev->dev;
 172	}
 173	if (result && devref)
 174		dev_hold(result);
 175	rcu_read_unlock();
 176	return result;
 177}
 178EXPORT_SYMBOL(__ip_dev_find);
 179
 180/* called under RCU lock */
 181struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr)
 182{
 183	u32 hash = inet_addr_hash(net, addr);
 184	struct in_ifaddr *ifa;
 185
 186	hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash)
 187		if (ifa->ifa_local == addr &&
 188		    net_eq(dev_net(ifa->ifa_dev->dev), net))
 189			return ifa;
 190
 191	return NULL;
 192}
 193
 194static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
 195
 196static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
 197static BLOCKING_NOTIFIER_HEAD(inetaddr_validator_chain);
 198static void inet_del_ifa(struct in_device *in_dev,
 199			 struct in_ifaddr __rcu **ifap,
 200			 int destroy);
 201#ifdef CONFIG_SYSCTL
 202static int devinet_sysctl_register(struct in_device *idev);
 203static void devinet_sysctl_unregister(struct in_device *idev);
 204#else
 205static int devinet_sysctl_register(struct in_device *idev)
 206{
 207	return 0;
 208}
 209static void devinet_sysctl_unregister(struct in_device *idev)
 210{
 211}
 212#endif
 213
 214/* Locks all the inet devices. */
 215
 216static struct in_ifaddr *inet_alloc_ifa(void)
 217{
 218	return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL);
 219}
 220
 221static void inet_rcu_free_ifa(struct rcu_head *head)
 222{
 223	struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
 224	if (ifa->ifa_dev)
 225		in_dev_put(ifa->ifa_dev);
 226	kfree(ifa);
 227}
 228
 229static void inet_free_ifa(struct in_ifaddr *ifa)
 230{
 231	call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
 232}
 233
 234void in_dev_finish_destroy(struct in_device *idev)
 235{
 236	struct net_device *dev = idev->dev;
 237
 238	WARN_ON(idev->ifa_list);
 239	WARN_ON(idev->mc_list);
 240	kfree(rcu_dereference_protected(idev->mc_hash, 1));
 241#ifdef NET_REFCNT_DEBUG
 242	pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
 243#endif
 244	dev_put(dev);
 245	if (!idev->dead)
 246		pr_err("Freeing alive in_device %p\n", idev);
 247	else
 248		kfree(idev);
 249}
 250EXPORT_SYMBOL(in_dev_finish_destroy);
 251
 252static struct in_device *inetdev_init(struct net_device *dev)
 253{
 254	struct in_device *in_dev;
 255	int err = -ENOMEM;
 256
 257	ASSERT_RTNL();
 258
 259	in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
 260	if (!in_dev)
 261		goto out;
 262	memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
 263			sizeof(in_dev->cnf));
 264	in_dev->cnf.sysctl = NULL;
 265	in_dev->dev = dev;
 266	in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
 267	if (!in_dev->arp_parms)
 268		goto out_kfree;
 269	if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
 270		dev_disable_lro(dev);
 271	/* Reference in_dev->dev */
 272	dev_hold(dev);
 273	/* Account for reference dev->ip_ptr (below) */
 274	refcount_set(&in_dev->refcnt, 1);
 275
 276	err = devinet_sysctl_register(in_dev);
 277	if (err) {
 278		in_dev->dead = 1;
 279		in_dev_put(in_dev);
 280		in_dev = NULL;
 281		goto out;
 282	}
 283	ip_mc_init_dev(in_dev);
 284	if (dev->flags & IFF_UP)
 285		ip_mc_up(in_dev);
 286
 287	/* we can receive as soon as ip_ptr is set -- do this last */
 288	rcu_assign_pointer(dev->ip_ptr, in_dev);
 289out:
 290	return in_dev ?: ERR_PTR(err);
 291out_kfree:
 292	kfree(in_dev);
 293	in_dev = NULL;
 294	goto out;
 295}
 296
 297static void in_dev_rcu_put(struct rcu_head *head)
 298{
 299	struct in_device *idev = container_of(head, struct in_device, rcu_head);
 300	in_dev_put(idev);
 301}
 302
 303static void inetdev_destroy(struct in_device *in_dev)
 304{
 305	struct net_device *dev;
 306	struct in_ifaddr *ifa;
 307
 308	ASSERT_RTNL();
 309
 310	dev = in_dev->dev;
 311
 312	in_dev->dead = 1;
 313
 314	ip_mc_destroy_dev(in_dev);
 315
 316	while ((ifa = rtnl_dereference(in_dev->ifa_list)) != NULL) {
 317		inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
 318		inet_free_ifa(ifa);
 319	}
 320
 321	RCU_INIT_POINTER(dev->ip_ptr, NULL);
 322
 323	devinet_sysctl_unregister(in_dev);
 324	neigh_parms_release(&arp_tbl, in_dev->arp_parms);
 325	arp_ifdown(dev);
 326
 327	call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
 328}
 329
 330int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
 331{
 332	const struct in_ifaddr *ifa;
 333
 334	rcu_read_lock();
 335	in_dev_for_each_ifa_rcu(ifa, in_dev) {
 336		if (inet_ifa_match(a, ifa)) {
 337			if (!b || inet_ifa_match(b, ifa)) {
 338				rcu_read_unlock();
 339				return 1;
 340			}
 341		}
 342	}
 343	rcu_read_unlock();
 344	return 0;
 345}
 346
 347static void __inet_del_ifa(struct in_device *in_dev,
 348			   struct in_ifaddr __rcu **ifap,
 349			   int destroy, struct nlmsghdr *nlh, u32 portid)
 350{
 351	struct in_ifaddr *promote = NULL;
 352	struct in_ifaddr *ifa, *ifa1;
 353	struct in_ifaddr *last_prim;
 354	struct in_ifaddr *prev_prom = NULL;
 355	int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
 356
 357	ASSERT_RTNL();
 358
 359	ifa1 = rtnl_dereference(*ifap);
 360	last_prim = rtnl_dereference(in_dev->ifa_list);
 361	if (in_dev->dead)
 362		goto no_promotions;
 363
 364	/* 1. Deleting a primary ifaddr forces deletion of all secondaries
 365	 * unless alias promotion is set.
 366	 */
 367
 368	if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
 369		struct in_ifaddr __rcu **ifap1 = &ifa1->ifa_next;
 370
 371		while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
 372			if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
 373			    ifa1->ifa_scope <= ifa->ifa_scope)
 374				last_prim = ifa;
 375
 376			if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
 377			    ifa1->ifa_mask != ifa->ifa_mask ||
 378			    !inet_ifa_match(ifa1->ifa_address, ifa)) {
 379				ifap1 = &ifa->ifa_next;
 380				prev_prom = ifa;
 381				continue;
 382			}
 383
 384			if (!do_promote) {
 385				inet_hash_remove(ifa);
 386				*ifap1 = ifa->ifa_next;
 387
 388				rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
 389				blocking_notifier_call_chain(&inetaddr_chain,
 390						NETDEV_DOWN, ifa);
 391				inet_free_ifa(ifa);
 392			} else {
 393				promote = ifa;
 394				break;
 395			}
 396		}
 397	}
 398
 399	/* On promotion all secondaries from the subnet are changing
 400	 * the primary IP; we must remove all their routes silently
 401	 * and later add them back with the new prefsrc. Do this
 402	 * while all addresses are on the device list.
 403	 */
 404	for (ifa = promote; ifa; ifa = rtnl_dereference(ifa->ifa_next)) {
 405		if (ifa1->ifa_mask == ifa->ifa_mask &&
 406		    inet_ifa_match(ifa1->ifa_address, ifa))
 407			fib_del_ifaddr(ifa, ifa1);
 408	}
 409
 410no_promotions:
 411	/* 2. Unlink it */
 412
 413	*ifap = ifa1->ifa_next;
 414	inet_hash_remove(ifa1);
 415
 416	/* 3. Announce address deletion */
 417
 418	/* Send the message first, then call the notifier.
 419	   At first sight, the FIB update triggered by the notifier
 420	   will refer to an already deleted ifaddr, which could confuse
 421	   netlink listeners. It is not true: look, gated sees
 422	   that the route was deleted and, if it still thinks the ifaddr
 423	   is valid, it will try to restore the deleted routes... Grr.
 424	   So this order is correct.
 425	 */
 426	rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
 427	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
 428
 429	if (promote) {
 430		struct in_ifaddr *next_sec;
 431
 432		next_sec = rtnl_dereference(promote->ifa_next);
 433		if (prev_prom) {
 434			struct in_ifaddr *last_sec;
 435
 436			rcu_assign_pointer(prev_prom->ifa_next, next_sec);
 437
 438			last_sec = rtnl_dereference(last_prim->ifa_next);
 439			rcu_assign_pointer(promote->ifa_next, last_sec);
 440			rcu_assign_pointer(last_prim->ifa_next, promote);
 441		}
 442
 443		promote->ifa_flags &= ~IFA_F_SECONDARY;
 444		rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
 445		blocking_notifier_call_chain(&inetaddr_chain,
 446				NETDEV_UP, promote);
 447		for (ifa = next_sec; ifa;
 448		     ifa = rtnl_dereference(ifa->ifa_next)) {
 449			if (ifa1->ifa_mask != ifa->ifa_mask ||
 450			    !inet_ifa_match(ifa1->ifa_address, ifa))
 451					continue;
 452			fib_add_ifaddr(ifa);
 453		}
 454
 455	}
 456	if (destroy)
 457		inet_free_ifa(ifa1);
 458}
 459
 460static void inet_del_ifa(struct in_device *in_dev,
 461			 struct in_ifaddr __rcu **ifap,
 462			 int destroy)
 463{
 464	__inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
 465}
 466
 467static void check_lifetime(struct work_struct *work);
 468
 469static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
 470
 471static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
 472			     u32 portid, struct netlink_ext_ack *extack)
 473{
 474	struct in_ifaddr __rcu **last_primary, **ifap;
 475	struct in_device *in_dev = ifa->ifa_dev;
 476	struct in_validator_info ivi;
 477	struct in_ifaddr *ifa1;
 478	int ret;
 479
 480	ASSERT_RTNL();
 481
 482	if (!ifa->ifa_local) {
 483		inet_free_ifa(ifa);
 484		return 0;
 485	}
 486
 487	ifa->ifa_flags &= ~IFA_F_SECONDARY;
 488	last_primary = &in_dev->ifa_list;
 489
 490	/* Don't set IPv6-only flags on IPv4 addresses */
 491	ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
 492
 493	ifap = &in_dev->ifa_list;
 494	ifa1 = rtnl_dereference(*ifap);
 495
 496	while (ifa1) {
 497		if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
 498		    ifa->ifa_scope <= ifa1->ifa_scope)
 499			last_primary = &ifa1->ifa_next;
 500		if (ifa1->ifa_mask == ifa->ifa_mask &&
 501		    inet_ifa_match(ifa1->ifa_address, ifa)) {
 502			if (ifa1->ifa_local == ifa->ifa_local) {
 503				inet_free_ifa(ifa);
 504				return -EEXIST;
 505			}
 506			if (ifa1->ifa_scope != ifa->ifa_scope) {
 507				inet_free_ifa(ifa);
 508				return -EINVAL;
 509			}
 510			ifa->ifa_flags |= IFA_F_SECONDARY;
 511		}
 512
 513		ifap = &ifa1->ifa_next;
 514		ifa1 = rtnl_dereference(*ifap);
 515	}
 516
 517	/* Allow any devices that wish to register ifaddr validators to weigh
 518	 * in now, before changes are committed.  The rtnl lock is serializing
 519	 * access here, so the state should not change between a validator call
 520	 * and a final notify on commit.  This isn't invoked on promotion under
 521	 * the assumption that validators are checking the address itself, and
 522	 * not the flags.
 523	 */
 524	ivi.ivi_addr = ifa->ifa_address;
 525	ivi.ivi_dev = ifa->ifa_dev;
 526	ivi.extack = extack;
 527	ret = blocking_notifier_call_chain(&inetaddr_validator_chain,
 528					   NETDEV_UP, &ivi);
 529	ret = notifier_to_errno(ret);
 530	if (ret) {
 531		inet_free_ifa(ifa);
 532		return ret;
 533	}
 534
 535	if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
 536		prandom_seed((__force u32) ifa->ifa_local);
 537		ifap = last_primary;
 538	}
 539
 540	rcu_assign_pointer(ifa->ifa_next, *ifap);
 541	rcu_assign_pointer(*ifap, ifa);
 542
 543	inet_hash_insert(dev_net(in_dev->dev), ifa);
 544
 545	cancel_delayed_work(&check_lifetime_work);
 546	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
 547
 548	/* Send message first, then call notifier.
 549	   Notifier will trigger FIB update, so that
 550	   listeners of netlink will know about new ifaddr */
 551	rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
 552	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
 553
 554	return 0;
 555}
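/* Ordering summary for the insertion above: an address that matches an
 * existing primary's subnet is marked IFA_F_SECONDARY and appended at the
 * tail of the walk, while a new primary is linked in at last_primary, i.e.
 * after the last primary ifa1 satisfying ifa->ifa_scope <= ifa1->ifa_scope.
 */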
 556
 557static int inet_insert_ifa(struct in_ifaddr *ifa)
 558{
 559	return __inet_insert_ifa(ifa, NULL, 0, NULL);
 560}
 561
 562static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
 563{
 564	struct in_device *in_dev = __in_dev_get_rtnl(dev);
 565
 566	ASSERT_RTNL();
 567
 568	if (!in_dev) {
 569		inet_free_ifa(ifa);
 570		return -ENOBUFS;
 571	}
 572	ipv4_devconf_setall(in_dev);
 573	neigh_parms_data_state_setall(in_dev->arp_parms);
 574	if (ifa->ifa_dev != in_dev) {
 575		WARN_ON(ifa->ifa_dev);
 576		in_dev_hold(in_dev);
 577		ifa->ifa_dev = in_dev;
 578	}
 579	if (ipv4_is_loopback(ifa->ifa_local))
 580		ifa->ifa_scope = RT_SCOPE_HOST;
 581	return inet_insert_ifa(ifa);
 582}
 583
 584/* Caller must hold RCU or RTNL:
 585 * we don't take a reference on the found in_device.
 586 */
 587struct in_device *inetdev_by_index(struct net *net, int ifindex)
 588{
 589	struct net_device *dev;
 590	struct in_device *in_dev = NULL;
 591
 592	rcu_read_lock();
 593	dev = dev_get_by_index_rcu(net, ifindex);
 594	if (dev)
 595		in_dev = rcu_dereference_rtnl(dev->ip_ptr);
 596	rcu_read_unlock();
 597	return in_dev;
 598}
 599EXPORT_SYMBOL(inetdev_by_index);
 600
 601/* Called only from RTNL semaphored context. No locks. */
 602
 603struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
 604				    __be32 mask)
 605{
 606	struct in_ifaddr *ifa;
 607
 608	ASSERT_RTNL();
 609
 610	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
 611		if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
 612			return ifa;
 613	}
 614	return NULL;
 615}
 616
 617static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
 618{
 619	struct ip_mreqn mreq = {
 620		.imr_multiaddr.s_addr = ifa->ifa_address,
 621		.imr_ifindex = ifa->ifa_dev->dev->ifindex,
 622	};
 623	int ret;
 624
 625	ASSERT_RTNL();
 626
 627	lock_sock(sk);
 628	if (join)
 629		ret = ip_mc_join_group(sk, &mreq);
 630	else
 631		ret = ip_mc_leave_group(sk, &mreq);
 632	release_sock(sk);
 633
 634	return ret;
 635}
 636
 637static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
 638			    struct netlink_ext_ack *extack)
 639{
 640	struct net *net = sock_net(skb->sk);
 641	struct in_ifaddr __rcu **ifap;
 642	struct nlattr *tb[IFA_MAX+1];
 643	struct in_device *in_dev;
 644	struct ifaddrmsg *ifm;
 645	struct in_ifaddr *ifa;
 646
 647	int err = -EINVAL;
 648
 649	ASSERT_RTNL();
 650
 651	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
 652				     ifa_ipv4_policy, extack);
 653	if (err < 0)
 654		goto errout;
 655
 656	ifm = nlmsg_data(nlh);
 657	in_dev = inetdev_by_index(net, ifm->ifa_index);
 658	if (!in_dev) {
 659		err = -ENODEV;
 660		goto errout;
 661	}
 662
 663	for (ifap = &in_dev->ifa_list; (ifa = rtnl_dereference(*ifap)) != NULL;
 664	     ifap = &ifa->ifa_next) {
 665		if (tb[IFA_LOCAL] &&
 666		    ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
 667			continue;
 668
 669		if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
 670			continue;
 671
 672		if (tb[IFA_ADDRESS] &&
 673		    (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
 674		    !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
 675			continue;
 676
 677		if (ipv4_is_multicast(ifa->ifa_address))
 678			ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
 679		__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
 680		return 0;
 681	}
 682
 683	err = -EADDRNOTAVAIL;
 684errout:
 685	return err;
 686}
 687
 688#define INFINITY_LIFE_TIME	0xFFFFFFFF
 689
 690static void check_lifetime(struct work_struct *work)
 691{
 692	unsigned long now, next, next_sec, next_sched;
 693	struct in_ifaddr *ifa;
 694	struct hlist_node *n;
 695	int i;
 696
 697	now = jiffies;
 698	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
 699
 700	for (i = 0; i < IN4_ADDR_HSIZE; i++) {
 701		bool change_needed = false;
 702
 703		rcu_read_lock();
 704		hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
 705			unsigned long age;
 706
 707			if (ifa->ifa_flags & IFA_F_PERMANENT)
 708				continue;
 709
 710			/* We try to batch several events at once. */
 711			age = (now - ifa->ifa_tstamp +
 712			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
 713
 714			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
 715			    age >= ifa->ifa_valid_lft) {
 716				change_needed = true;
 717			} else if (ifa->ifa_preferred_lft ==
 718				   INFINITY_LIFE_TIME) {
 719				continue;
 720			} else if (age >= ifa->ifa_preferred_lft) {
 721				if (time_before(ifa->ifa_tstamp +
 722						ifa->ifa_valid_lft * HZ, next))
 723					next = ifa->ifa_tstamp +
 724					       ifa->ifa_valid_lft * HZ;
 725
 726				if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
 727					change_needed = true;
 728			} else if (time_before(ifa->ifa_tstamp +
 729					       ifa->ifa_preferred_lft * HZ,
 730					       next)) {
 731				next = ifa->ifa_tstamp +
 732				       ifa->ifa_preferred_lft * HZ;
 733			}
 734		}
 735		rcu_read_unlock();
 736		if (!change_needed)
 737			continue;
 738		rtnl_lock();
 739		hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
 740			unsigned long age;
 741
 742			if (ifa->ifa_flags & IFA_F_PERMANENT)
 743				continue;
 744
 745			/* We try to batch several events at once. */
 746			age = (now - ifa->ifa_tstamp +
 747			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
 748
 749			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
 750			    age >= ifa->ifa_valid_lft) {
 751				struct in_ifaddr __rcu **ifap;
 752				struct in_ifaddr *tmp;
 753
 754				ifap = &ifa->ifa_dev->ifa_list;
 755				tmp = rtnl_dereference(*ifap);
 756				while (tmp) {
 757					if (tmp == ifa) {
 758						inet_del_ifa(ifa->ifa_dev,
 759							     ifap, 1);
 760						break;
 761					}
 762					ifap = &tmp->ifa_next;
 763					tmp = rtnl_dereference(*ifap);
 764				}
 765			} else if (ifa->ifa_preferred_lft !=
 766				   INFINITY_LIFE_TIME &&
 767				   age >= ifa->ifa_preferred_lft &&
 768				   !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
 769				ifa->ifa_flags |= IFA_F_DEPRECATED;
 770				rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
 771			}
 772		}
 773		rtnl_unlock();
 774	}
 775
 776	next_sec = round_jiffies_up(next);
 777	next_sched = next;
 778
 779	/* If rounded timeout is accurate enough, accept it. */
 780	if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
 781		next_sched = next_sec;
 782
 783	now = jiffies;
 784	/* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
 785	if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
 786		next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
 787
 788	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work,
 789			next_sched - now);
 790}
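/* Structure of check_lifetime(): a first pass over each hash chain under RCU
 * only decides whether anything needs to change (cheap and lockless); only
 * then is the chain rewalked under RTNL to delete expired addresses or mark
 * them IFA_F_DEPRECATED. The work is rescheduled for the next expiry, clamped
 * to no sooner than ADDRCONF_TIMER_FUZZ_MAX from now.
 */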
 791
 792static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
 793			     __u32 prefered_lft)
 794{
 795	unsigned long timeout;
 796
 797	ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
 798
 799	timeout = addrconf_timeout_fixup(valid_lft, HZ);
 800	if (addrconf_finite_timeout(timeout))
 801		ifa->ifa_valid_lft = timeout;
 802	else
 803		ifa->ifa_flags |= IFA_F_PERMANENT;
 804
 805	timeout = addrconf_timeout_fixup(prefered_lft, HZ);
 806	if (addrconf_finite_timeout(timeout)) {
 807		if (timeout == 0)
 808			ifa->ifa_flags |= IFA_F_DEPRECATED;
 809		ifa->ifa_preferred_lft = timeout;
 810	}
 811	ifa->ifa_tstamp = jiffies;
 812	if (!ifa->ifa_cstamp)
 813		ifa->ifa_cstamp = ifa->ifa_tstamp;
 814}
 815
 816static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
 817				       __u32 *pvalid_lft, __u32 *pprefered_lft,
 818				       struct netlink_ext_ack *extack)
 819{
 820	struct nlattr *tb[IFA_MAX+1];
 821	struct in_ifaddr *ifa;
 822	struct ifaddrmsg *ifm;
 823	struct net_device *dev;
 824	struct in_device *in_dev;
 825	int err;
 826
 827	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
 828				     ifa_ipv4_policy, extack);
 829	if (err < 0)
 830		goto errout;
 831
 832	ifm = nlmsg_data(nlh);
 833	err = -EINVAL;
 834	if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
 835		goto errout;
 836
 837	dev = __dev_get_by_index(net, ifm->ifa_index);
 838	err = -ENODEV;
 839	if (!dev)
 840		goto errout;
 841
 842	in_dev = __in_dev_get_rtnl(dev);
 843	err = -ENOBUFS;
 844	if (!in_dev)
 845		goto errout;
 846
 847	ifa = inet_alloc_ifa();
 848	if (!ifa)
 849		/*
 850		 * A potential in_dev allocation can be left alive; it stays
 851		 * assigned to its device and is destroyed with it.
 852		 */
 853		goto errout;
 854
 855	ipv4_devconf_setall(in_dev);
 856	neigh_parms_data_state_setall(in_dev->arp_parms);
 857	in_dev_hold(in_dev);
 858
 859	if (!tb[IFA_ADDRESS])
 860		tb[IFA_ADDRESS] = tb[IFA_LOCAL];
 861
 862	INIT_HLIST_NODE(&ifa->hash);
 863	ifa->ifa_prefixlen = ifm->ifa_prefixlen;
 864	ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
 865	ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
 866					 ifm->ifa_flags;
 867	ifa->ifa_scope = ifm->ifa_scope;
 868	ifa->ifa_dev = in_dev;
 869
 870	ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
 871	ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
 872
 873	if (tb[IFA_BROADCAST])
 874		ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
 875
 876	if (tb[IFA_LABEL])
 877		nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
 878	else
 879		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
 880
 881	if (tb[IFA_RT_PRIORITY])
 882		ifa->ifa_rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
 883
 884	if (tb[IFA_CACHEINFO]) {
 885		struct ifa_cacheinfo *ci;
 886
 887		ci = nla_data(tb[IFA_CACHEINFO]);
 888		if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
 889			err = -EINVAL;
 890			goto errout_free;
 891		}
 892		*pvalid_lft = ci->ifa_valid;
 893		*pprefered_lft = ci->ifa_prefered;
 894	}
 895
 896	return ifa;
 897
 898errout_free:
 899	inet_free_ifa(ifa);
 900errout:
 901	return ERR_PTR(err);
 902}
 903
 904static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
 905{
 906	struct in_device *in_dev = ifa->ifa_dev;
 907	struct in_ifaddr *ifa1;
 908
 909	if (!ifa->ifa_local)
 910		return NULL;
 911
 912	in_dev_for_each_ifa_rtnl(ifa1, in_dev) {
 913		if (ifa1->ifa_mask == ifa->ifa_mask &&
 914		    inet_ifa_match(ifa1->ifa_address, ifa) &&
 915		    ifa1->ifa_local == ifa->ifa_local)
 916			return ifa1;
 917	}
 918	return NULL;
 919}
 920
 921static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
 922			    struct netlink_ext_ack *extack)
 923{
 924	struct net *net = sock_net(skb->sk);
 925	struct in_ifaddr *ifa;
 926	struct in_ifaddr *ifa_existing;
 927	__u32 valid_lft = INFINITY_LIFE_TIME;
 928	__u32 prefered_lft = INFINITY_LIFE_TIME;
 929
 930	ASSERT_RTNL();
 931
 932	ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft, extack);
 933	if (IS_ERR(ifa))
 934		return PTR_ERR(ifa);
 935
 936	ifa_existing = find_matching_ifa(ifa);
 937	if (!ifa_existing) {
 938		/* It would be best to check for !NLM_F_CREATE here but
 939		 * userspace already relies on not having to provide this.
 940		 */
 941		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
 942		if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
 943			int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
 944					       true, ifa);
 945
 946			if (ret < 0) {
 947				inet_free_ifa(ifa);
 948				return ret;
 949			}
 950		}
 951		return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid,
 952					 extack);
 953	} else {
 954		u32 new_metric = ifa->ifa_rt_priority;
 955
 956		inet_free_ifa(ifa);
 957
 958		if (nlh->nlmsg_flags & NLM_F_EXCL ||
 959		    !(nlh->nlmsg_flags & NLM_F_REPLACE))
 960			return -EEXIST;
 961		ifa = ifa_existing;
 962
 963		if (ifa->ifa_rt_priority != new_metric) {
 964			fib_modify_prefix_metric(ifa, new_metric);
 965			ifa->ifa_rt_priority = new_metric;
 966		}
 967
 968		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
 969		cancel_delayed_work(&check_lifetime_work);
 970		queue_delayed_work(system_power_efficient_wq,
 971				&check_lifetime_work, 0);
 972		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
 973	}
 974	return 0;
 975}
 976
 977/*
 978 *	Determine a default network mask, based on the IP address.
 979 */
 980
 981static int inet_abc_len(__be32 addr)
 982{
 983	int rc = -1;	/* Something else, probably a multicast. */
 984
 985	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
 986		rc = 0;
 987	else {
 988		__u32 haddr = ntohl(addr);
 989		if (IN_CLASSA(haddr))
 990			rc = 8;
 991		else if (IN_CLASSB(haddr))
 992			rc = 16;
 993		else if (IN_CLASSC(haddr))
 994			rc = 24;
 995		else if (IN_CLASSE(haddr))
 996			rc = 32;
 997	}
 998
 999	return rc;
1000}
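/* Worked examples for the classful fallback above: 10.0.0.1 is class A, so
 * the default prefix length is 8; 172.16.0.1 (class B) gives 16; 192.168.1.1
 * (class C) gives 24; 0.0.0.0 and 255.255.255.255 give 0; and a class D
 * multicast address falls through to -1.
 */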
1001
1002
1003int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
1004{
1005	struct sockaddr_in sin_orig;
1006	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr->ifr_addr;
1007	struct in_ifaddr __rcu **ifap = NULL;
1008	struct in_device *in_dev;
1009	struct in_ifaddr *ifa = NULL;
1010	struct net_device *dev;
1011	char *colon;
1012	int ret = -EFAULT;
1013	int tryaddrmatch = 0;
1014
1015	ifr->ifr_name[IFNAMSIZ - 1] = 0;
1016
1017	/* save original address for comparison */
1018	memcpy(&sin_orig, sin, sizeof(*sin));
1019
1020	colon = strchr(ifr->ifr_name, ':');
1021	if (colon)
1022		*colon = 0;
1023
1024	dev_load(net, ifr->ifr_name);
1025
1026	switch (cmd) {
1027	case SIOCGIFADDR:	/* Get interface address */
1028	case SIOCGIFBRDADDR:	/* Get the broadcast address */
1029	case SIOCGIFDSTADDR:	/* Get the destination address */
1030	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
1031		/* Note that these ioctls will not sleep,
1032		   so that we do not impose a lock.
1033		   One day we will be forced to put shlock here (I mean SMP)
1034		 */
1035		tryaddrmatch = (sin_orig.sin_family == AF_INET);
1036		memset(sin, 0, sizeof(*sin));
1037		sin->sin_family = AF_INET;
1038		break;
1039
1040	case SIOCSIFFLAGS:
1041		ret = -EPERM;
1042		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1043			goto out;
1044		break;
1045	case SIOCSIFADDR:	/* Set interface address (and family) */
1046	case SIOCSIFBRDADDR:	/* Set the broadcast address */
1047	case SIOCSIFDSTADDR:	/* Set the destination address */
1048	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
1049		ret = -EPERM;
1050		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1051			goto out;
1052		ret = -EINVAL;
1053		if (sin->sin_family != AF_INET)
1054			goto out;
1055		break;
1056	default:
1057		ret = -EINVAL;
1058		goto out;
1059	}
1060
1061	rtnl_lock();
1062
1063	ret = -ENODEV;
1064	dev = __dev_get_by_name(net, ifr->ifr_name);
1065	if (!dev)
1066		goto done;
1067
1068	if (colon)
1069		*colon = ':';
1070
1071	in_dev = __in_dev_get_rtnl(dev);
1072	if (in_dev) {
1073		if (tryaddrmatch) {
1074			/* Matthias Andree */
1075			/* compare label and address (4.4BSD style) */
1076			/* note: we only do this for a limited set of ioctls
1077			   and only if the original address family was AF_INET.
1078			   This is checked above. */
1079
1080			for (ifap = &in_dev->ifa_list;
1081			     (ifa = rtnl_dereference(*ifap)) != NULL;
1082			     ifap = &ifa->ifa_next) {
1083				if (!strcmp(ifr->ifr_name, ifa->ifa_label) &&
1084				    sin_orig.sin_addr.s_addr ==
1085							ifa->ifa_local) {
1086					break; /* found */
1087				}
1088			}
1089		}
 1090		/* we didn't get a match; maybe the application is
 1091		   4.3BSD-style and passed in junk, so we fall back to
 1092		   comparing just the label */
1093		if (!ifa) {
1094			for (ifap = &in_dev->ifa_list;
1095			     (ifa = rtnl_dereference(*ifap)) != NULL;
1096			     ifap = &ifa->ifa_next)
1097				if (!strcmp(ifr->ifr_name, ifa->ifa_label))
1098					break;
1099		}
1100	}
1101
1102	ret = -EADDRNOTAVAIL;
1103	if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
1104		goto done;
1105
1106	switch (cmd) {
1107	case SIOCGIFADDR:	/* Get interface address */
1108		ret = 0;
1109		sin->sin_addr.s_addr = ifa->ifa_local;
1110		break;
1111
1112	case SIOCGIFBRDADDR:	/* Get the broadcast address */
1113		ret = 0;
1114		sin->sin_addr.s_addr = ifa->ifa_broadcast;
1115		break;
1116
1117	case SIOCGIFDSTADDR:	/* Get the destination address */
1118		ret = 0;
1119		sin->sin_addr.s_addr = ifa->ifa_address;
1120		break;
1121
1122	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
1123		ret = 0;
1124		sin->sin_addr.s_addr = ifa->ifa_mask;
1125		break;
1126
1127	case SIOCSIFFLAGS:
1128		if (colon) {
1129			ret = -EADDRNOTAVAIL;
1130			if (!ifa)
1131				break;
1132			ret = 0;
1133			if (!(ifr->ifr_flags & IFF_UP))
1134				inet_del_ifa(in_dev, ifap, 1);
1135			break;
1136		}
1137		ret = dev_change_flags(dev, ifr->ifr_flags, NULL);
1138		break;
1139
1140	case SIOCSIFADDR:	/* Set interface address (and family) */
1141		ret = -EINVAL;
1142		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1143			break;
1144
1145		if (!ifa) {
1146			ret = -ENOBUFS;
1147			ifa = inet_alloc_ifa();
1148			if (!ifa)
1149				break;
1150			INIT_HLIST_NODE(&ifa->hash);
1151			if (colon)
1152				memcpy(ifa->ifa_label, ifr->ifr_name, IFNAMSIZ);
1153			else
1154				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1155		} else {
1156			ret = 0;
1157			if (ifa->ifa_local == sin->sin_addr.s_addr)
1158				break;
1159			inet_del_ifa(in_dev, ifap, 0);
1160			ifa->ifa_broadcast = 0;
1161			ifa->ifa_scope = 0;
1162		}
1163
1164		ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
1165
1166		if (!(dev->flags & IFF_POINTOPOINT)) {
1167			ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
1168			ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
1169			if ((dev->flags & IFF_BROADCAST) &&
1170			    ifa->ifa_prefixlen < 31)
1171				ifa->ifa_broadcast = ifa->ifa_address |
1172						     ~ifa->ifa_mask;
1173		} else {
1174			ifa->ifa_prefixlen = 32;
1175			ifa->ifa_mask = inet_make_mask(32);
1176		}
1177		set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
1178		ret = inet_set_ifa(dev, ifa);
1179		break;
1180
1181	case SIOCSIFBRDADDR:	/* Set the broadcast address */
1182		ret = 0;
1183		if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
1184			inet_del_ifa(in_dev, ifap, 0);
1185			ifa->ifa_broadcast = sin->sin_addr.s_addr;
1186			inet_insert_ifa(ifa);
1187		}
1188		break;
1189
1190	case SIOCSIFDSTADDR:	/* Set the destination address */
1191		ret = 0;
1192		if (ifa->ifa_address == sin->sin_addr.s_addr)
1193			break;
1194		ret = -EINVAL;
1195		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1196			break;
1197		ret = 0;
1198		inet_del_ifa(in_dev, ifap, 0);
1199		ifa->ifa_address = sin->sin_addr.s_addr;
1200		inet_insert_ifa(ifa);
1201		break;
1202
1203	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
1204
1205		/*
1206		 *	The mask we set must be legal.
1207		 */
1208		ret = -EINVAL;
1209		if (bad_mask(sin->sin_addr.s_addr, 0))
1210			break;
1211		ret = 0;
1212		if (ifa->ifa_mask != sin->sin_addr.s_addr) {
1213			__be32 old_mask = ifa->ifa_mask;
1214			inet_del_ifa(in_dev, ifap, 0);
1215			ifa->ifa_mask = sin->sin_addr.s_addr;
1216			ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
1217
 1218			/* If the current broadcast address was derived
 1219			 * from the old netmask, recalculate it for the
 1220			 * new one. Otherwise it's a funny address, so
 1221			 * don't touch it since the user seems to know
 1222			 * what (s)he's doing...
 1223			 */
1224			if ((dev->flags & IFF_BROADCAST) &&
1225			    (ifa->ifa_prefixlen < 31) &&
1226			    (ifa->ifa_broadcast ==
1227			     (ifa->ifa_local|~old_mask))) {
1228				ifa->ifa_broadcast = (ifa->ifa_local |
1229						      ~sin->sin_addr.s_addr);
1230			}
1231			inet_insert_ifa(ifa);
1232		}
1233		break;
1234	}
1235done:
1236	rtnl_unlock();
1237out:
1238	return ret;
1239}
1240
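/*
 * inet_gifconf - fill the SIOCGIFCONF buffer with one struct ifreq per IPv4
 * address configured on this device.  A NULL @buf only counts the space a
 * real dump would need.
 */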
1241static int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
1242{
1243	struct in_device *in_dev = __in_dev_get_rtnl(dev);
1244	const struct in_ifaddr *ifa;
1245	struct ifreq ifr;
1246	int done = 0;
1247
1248	if (WARN_ON(size > sizeof(struct ifreq)))
1249		goto out;
1250
1251	if (!in_dev)
1252		goto out;
1253
1254	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1255		if (!buf) {
1256			done += size;
1257			continue;
1258		}
1259		if (len < size)
1260			break;
1261		memset(&ifr, 0, sizeof(struct ifreq));
1262		strcpy(ifr.ifr_name, ifa->ifa_label);
1263
1264		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
1265		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
1266								ifa->ifa_local;
1267
1268		if (copy_to_user(buf + done, &ifr, size)) {
1269			done = -EFAULT;
1270			break;
1271		}
1272		len  -= size;
1273		done += size;
1274	}
1275out:
1276	return done;
1277}
1278
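/*
 * in_dev_select_addr - return the first primary (non-secondary) address on
 * @in_dev that is not link-scoped and whose scope satisfies @scope
 * (ifa_scope <= scope), or 0 if none matches.
 */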
1279static __be32 in_dev_select_addr(const struct in_device *in_dev,
1280				 int scope)
1281{
1282	const struct in_ifaddr *ifa;
1283
1284	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1285		if (ifa->ifa_flags & IFA_F_SECONDARY)
1286			continue;
1287		if (ifa->ifa_scope != RT_SCOPE_LINK &&
1288		    ifa->ifa_scope <= scope)
1289			return ifa->ifa_local;
1290	}
1291
1292	return 0;
1293}
1294
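/*
 * inet_select_addr - pick a source address on @dev for talking to @dst
 * within @scope.  A primary address in the same subnet as @dst is preferred,
 * then any in-scope primary address on @dev; failing that, the L3 master
 * (VRF) device and finally the other devices in the namespace are tried.
 */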
1295__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
1296{
1297	const struct in_ifaddr *ifa;
1298	__be32 addr = 0;
1299	unsigned char localnet_scope = RT_SCOPE_HOST;
1300	struct in_device *in_dev;
1301	struct net *net = dev_net(dev);
1302	int master_idx;
1303
1304	rcu_read_lock();
1305	in_dev = __in_dev_get_rcu(dev);
1306	if (!in_dev)
1307		goto no_in_dev;
1308
1309	if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
1310		localnet_scope = RT_SCOPE_LINK;
1311
1312	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1313		if (ifa->ifa_flags & IFA_F_SECONDARY)
1314			continue;
1315		if (min(ifa->ifa_scope, localnet_scope) > scope)
1316			continue;
1317		if (!dst || inet_ifa_match(dst, ifa)) {
1318			addr = ifa->ifa_local;
1319			break;
1320		}
1321		if (!addr)
1322			addr = ifa->ifa_local;
1323	}
1324
1325	if (addr)
1326		goto out_unlock;
1327no_in_dev:
1328	master_idx = l3mdev_master_ifindex_rcu(dev);
1329
1330	/* For VRFs, the VRF device takes the place of the loopback device,
1331	 * with addresses on it being preferred.  Note in such cases the
1332	 * loopback device will be among the devices that fail the master_idx
1333	 * equality check in the loop below.
1334	 */
1335	if (master_idx &&
1336	    (dev = dev_get_by_index_rcu(net, master_idx)) &&
1337	    (in_dev = __in_dev_get_rcu(dev))) {
1338		addr = in_dev_select_addr(in_dev, scope);
1339		if (addr)
1340			goto out_unlock;
1341	}
1342
 1343	/* Non-loopback addresses, even ones configured on the loopback
 1344	   device, should be preferred in this case. It is important that
 1345	   lo is the first interface in the dev_base list.
 1346	 */
1347	for_each_netdev_rcu(net, dev) {
1348		if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1349			continue;
1350
1351		in_dev = __in_dev_get_rcu(dev);
1352		if (!in_dev)
1353			continue;
1354
1355		addr = in_dev_select_addr(in_dev, scope);
1356		if (addr)
1357			goto out_unlock;
1358	}
1359out_unlock:
1360	rcu_read_unlock();
1361	return addr;
1362}
1363EXPORT_SYMBOL(inet_select_addr);
1364
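/*
 * confirm_addr_indev - helper for inet_confirm_addr(): check a single
 * in_device against the @local/@dst/@scope wildcards and return the
 * confirmed (or, when @local is 0, autoselected) address, or 0 on no match.
 */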
1365static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
1366			      __be32 local, int scope)
1367{
1368	unsigned char localnet_scope = RT_SCOPE_HOST;
1369	const struct in_ifaddr *ifa;
1370	__be32 addr = 0;
1371	int same = 0;
1372
1373	if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
1374		localnet_scope = RT_SCOPE_LINK;
1375
1376	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1377		unsigned char min_scope = min(ifa->ifa_scope, localnet_scope);
1378
1379		if (!addr &&
1380		    (local == ifa->ifa_local || !local) &&
1381		    min_scope <= scope) {
1382			addr = ifa->ifa_local;
1383			if (same)
1384				break;
1385		}
1386		if (!same) {
1387			same = (!local || inet_ifa_match(local, ifa)) &&
1388				(!dst || inet_ifa_match(dst, ifa));
1389			if (same && addr) {
1390				if (local || !dst)
1391					break;
 1392				/* Is the selected addr in the dst subnet? */
1393				if (inet_ifa_match(addr, ifa))
1394					break;
1395				/* No, then can we use new local src? */
1396				if (min_scope <= scope) {
1397					addr = ifa->ifa_local;
1398					break;
1399				}
1400				/* search for large dst subnet for addr */
1401				same = 0;
1402			}
1403		}
1404	}
1405
1406	return same ? addr : 0;
1407}
1408
1409/*
1410 * Confirm that local IP address exists using wildcards:
1411 * - net: netns to check, cannot be NULL
1412 * - in_dev: only on this interface, NULL=any interface
1413 * - dst: only in the same subnet as dst, 0=any dst
1414 * - local: address, 0=autoselect the local address
1415 * - scope: maximum allowed scope value for the local address
1416 */
1417__be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
1418			 __be32 dst, __be32 local, int scope)
1419{
1420	__be32 addr = 0;
1421	struct net_device *dev;
1422
1423	if (in_dev)
1424		return confirm_addr_indev(in_dev, dst, local, scope);
1425
1426	rcu_read_lock();
1427	for_each_netdev_rcu(net, dev) {
1428		in_dev = __in_dev_get_rcu(dev);
1429		if (in_dev) {
1430			addr = confirm_addr_indev(in_dev, dst, local, scope);
1431			if (addr)
1432				break;
1433		}
1434	}
1435	rcu_read_unlock();
1436
1437	return addr;
1438}
1439EXPORT_SYMBOL(inet_confirm_addr);
1440
1441/*
1442 *	Device notifier
1443 */
1444
1445int register_inetaddr_notifier(struct notifier_block *nb)
1446{
1447	return blocking_notifier_chain_register(&inetaddr_chain, nb);
1448}
1449EXPORT_SYMBOL(register_inetaddr_notifier);
1450
1451int unregister_inetaddr_notifier(struct notifier_block *nb)
1452{
1453	return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
1454}
1455EXPORT_SYMBOL(unregister_inetaddr_notifier);
1456
1457int register_inetaddr_validator_notifier(struct notifier_block *nb)
1458{
1459	return blocking_notifier_chain_register(&inetaddr_validator_chain, nb);
1460}
1461EXPORT_SYMBOL(register_inetaddr_validator_notifier);
1462
1463int unregister_inetaddr_validator_notifier(struct notifier_block *nb)
1464{
1465	return blocking_notifier_chain_unregister(&inetaddr_validator_chain,
1466	    nb);
1467}
1468EXPORT_SYMBOL(unregister_inetaddr_validator_notifier);
1469
1470/* Rename ifa_labels for a device name change. Make some effort to preserve
1471 * existing alias numbering and to create unique labels if possible.
 1472 */
1473static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1474{
1475	struct in_ifaddr *ifa;
1476	int named = 0;
1477
1478	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1479		char old[IFNAMSIZ], *dot;
1480
1481		memcpy(old, ifa->ifa_label, IFNAMSIZ);
1482		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1483		if (named++ == 0)
1484			goto skip;
1485		dot = strchr(old, ':');
1486		if (!dot) {
1487			sprintf(old, ":%d", named);
1488			dot = old;
1489		}
1490		if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
1491			strcat(ifa->ifa_label, dot);
1492		else
1493			strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1494skip:
1495		rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
1496	}
1497}
1498
1499static bool inetdev_valid_mtu(unsigned int mtu)
1500{
1501	return mtu >= IPV4_MIN_MTU;
1502}
1503
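/*
 * inetdev_send_gratuitous_arp - advertise every local address with a
 * gratuitous ARP request so that neighbours refresh their caches after a
 * link or hardware address change.
 */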
1504static void inetdev_send_gratuitous_arp(struct net_device *dev,
1505					struct in_device *in_dev)
1506
1507{
1508	const struct in_ifaddr *ifa;
1509
1510	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1511		arp_send(ARPOP_REQUEST, ETH_P_ARP,
1512			 ifa->ifa_local, dev,
1513			 ifa->ifa_local, NULL,
1514			 dev->dev_addr, NULL);
1515	}
1516}
1517
1518/* Called only under RTNL semaphore */
1519
1520static int inetdev_event(struct notifier_block *this, unsigned long event,
1521			 void *ptr)
1522{
1523	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1524	struct in_device *in_dev = __in_dev_get_rtnl(dev);
1525
1526	ASSERT_RTNL();
1527
1528	if (!in_dev) {
1529		if (event == NETDEV_REGISTER) {
1530			in_dev = inetdev_init(dev);
1531			if (IS_ERR(in_dev))
1532				return notifier_from_errno(PTR_ERR(in_dev));
1533			if (dev->flags & IFF_LOOPBACK) {
1534				IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
1535				IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
1536			}
1537		} else if (event == NETDEV_CHANGEMTU) {
1538			/* Re-enabling IP */
1539			if (inetdev_valid_mtu(dev->mtu))
1540				in_dev = inetdev_init(dev);
1541		}
1542		goto out;
1543	}
1544
1545	switch (event) {
1546	case NETDEV_REGISTER:
1547		pr_debug("%s: bug\n", __func__);
1548		RCU_INIT_POINTER(dev->ip_ptr, NULL);
1549		break;
1550	case NETDEV_UP:
1551		if (!inetdev_valid_mtu(dev->mtu))
1552			break;
1553		if (dev->flags & IFF_LOOPBACK) {
1554			struct in_ifaddr *ifa = inet_alloc_ifa();
1555
1556			if (ifa) {
1557				INIT_HLIST_NODE(&ifa->hash);
1558				ifa->ifa_local =
1559				  ifa->ifa_address = htonl(INADDR_LOOPBACK);
1560				ifa->ifa_prefixlen = 8;
1561				ifa->ifa_mask = inet_make_mask(8);
1562				in_dev_hold(in_dev);
1563				ifa->ifa_dev = in_dev;
1564				ifa->ifa_scope = RT_SCOPE_HOST;
1565				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1566				set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
1567						 INFINITY_LIFE_TIME);
1568				ipv4_devconf_setall(in_dev);
1569				neigh_parms_data_state_setall(in_dev->arp_parms);
1570				inet_insert_ifa(ifa);
1571			}
1572		}
1573		ip_mc_up(in_dev);
1574		/* fall through */
1575	case NETDEV_CHANGEADDR:
1576		if (!IN_DEV_ARP_NOTIFY(in_dev))
1577			break;
1578		/* fall through */
1579	case NETDEV_NOTIFY_PEERS:
1580		/* Send gratuitous ARP to notify of link change */
1581		inetdev_send_gratuitous_arp(dev, in_dev);
1582		break;
1583	case NETDEV_DOWN:
1584		ip_mc_down(in_dev);
1585		break;
1586	case NETDEV_PRE_TYPE_CHANGE:
1587		ip_mc_unmap(in_dev);
1588		break;
1589	case NETDEV_POST_TYPE_CHANGE:
1590		ip_mc_remap(in_dev);
1591		break;
1592	case NETDEV_CHANGEMTU:
1593		if (inetdev_valid_mtu(dev->mtu))
1594			break;
1595		/* disable IP when MTU is not enough */
1596		/* fall through */
1597	case NETDEV_UNREGISTER:
1598		inetdev_destroy(in_dev);
1599		break;
1600	case NETDEV_CHANGENAME:
 1601		/* Do not notify about label change; this event is
1602		 * not interesting to applications using netlink.
1603		 */
1604		inetdev_changename(dev, in_dev);
1605
1606		devinet_sysctl_unregister(in_dev);
1607		devinet_sysctl_register(in_dev);
1608		break;
1609	}
1610out:
1611	return NOTIFY_DONE;
1612}
1613
1614static struct notifier_block ip_netdev_notifier = {
1615	.notifier_call = inetdev_event,
1616};
1617
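/*
 * inet_nlmsg_size - worst-case size of an RTM_NEWADDR/RTM_DELADDR message,
 * used to size the skb allocated in rtmsg_ifa().
 */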
1618static size_t inet_nlmsg_size(void)
1619{
1620	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
1621	       + nla_total_size(4) /* IFA_ADDRESS */
1622	       + nla_total_size(4) /* IFA_LOCAL */
1623	       + nla_total_size(4) /* IFA_BROADCAST */
1624	       + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
1625	       + nla_total_size(4)  /* IFA_FLAGS */
1626	       + nla_total_size(4)  /* IFA_RT_PRIORITY */
1627	       + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
1628}
1629
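/*
 * cstamp_delta - convert a jiffies timestamp into hundredths of a second
 * since boot, the unit used in struct ifa_cacheinfo.
 */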
1630static inline u32 cstamp_delta(unsigned long cstamp)
1631{
1632	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
1633}
1634
1635static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
1636			 unsigned long tstamp, u32 preferred, u32 valid)
1637{
1638	struct ifa_cacheinfo ci;
1639
1640	ci.cstamp = cstamp_delta(cstamp);
1641	ci.tstamp = cstamp_delta(tstamp);
1642	ci.ifa_prefered = preferred;
1643	ci.ifa_valid = valid;
1644
1645	return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
1646}
1647
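/*
 * inet_fill_ifaddr - build one ifaddrmsg plus its IFA_* attributes
 * (address, label, flags, route priority, cacheinfo) for @ifa into @skb,
 * according to @args.
 */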
1648static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1649			    struct inet_fill_args *args)
1650{
1651	struct ifaddrmsg *ifm;
1652	struct nlmsghdr  *nlh;
1653	u32 preferred, valid;
1654
1655	nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
1656			args->flags);
1657	if (!nlh)
1658		return -EMSGSIZE;
1659
1660	ifm = nlmsg_data(nlh);
1661	ifm->ifa_family = AF_INET;
1662	ifm->ifa_prefixlen = ifa->ifa_prefixlen;
1663	ifm->ifa_flags = ifa->ifa_flags;
1664	ifm->ifa_scope = ifa->ifa_scope;
1665	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
1666
1667	if (args->netnsid >= 0 &&
1668	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
1669		goto nla_put_failure;
1670
1671	if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
1672		preferred = ifa->ifa_preferred_lft;
1673		valid = ifa->ifa_valid_lft;
1674		if (preferred != INFINITY_LIFE_TIME) {
1675			long tval = (jiffies - ifa->ifa_tstamp) / HZ;
1676
1677			if (preferred > tval)
1678				preferred -= tval;
1679			else
1680				preferred = 0;
1681			if (valid != INFINITY_LIFE_TIME) {
1682				if (valid > tval)
1683					valid -= tval;
1684				else
1685					valid = 0;
1686			}
1687		}
1688	} else {
1689		preferred = INFINITY_LIFE_TIME;
1690		valid = INFINITY_LIFE_TIME;
1691	}
1692	if ((ifa->ifa_address &&
1693	     nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
1694	    (ifa->ifa_local &&
1695	     nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
1696	    (ifa->ifa_broadcast &&
1697	     nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
1698	    (ifa->ifa_label[0] &&
1699	     nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
1700	    nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
1701	    (ifa->ifa_rt_priority &&
1702	     nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) ||
1703	    put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
1704			  preferred, valid))
1705		goto nla_put_failure;
1706
1707	nlmsg_end(skb, nlh);
1708	return 0;
1709
1710nla_put_failure:
1711	nlmsg_cancel(skb, nlh);
1712	return -EMSGSIZE;
1713}
1714
1715static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
1716				      struct inet_fill_args *fillargs,
1717				      struct net **tgt_net, struct sock *sk,
1718				      struct netlink_callback *cb)
1719{
1720	struct netlink_ext_ack *extack = cb->extack;
1721	struct nlattr *tb[IFA_MAX+1];
1722	struct ifaddrmsg *ifm;
1723	int err, i;
1724
1725	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
1726		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for address dump request");
1727		return -EINVAL;
1728	}
1729
1730	ifm = nlmsg_data(nlh);
1731	if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
1732		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for address dump request");
1733		return -EINVAL;
1734	}
1735
1736	fillargs->ifindex = ifm->ifa_index;
1737	if (fillargs->ifindex) {
1738		cb->answer_flags |= NLM_F_DUMP_FILTERED;
1739		fillargs->flags |= NLM_F_DUMP_FILTERED;
1740	}
1741
1742	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
1743					    ifa_ipv4_policy, extack);
1744	if (err < 0)
1745		return err;
1746
1747	for (i = 0; i <= IFA_MAX; ++i) {
1748		if (!tb[i])
1749			continue;
1750
1751		if (i == IFA_TARGET_NETNSID) {
1752			struct net *net;
1753
1754			fillargs->netnsid = nla_get_s32(tb[i]);
1755
1756			net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
1757			if (IS_ERR(net)) {
1758				fillargs->netnsid = -1;
1759				NL_SET_ERR_MSG(extack, "ipv4: Invalid target network namespace id");
1760				return PTR_ERR(net);
1761			}
1762			*tgt_net = net;
1763		} else {
1764			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in dump request");
1765			return -EINVAL;
1766		}
1767	}
1768
1769	return 0;
1770}
1771
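/*
 * in_dev_dump_addr - dump the addresses of one in_device for RTM_GETADDR,
 * skipping the first @s_ip_idx entries already sent in an earlier pass and
 * recording the resume point in cb->args[2].
 */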
1772static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
1773			    struct netlink_callback *cb, int s_ip_idx,
1774			    struct inet_fill_args *fillargs)
1775{
1776	struct in_ifaddr *ifa;
1777	int ip_idx = 0;
1778	int err;
1779
1780	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1781		if (ip_idx < s_ip_idx) {
1782			ip_idx++;
1783			continue;
1784		}
1785		err = inet_fill_ifaddr(skb, ifa, fillargs);
1786		if (err < 0)
1787			goto done;
1788
1789		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1790		ip_idx++;
1791	}
1792	err = 0;
1793
1794done:
1795	cb->args[2] = ip_idx;
1796
1797	return err;
1798}
1799
1800static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1801{
1802	const struct nlmsghdr *nlh = cb->nlh;
1803	struct inet_fill_args fillargs = {
1804		.portid = NETLINK_CB(cb->skb).portid,
1805		.seq = nlh->nlmsg_seq,
1806		.event = RTM_NEWADDR,
1807		.flags = NLM_F_MULTI,
1808		.netnsid = -1,
1809	};
1810	struct net *net = sock_net(skb->sk);
1811	struct net *tgt_net = net;
1812	int h, s_h;
1813	int idx, s_idx;
1814	int s_ip_idx;
1815	struct net_device *dev;
1816	struct in_device *in_dev;
1817	struct hlist_head *head;
1818	int err = 0;
1819
1820	s_h = cb->args[0];
1821	s_idx = idx = cb->args[1];
1822	s_ip_idx = cb->args[2];
1823
1824	if (cb->strict_check) {
1825		err = inet_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
1826						 skb->sk, cb);
1827		if (err < 0)
1828			goto put_tgt_net;
1829
1830		err = 0;
1831		if (fillargs.ifindex) {
1832			dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
1833			if (!dev) {
1834				err = -ENODEV;
1835				goto put_tgt_net;
1836			}
1837
1838			in_dev = __in_dev_get_rtnl(dev);
1839			if (in_dev) {
1840				err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
1841						       &fillargs);
1842			}
1843			goto put_tgt_net;
1844		}
1845	}
1846
1847	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1848		idx = 0;
1849		head = &tgt_net->dev_index_head[h];
1850		rcu_read_lock();
1851		cb->seq = atomic_read(&tgt_net->ipv4.dev_addr_genid) ^
1852			  tgt_net->dev_base_seq;
1853		hlist_for_each_entry_rcu(dev, head, index_hlist) {
1854			if (idx < s_idx)
1855				goto cont;
1856			if (h > s_h || idx > s_idx)
1857				s_ip_idx = 0;
1858			in_dev = __in_dev_get_rcu(dev);
1859			if (!in_dev)
1860				goto cont;
1861
1862			err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
1863					       &fillargs);
1864			if (err < 0) {
1865				rcu_read_unlock();
1866				goto done;
1867			}
1868cont:
1869			idx++;
1870		}
1871		rcu_read_unlock();
1872	}
1873
1874done:
1875	cb->args[0] = h;
1876	cb->args[1] = idx;
1877put_tgt_net:
1878	if (fillargs.netnsid >= 0)
1879		put_net(tgt_net);
1880
1881	return skb->len ? : err;
1882}
1883
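/*
 * rtmsg_ifa - notify RTNLGRP_IPV4_IFADDR listeners (and, via @nlh/@portid,
 * the requesting socket) that an address was added, changed or deleted.
 */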
1884static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
1885		      u32 portid)
1886{
1887	struct inet_fill_args fillargs = {
1888		.portid = portid,
1889		.seq = nlh ? nlh->nlmsg_seq : 0,
1890		.event = event,
1891		.flags = 0,
1892		.netnsid = -1,
1893	};
1894	struct sk_buff *skb;
1895	int err = -ENOBUFS;
1896	struct net *net;
1897
1898	net = dev_net(ifa->ifa_dev->dev);
1899	skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
1900	if (!skb)
1901		goto errout;
1902
1903	err = inet_fill_ifaddr(skb, ifa, &fillargs);
1904	if (err < 0) {
1905		/* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1906		WARN_ON(err == -EMSGSIZE);
1907		kfree_skb(skb);
1908		goto errout;
1909	}
1910	rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
1911	return;
1912errout:
1913	if (err < 0)
1914		rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
1915}
1916
1917static size_t inet_get_link_af_size(const struct net_device *dev,
1918				    u32 ext_filter_mask)
1919{
1920	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1921
1922	if (!in_dev)
1923		return 0;
1924
1925	return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
1926}
1927
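/*
 * inet_fill_link_af - dump the per-device ipv4 devconf array as a single
 * IFLA_INET_CONF attribute, one u32 per entry.
 */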
1928static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
1929			     u32 ext_filter_mask)
1930{
1931	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1932	struct nlattr *nla;
1933	int i;
1934
1935	if (!in_dev)
1936		return -ENODATA;
1937
1938	nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
1939	if (!nla)
1940		return -EMSGSIZE;
1941
1942	for (i = 0; i < IPV4_DEVCONF_MAX; i++)
1943		((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
1944
1945	return 0;
1946}
1947
1948static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
1949	[IFLA_INET_CONF]	= { .type = NLA_NESTED },
1950};
1951
1952static int inet_validate_link_af(const struct net_device *dev,
1953				 const struct nlattr *nla)
1954{
1955	struct nlattr *a, *tb[IFLA_INET_MAX+1];
1956	int err, rem;
1957
1958	if (dev && !__in_dev_get_rcu(dev))
1959		return -EAFNOSUPPORT;
1960
1961	err = nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla,
1962					  inet_af_policy, NULL);
1963	if (err < 0)
1964		return err;
1965
1966	if (tb[IFLA_INET_CONF]) {
1967		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
1968			int cfgid = nla_type(a);
1969
1970			if (nla_len(a) < 4)
1971				return -EINVAL;
1972
1973			if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
1974				return -EINVAL;
1975		}
1976	}
1977
1978	return 0;
1979}
1980
1981static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
1982{
1983	struct in_device *in_dev = __in_dev_get_rcu(dev);
1984	struct nlattr *a, *tb[IFLA_INET_MAX+1];
1985	int rem;
1986
1987	if (!in_dev)
1988		return -EAFNOSUPPORT;
1989
1990	if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
1991		BUG();
1992
1993	if (tb[IFLA_INET_CONF]) {
1994		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
1995			ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
1996	}
1997
1998	return 0;
1999}
2000
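/*
 * inet_netconf_msgsize_devconf - size of an RTM_NEWNETCONF message carrying
 * the attribute selected by @type, or all of them for NETCONFA_ALL.
 */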
2001static int inet_netconf_msgsize_devconf(int type)
2002{
2003	int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
2004		   + nla_total_size(4);	/* NETCONFA_IFINDEX */
2005	bool all = false;
2006
2007	if (type == NETCONFA_ALL)
2008		all = true;
2009
2010	if (all || type == NETCONFA_FORWARDING)
2011		size += nla_total_size(4);
2012	if (all || type == NETCONFA_RP_FILTER)
2013		size += nla_total_size(4);
2014	if (all || type == NETCONFA_MC_FORWARDING)
2015		size += nla_total_size(4);
2016	if (all || type == NETCONFA_BC_FORWARDING)
2017		size += nla_total_size(4);
2018	if (all || type == NETCONFA_PROXY_NEIGH)
2019		size += nla_total_size(4);
2020	if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
2021		size += nla_total_size(4);
2022
2023	return size;
2024}
2025
2026static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
2027				     struct ipv4_devconf *devconf, u32 portid,
2028				     u32 seq, int event, unsigned int flags,
2029				     int type)
2030{
2031	struct nlmsghdr  *nlh;
2032	struct netconfmsg *ncm;
2033	bool all = false;
2034
2035	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
2036			flags);
2037	if (!nlh)
2038		return -EMSGSIZE;
2039
2040	if (type == NETCONFA_ALL)
2041		all = true;
2042
2043	ncm = nlmsg_data(nlh);
2044	ncm->ncm_family = AF_INET;
2045
2046	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
2047		goto nla_put_failure;
2048
2049	if (!devconf)
2050		goto out;
2051
2052	if ((all || type == NETCONFA_FORWARDING) &&
2053	    nla_put_s32(skb, NETCONFA_FORWARDING,
2054			IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
2055		goto nla_put_failure;
2056	if ((all || type == NETCONFA_RP_FILTER) &&
2057	    nla_put_s32(skb, NETCONFA_RP_FILTER,
2058			IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
2059		goto nla_put_failure;
2060	if ((all || type == NETCONFA_MC_FORWARDING) &&
2061	    nla_put_s32(skb, NETCONFA_MC_FORWARDING,
2062			IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
2063		goto nla_put_failure;
2064	if ((all || type == NETCONFA_BC_FORWARDING) &&
2065	    nla_put_s32(skb, NETCONFA_BC_FORWARDING,
2066			IPV4_DEVCONF(*devconf, BC_FORWARDING)) < 0)
2067		goto nla_put_failure;
2068	if ((all || type == NETCONFA_PROXY_NEIGH) &&
2069	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
2070			IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
2071		goto nla_put_failure;
2072	if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
2073	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
2074			IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
2075		goto nla_put_failure;
2076
2077out:
2078	nlmsg_end(skb, nlh);
2079	return 0;
2080
2081nla_put_failure:
2082	nlmsg_cancel(skb, nlh);
2083	return -EMSGSIZE;
2084}
2085
2086void inet_netconf_notify_devconf(struct net *net, int event, int type,
2087				 int ifindex, struct ipv4_devconf *devconf)
2088{
2089	struct sk_buff *skb;
2090	int err = -ENOBUFS;
2091
2092	skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_KERNEL);
2093	if (!skb)
2094		goto errout;
2095
2096	err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
2097					event, 0, type);
2098	if (err < 0) {
2099		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
2100		WARN_ON(err == -EMSGSIZE);
2101		kfree_skb(skb);
2102		goto errout;
2103	}
2104	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_KERNEL);
2105	return;
2106errout:
2107	if (err < 0)
2108		rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
2109}
2110
2111static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
2112	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
2113	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
2114	[NETCONFA_RP_FILTER]	= { .len = sizeof(int) },
2115	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
2116	[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]	= { .len = sizeof(int) },
2117};
2118
2119static int inet_netconf_valid_get_req(struct sk_buff *skb,
2120				      const struct nlmsghdr *nlh,
2121				      struct nlattr **tb,
2122				      struct netlink_ext_ack *extack)
2123{
2124	int i, err;
2125
2126	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
2127		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf get request");
2128		return -EINVAL;
2129	}
2130
2131	if (!netlink_strict_get_check(skb))
2132		return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
2133					      tb, NETCONFA_MAX,
2134					      devconf_ipv4_policy, extack);
2135
2136	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
2137					    tb, NETCONFA_MAX,
2138					    devconf_ipv4_policy, extack);
2139	if (err)
2140		return err;
2141
2142	for (i = 0; i <= NETCONFA_MAX; i++) {
2143		if (!tb[i])
2144			continue;
2145
2146		switch (i) {
2147		case NETCONFA_IFINDEX:
2148			break;
2149		default:
2150			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in netconf get request");
2151			return -EINVAL;
2152		}
2153	}
2154
2155	return 0;
2156}
2157
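/*
 * inet_netconf_get_devconf - RTM_GETNETCONF handler: look up the devconf
 * selected by NETCONFA_IFINDEX (a real ifindex, NETCONFA_IFINDEX_ALL or
 * NETCONFA_IFINDEX_DEFAULT) and unicast it back to the requester.
 */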
2158static int inet_netconf_get_devconf(struct sk_buff *in_skb,
2159				    struct nlmsghdr *nlh,
2160				    struct netlink_ext_ack *extack)
2161{
2162	struct net *net = sock_net(in_skb->sk);
2163	struct nlattr *tb[NETCONFA_MAX+1];
2164	struct sk_buff *skb;
2165	struct ipv4_devconf *devconf;
2166	struct in_device *in_dev;
2167	struct net_device *dev;
2168	int ifindex;
2169	int err;
2170
2171	err = inet_netconf_valid_get_req(in_skb, nlh, tb, extack);
2172	if (err)
2173		goto errout;
2174
2175	err = -EINVAL;
2176	if (!tb[NETCONFA_IFINDEX])
2177		goto errout;
2178
2179	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
2180	switch (ifindex) {
2181	case NETCONFA_IFINDEX_ALL:
2182		devconf = net->ipv4.devconf_all;
2183		break;
2184	case NETCONFA_IFINDEX_DEFAULT:
2185		devconf = net->ipv4.devconf_dflt;
2186		break;
2187	default:
2188		dev = __dev_get_by_index(net, ifindex);
2189		if (!dev)
2190			goto errout;
2191		in_dev = __in_dev_get_rtnl(dev);
2192		if (!in_dev)
2193			goto errout;
2194		devconf = &in_dev->cnf;
2195		break;
2196	}
2197
2198	err = -ENOBUFS;
2199	skb = nlmsg_new(inet_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
2200	if (!skb)
2201		goto errout;
2202
2203	err = inet_netconf_fill_devconf(skb, ifindex, devconf,
2204					NETLINK_CB(in_skb).portid,
2205					nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
2206					NETCONFA_ALL);
2207	if (err < 0) {
2208		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
2209		WARN_ON(err == -EMSGSIZE);
2210		kfree_skb(skb);
2211		goto errout;
2212	}
2213	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2214errout:
2215	return err;
2216}
2217
2218static int inet_netconf_dump_devconf(struct sk_buff *skb,
2219				     struct netlink_callback *cb)
2220{
2221	const struct nlmsghdr *nlh = cb->nlh;
2222	struct net *net = sock_net(skb->sk);
2223	int h, s_h;
2224	int idx, s_idx;
2225	struct net_device *dev;
2226	struct in_device *in_dev;
2227	struct hlist_head *head;
2228
2229	if (cb->strict_check) {
2230		struct netlink_ext_ack *extack = cb->extack;
2231		struct netconfmsg *ncm;
2232
2233		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
2234			NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf dump request");
2235			return -EINVAL;
2236		}
2237
2238		if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
2239			NL_SET_ERR_MSG(extack, "ipv4: Invalid data after header in netconf dump request");
2240			return -EINVAL;
2241		}
2242	}
2243
2244	s_h = cb->args[0];
2245	s_idx = idx = cb->args[1];
2246
2247	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
2248		idx = 0;
2249		head = &net->dev_index_head[h];
2250		rcu_read_lock();
2251		cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
2252			  net->dev_base_seq;
2253		hlist_for_each_entry_rcu(dev, head, index_hlist) {
2254			if (idx < s_idx)
2255				goto cont;
2256			in_dev = __in_dev_get_rcu(dev);
2257			if (!in_dev)
2258				goto cont;
2259
2260			if (inet_netconf_fill_devconf(skb, dev->ifindex,
2261						      &in_dev->cnf,
2262						      NETLINK_CB(cb->skb).portid,
2263						      nlh->nlmsg_seq,
2264						      RTM_NEWNETCONF,
2265						      NLM_F_MULTI,
2266						      NETCONFA_ALL) < 0) {
2267				rcu_read_unlock();
2268				goto done;
2269			}
2270			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2271cont:
2272			idx++;
2273		}
2274		rcu_read_unlock();
2275	}
2276	if (h == NETDEV_HASHENTRIES) {
2277		if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
2278					      net->ipv4.devconf_all,
2279					      NETLINK_CB(cb->skb).portid,
2280					      nlh->nlmsg_seq,
2281					      RTM_NEWNETCONF, NLM_F_MULTI,
2282					      NETCONFA_ALL) < 0)
2283			goto done;
2284		else
2285			h++;
2286	}
2287	if (h == NETDEV_HASHENTRIES + 1) {
2288		if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
2289					      net->ipv4.devconf_dflt,
2290					      NETLINK_CB(cb->skb).portid,
2291					      nlh->nlmsg_seq,
2292					      RTM_NEWNETCONF, NLM_F_MULTI,
2293					      NETCONFA_ALL) < 0)
2294			goto done;
2295		else
2296			h++;
2297	}
2298done:
2299	cb->args[0] = h;
2300	cb->args[1] = idx;
2301
2302	return skb->len;
2303}
2304
2305#ifdef CONFIG_SYSCTL
2306
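/*
 * devinet_copy_dflt_conf - when conf/default/<entry> changes, propagate the
 * new value to every device that has not explicitly overridden entry @i
 * (its bit in cnf.state is still clear).
 */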
2307static void devinet_copy_dflt_conf(struct net *net, int i)
2308{
2309	struct net_device *dev;
2310
2311	rcu_read_lock();
2312	for_each_netdev_rcu(net, dev) {
2313		struct in_device *in_dev;
2314
2315		in_dev = __in_dev_get_rcu(dev);
2316		if (in_dev && !test_bit(i, in_dev->cnf.state))
2317			in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
2318	}
2319	rcu_read_unlock();
2320}
2321
2322/* called with RTNL locked */
2323static void inet_forward_change(struct net *net)
2324{
2325	struct net_device *dev;
2326	int on = IPV4_DEVCONF_ALL(net, FORWARDING);
2327
2328	IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
2329	IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
2330	inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2331				    NETCONFA_FORWARDING,
2332				    NETCONFA_IFINDEX_ALL,
2333				    net->ipv4.devconf_all);
2334	inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2335				    NETCONFA_FORWARDING,
2336				    NETCONFA_IFINDEX_DEFAULT,
2337				    net->ipv4.devconf_dflt);
2338
2339	for_each_netdev(net, dev) {
2340		struct in_device *in_dev;
2341
2342		if (on)
2343			dev_disable_lro(dev);
2344
2345		in_dev = __in_dev_get_rtnl(dev);
2346		if (in_dev) {
2347			IN_DEV_CONF_SET(in_dev, FORWARDING, on);
2348			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2349						    NETCONFA_FORWARDING,
2350						    dev->ifindex, &in_dev->cnf);
2351		}
2352	}
2353}
2354
2355static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
2356{
2357	if (cnf == net->ipv4.devconf_dflt)
2358		return NETCONFA_IFINDEX_DEFAULT;
2359	else if (cnf == net->ipv4.devconf_all)
2360		return NETCONFA_IFINDEX_ALL;
2361	else {
2362		struct in_device *idev
2363			= container_of(cnf, struct in_device, cnf);
2364		return idev->dev->ifindex;
2365	}
2366}
2367
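/*
 * devinet_conf_proc - generic proc handler for the devinet sysctls: mark
 * entry @i as explicitly set, propagate writes to the default table to
 * devices still using the defaults, and flush the route cache or emit
 * RTM_NEWNETCONF notifications for the entries that need it.
 */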
2368static int devinet_conf_proc(struct ctl_table *ctl, int write,
2369			     void __user *buffer,
2370			     size_t *lenp, loff_t *ppos)
2371{
2372	int old_value = *(int *)ctl->data;
2373	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2374	int new_value = *(int *)ctl->data;
2375
2376	if (write) {
2377		struct ipv4_devconf *cnf = ctl->extra1;
2378		struct net *net = ctl->extra2;
2379		int i = (int *)ctl->data - cnf->data;
2380		int ifindex;
2381
2382		set_bit(i, cnf->state);
2383
2384		if (cnf == net->ipv4.devconf_dflt)
2385			devinet_copy_dflt_conf(net, i);
2386		if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
2387		    i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
2388			if ((new_value == 0) && (old_value != 0))
2389				rt_cache_flush(net);
2390
2391		if (i == IPV4_DEVCONF_BC_FORWARDING - 1 &&
2392		    new_value != old_value)
2393			rt_cache_flush(net);
2394
2395		if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
2396		    new_value != old_value) {
2397			ifindex = devinet_conf_ifindex(net, cnf);
2398			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2399						    NETCONFA_RP_FILTER,
2400						    ifindex, cnf);
2401		}
2402		if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
2403		    new_value != old_value) {
2404			ifindex = devinet_conf_ifindex(net, cnf);
2405			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2406						    NETCONFA_PROXY_NEIGH,
2407						    ifindex, cnf);
2408		}
2409		if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
2410		    new_value != old_value) {
2411			ifindex = devinet_conf_ifindex(net, cnf);
2412			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2413						    NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
2414						    ifindex, cnf);
2415		}
2416	}
2417
2418	return ret;
2419}
2420
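/*
 * devinet_sysctl_forward - proc handler for the "forwarding" sysctls.
 * Writing conf/all/forwarding flips forwarding on every device via
 * inet_forward_change(); for the "all" and per-device entries RTNL is taken
 * with trylock and the syscall is restarted on contention.
 */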
2421static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
2422				  void __user *buffer,
2423				  size_t *lenp, loff_t *ppos)
2424{
2425	int *valp = ctl->data;
2426	int val = *valp;
2427	loff_t pos = *ppos;
2428	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2429
2430	if (write && *valp != val) {
2431		struct net *net = ctl->extra2;
2432
2433		if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
2434			if (!rtnl_trylock()) {
2435				/* Restore the original values before restarting */
2436				*valp = val;
2437				*ppos = pos;
2438				return restart_syscall();
2439			}
2440			if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
2441				inet_forward_change(net);
2442			} else {
2443				struct ipv4_devconf *cnf = ctl->extra1;
2444				struct in_device *idev =
2445					container_of(cnf, struct in_device, cnf);
2446				if (*valp)
2447					dev_disable_lro(idev->dev);
2448				inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2449							    NETCONFA_FORWARDING,
2450							    idev->dev->ifindex,
2451							    cnf);
2452			}
2453			rtnl_unlock();
2454			rt_cache_flush(net);
2455		} else
2456			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2457						    NETCONFA_FORWARDING,
2458						    NETCONFA_IFINDEX_DEFAULT,
2459						    net->ipv4.devconf_dflt);
2460	}
2461
2462	return ret;
2463}
2464
2465static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
2466				void __user *buffer,
2467				size_t *lenp, loff_t *ppos)
2468{
2469	int *valp = ctl->data;
2470	int val = *valp;
2471	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2472	struct net *net = ctl->extra2;
2473
2474	if (write && *valp != val)
2475		rt_cache_flush(net);
2476
2477	return ret;
2478}
2479
2480#define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
2481	{ \
2482		.procname	= name, \
2483		.data		= ipv4_devconf.data + \
2484				  IPV4_DEVCONF_ ## attr - 1, \
2485		.maxlen		= sizeof(int), \
2486		.mode		= mval, \
2487		.proc_handler	= proc, \
2488		.extra1		= &ipv4_devconf, \
2489	}
2490
2491#define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
2492	DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)
2493
2494#define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
2495	DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)
2496
2497#define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
2498	DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)
2499
2500#define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
2501	DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
2502
2503static struct devinet_sysctl_table {
2504	struct ctl_table_header *sysctl_header;
2505	struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
2506} devinet_sysctl = {
2507	.devinet_vars = {
2508		DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
2509					     devinet_sysctl_forward),
2510		DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
2511		DEVINET_SYSCTL_RW_ENTRY(BC_FORWARDING, "bc_forwarding"),
2512
2513		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
2514		DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
2515		DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
2516		DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
2517		DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
2518		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
2519					"accept_source_route"),
2520		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
2521		DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
2522		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
2523		DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
2524		DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
2525		DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
2526		DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
2527		DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
2528		DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
2529		DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
2530		DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
2531		DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
2532		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
2533		DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
2534					"force_igmp_version"),
2535		DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
2536					"igmpv2_unsolicited_report_interval"),
2537		DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
2538					"igmpv3_unsolicited_report_interval"),
2539		DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
2540					"ignore_routes_with_linkdown"),
2541		DEVINET_SYSCTL_RW_ENTRY(DROP_GRATUITOUS_ARP,
2542					"drop_gratuitous_arp"),
2543
2544		DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
2545		DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
2546		DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
2547					      "promote_secondaries"),
2548		DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
2549					      "route_localnet"),
2550		DEVINET_SYSCTL_FLUSHING_ENTRY(DROP_UNICAST_IN_L2_MULTICAST,
2551					      "drop_unicast_in_l2_multicast"),
2552	},
2553};
2554
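/*
 * __devinet_sysctl_register - clone the devinet_sysctl template, point each
 * entry at @p, register it under net/ipv4/conf/<dev_name> and announce the
 * configuration with RTM_NEWNETCONF.
 */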
2555static int __devinet_sysctl_register(struct net *net, char *dev_name,
2556				     int ifindex, struct ipv4_devconf *p)
2557{
2558	int i;
2559	struct devinet_sysctl_table *t;
2560	char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
2561
2562	t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
2563	if (!t)
2564		goto out;
2565
2566	for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
2567		t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
2568		t->devinet_vars[i].extra1 = p;
2569		t->devinet_vars[i].extra2 = net;
2570	}
2571
2572	snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
2573
2574	t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
2575	if (!t->sysctl_header)
2576		goto free;
2577
2578	p->sysctl = t;
2579
2580	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
2581				    ifindex, p);
2582	return 0;
2583
2584free:
2585	kfree(t);
2586out:
2587	return -ENOBUFS;
2588}
2589
2590static void __devinet_sysctl_unregister(struct net *net,
2591					struct ipv4_devconf *cnf, int ifindex)
2592{
2593	struct devinet_sysctl_table *t = cnf->sysctl;
2594
2595	if (t) {
2596		cnf->sysctl = NULL;
2597		unregister_net_sysctl_table(t->sysctl_header);
2598		kfree(t);
2599	}
2600
2601	inet_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
2602}
2603
2604static int devinet_sysctl_register(struct in_device *idev)
2605{
2606	int err;
2607
2608	if (!sysctl_dev_name_is_allowed(idev->dev->name))
2609		return -EINVAL;
2610
2611	err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
2612	if (err)
2613		return err;
2614	err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
2615					idev->dev->ifindex, &idev->cnf);
2616	if (err)
2617		neigh_sysctl_unregister(idev->arp_parms);
2618	return err;
2619}
2620
2621static void devinet_sysctl_unregister(struct in_device *idev)
2622{
2623	struct net *net = dev_net(idev->dev);
2624
2625	__devinet_sysctl_unregister(net, &idev->cnf, idev->dev->ifindex);
2626	neigh_sysctl_unregister(idev->arp_parms);
2627}
2628
2629static struct ctl_table ctl_forward_entry[] = {
2630	{
2631		.procname	= "ip_forward",
2632		.data		= &ipv4_devconf.data[
2633					IPV4_DEVCONF_FORWARDING - 1],
2634		.maxlen		= sizeof(int),
2635		.mode		= 0644,
2636		.proc_handler	= devinet_sysctl_forward,
2637		.extra1		= &ipv4_devconf,
2638		.extra2		= &init_net,
2639	},
2640	{ },
2641};
2642#endif
2643
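/*
 * devinet_init_net - per-namespace setup: allocate this namespace's "all"
 * and "default" ipv4_devconf tables (normally copied from init_net's
 * current settings) and, under CONFIG_SYSCTL, register the conf/all,
 * conf/default and ip_forward sysctls.
 */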
2644static __net_init int devinet_init_net(struct net *net)
2645{
2646	int err;
2647	struct ipv4_devconf *all, *dflt;
2648#ifdef CONFIG_SYSCTL
2649	struct ctl_table *tbl;
2650	struct ctl_table_header *forw_hdr;
2651#endif
2652
2653	err = -ENOMEM;
2654	all = kmemdup(&ipv4_devconf, sizeof(ipv4_devconf), GFP_KERNEL);
2655	if (!all)
2656		goto err_alloc_all;
2657
2658	dflt = kmemdup(&ipv4_devconf_dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
2659	if (!dflt)
2660		goto err_alloc_dflt;
2661
2662#ifdef CONFIG_SYSCTL
2663	tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
2664	if (!tbl)
2665		goto err_alloc_ctl;
2666
2667	tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
2668	tbl[0].extra1 = all;
2669	tbl[0].extra2 = net;
2670#endif
2671
2672	if ((!IS_ENABLED(CONFIG_SYSCTL) ||
2673	     sysctl_devconf_inherit_init_net != 2) &&
2674	    !net_eq(net, &init_net)) {
2675		memcpy(all, init_net.ipv4.devconf_all, sizeof(ipv4_devconf));
2676		memcpy(dflt, init_net.ipv4.devconf_dflt, sizeof(ipv4_devconf_dflt));
2677	}
2678
2679#ifdef CONFIG_SYSCTL
2680	err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
2681	if (err < 0)
2682		goto err_reg_all;
2683
2684	err = __devinet_sysctl_register(net, "default",
2685					NETCONFA_IFINDEX_DEFAULT, dflt);
2686	if (err < 0)
2687		goto err_reg_dflt;
2688
2689	err = -ENOMEM;
2690	forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
2691	if (!forw_hdr)
2692		goto err_reg_ctl;
2693	net->ipv4.forw_hdr = forw_hdr;
2694#endif
2695
2696	net->ipv4.devconf_all = all;
2697	net->ipv4.devconf_dflt = dflt;
2698	return 0;
2699
2700#ifdef CONFIG_SYSCTL
2701err_reg_ctl:
2702	__devinet_sysctl_unregister(net, dflt, NETCONFA_IFINDEX_DEFAULT);
2703err_reg_dflt:
2704	__devinet_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
2705err_reg_all:
2706	kfree(tbl);
2707err_alloc_ctl:
2708#endif
2709	kfree(dflt);
2710err_alloc_dflt:
2711	kfree(all);
2712err_alloc_all:
2713	return err;
2714}
2715
2716static __net_exit void devinet_exit_net(struct net *net)
2717{
2718#ifdef CONFIG_SYSCTL
2719	struct ctl_table *tbl;
2720
2721	tbl = net->ipv4.forw_hdr->ctl_table_arg;
2722	unregister_net_sysctl_table(net->ipv4.forw_hdr);
2723	__devinet_sysctl_unregister(net, net->ipv4.devconf_dflt,
2724				    NETCONFA_IFINDEX_DEFAULT);
2725	__devinet_sysctl_unregister(net, net->ipv4.devconf_all,
2726				    NETCONFA_IFINDEX_ALL);
2727	kfree(tbl);
2728#endif
2729	kfree(net->ipv4.devconf_dflt);
2730	kfree(net->ipv4.devconf_all);
2731}
2732
2733static __net_initdata struct pernet_operations devinet_ops = {
2734	.init = devinet_init_net,
2735	.exit = devinet_exit_net,
2736};
2737
2738static struct rtnl_af_ops inet_af_ops __read_mostly = {
2739	.family		  = AF_INET,
2740	.fill_link_af	  = inet_fill_link_af,
2741	.get_link_af_size = inet_get_link_af_size,
2742	.validate_link_af = inet_validate_link_af,
2743	.set_link_af	  = inet_set_link_af,
2744};
2745
2746void __init devinet_init(void)
2747{
2748	int i;
2749
2750	for (i = 0; i < IN4_ADDR_HSIZE; i++)
2751		INIT_HLIST_HEAD(&inet_addr_lst[i]);
2752
2753	register_pernet_subsys(&devinet_ops);
2754
2755	register_gifconf(PF_INET, inet_gifconf);
2756	register_netdevice_notifier(&ip_netdev_notifier);
2757
2758	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
2759
2760	rtnl_af_register(&inet_af_ops);
2761
2762	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0);
2763	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0);
2764	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, 0);
2765	rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
2766		      inet_netconf_dump_devconf, 0);
2767}