1/*
2 * NET3 IP device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Derived from the IP parts of dev.c 1.0.19
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 *
14 * Additional Authors:
15 * Alan Cox, <gw4pts@gw4pts.ampr.org>
16 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
17 *
18 * Changes:
19 * Alexey Kuznetsov: pa_* fields are replaced with ifaddr
20 * lists.
21 * Cyrus Durgin: updated for kmod
22 * Matthias Andree: in devinet_ioctl, compare label and
23 * address (4.4BSD alias style support),
24 * fall back to comparing just the label
25 * if no match found.
26 */
27
28
29#include <asm/uaccess.h>
30#include <linux/bitops.h>
31#include <linux/capability.h>
32#include <linux/module.h>
33#include <linux/types.h>
34#include <linux/kernel.h>
35#include <linux/string.h>
36#include <linux/mm.h>
37#include <linux/socket.h>
38#include <linux/sockios.h>
39#include <linux/in.h>
40#include <linux/errno.h>
41#include <linux/interrupt.h>
42#include <linux/if_addr.h>
43#include <linux/if_ether.h>
44#include <linux/inet.h>
45#include <linux/netdevice.h>
46#include <linux/etherdevice.h>
47#include <linux/skbuff.h>
48#include <linux/init.h>
49#include <linux/notifier.h>
50#include <linux/inetdevice.h>
51#include <linux/igmp.h>
52#include <linux/slab.h>
53#include <linux/hash.h>
54#ifdef CONFIG_SYSCTL
55#include <linux/sysctl.h>
56#endif
57#include <linux/kmod.h>
58#include <linux/netconf.h>
59
60#include <net/arp.h>
61#include <net/ip.h>
62#include <net/route.h>
63#include <net/ip_fib.h>
64#include <net/rtnetlink.h>
65#include <net/net_namespace.h>
66#include <net/addrconf.h>
67
68#include "fib_lookup.h"
69
70static struct ipv4_devconf ipv4_devconf = {
71 .data = {
72 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
73 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
74 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
75 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
76 [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
77 [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
78 },
79};
80
81static struct ipv4_devconf ipv4_devconf_dflt = {
82 .data = {
83 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
84 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
85 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
86 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
87 [IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
88 [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
89 [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
90 },
91};
92
93#define IPV4_DEVCONF_DFLT(net, attr) \
94 IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
95
96static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
97 [IFA_LOCAL] = { .type = NLA_U32 },
98 [IFA_ADDRESS] = { .type = NLA_U32 },
99 [IFA_BROADCAST] = { .type = NLA_U32 },
100 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
101 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
102 [IFA_FLAGS] = { .type = NLA_U32 },
103};
104
105#define IN4_ADDR_HSIZE_SHIFT 8
106#define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT)
107
108static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
109
110static u32 inet_addr_hash(const struct net *net, __be32 addr)
111{
112 u32 val = (__force u32) addr ^ net_hash_mix(net);
113
114 return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
115}
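/*
 * Editorial note: hash_32(val, IN4_ADDR_HSIZE_SHIFT) spreads addresses
 * over IN4_ADDR_HSIZE (256) buckets; net_hash_mix() perturbs the value
 * per network namespace so the same address in different namespaces
 * does not always land in the same bucket.
 */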
116
117static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
118{
119 u32 hash = inet_addr_hash(net, ifa->ifa_local);
120
121 ASSERT_RTNL();
122 hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
123}
124
125static void inet_hash_remove(struct in_ifaddr *ifa)
126{
127 ASSERT_RTNL();
128 hlist_del_init_rcu(&ifa->hash);
129}
130
131/**
132 * __ip_dev_find - find the first device with a given source address.
133 * @net: the net namespace
134 * @addr: the source address
135 * @devref: if true, take a reference on the found device
136 *
137 * If a caller uses devref=false, it should be protected by RCU or RTNL.
138 */
139struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
140{
141 u32 hash = inet_addr_hash(net, addr);
142 struct net_device *result = NULL;
143 struct in_ifaddr *ifa;
144
145 rcu_read_lock();
146 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
147 if (ifa->ifa_local == addr) {
148 struct net_device *dev = ifa->ifa_dev->dev;
149
150 if (!net_eq(dev_net(dev), net))
151 continue;
152 result = dev;
153 break;
154 }
155 }
156 if (!result) {
157 struct flowi4 fl4 = { .daddr = addr };
158 struct fib_result res = { 0 };
159 struct fib_table *local;
160
161 /* Fallback to FIB local table so that communication
162 * over loopback subnets works.
163 */
164 local = fib_get_table(net, RT_TABLE_LOCAL);
165 if (local &&
166 !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
167 res.type == RTN_LOCAL)
168 result = FIB_RES_DEV(res);
169 }
170 if (result && devref)
171 dev_hold(result);
172 rcu_read_unlock();
173 return result;
174}
175EXPORT_SYMBOL(__ip_dev_find);
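/*
 * Illustrative sketch (editorial addition, not from the original file):
 * a caller that only needs the device briefly can pass devref=false and
 * rely on RCU; use_dev_under_rcu() is a hypothetical helper.
 *
 *	rcu_read_lock();
 *	dev = __ip_dev_find(net, addr, false);
 *	if (dev)
 *		use_dev_under_rcu(dev);
 *	rcu_read_unlock();
 *
 * With devref=true the function takes dev_hold() on the result, and the
 * caller must balance it with dev_put().
 */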
176
177static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
178
179static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
180static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
181 int destroy);
182#ifdef CONFIG_SYSCTL
183static int devinet_sysctl_register(struct in_device *idev);
184static void devinet_sysctl_unregister(struct in_device *idev);
185#else
186static int devinet_sysctl_register(struct in_device *idev)
187{
188 return 0;
189}
190static void devinet_sysctl_unregister(struct in_device *idev)
191{
192}
193#endif
194
195/* Locks all the inet devices. */
196
197static struct in_ifaddr *inet_alloc_ifa(void)
198{
199 return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL);
200}
201
202static void inet_rcu_free_ifa(struct rcu_head *head)
203{
204 struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
205 if (ifa->ifa_dev)
206 in_dev_put(ifa->ifa_dev);
207 kfree(ifa);
208}
209
210static void inet_free_ifa(struct in_ifaddr *ifa)
211{
212 call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
213}
214
215void in_dev_finish_destroy(struct in_device *idev)
216{
217 struct net_device *dev = idev->dev;
218
219 WARN_ON(idev->ifa_list);
220 WARN_ON(idev->mc_list);
221 kfree(rcu_dereference_protected(idev->mc_hash, 1));
222#ifdef NET_REFCNT_DEBUG
223 pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
224#endif
225 dev_put(dev);
226 if (!idev->dead)
227 pr_err("Freeing alive in_device %p\n", idev);
228 else
229 kfree(idev);
230}
231EXPORT_SYMBOL(in_dev_finish_destroy);
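/*
 * Editorial note (hedged): this function is normally reached through the
 * refcount helpers in <linux/inetdevice.h>; roughly, in_dev_put(idev)
 * calls in_dev_finish_destroy() once the last reference is dropped,
 * which is why it complains if the in_device is still marked alive.
 */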
232
233static struct in_device *inetdev_init(struct net_device *dev)
234{
235 struct in_device *in_dev;
236 int err = -ENOMEM;
237
238 ASSERT_RTNL();
239
240 in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
241 if (!in_dev)
242 goto out;
243 memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
244 sizeof(in_dev->cnf));
245 in_dev->cnf.sysctl = NULL;
246 in_dev->dev = dev;
247 in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
248 if (!in_dev->arp_parms)
249 goto out_kfree;
250 if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
251 dev_disable_lro(dev);
252 /* Reference in_dev->dev */
253 dev_hold(dev);
254 /* Account for reference dev->ip_ptr (below) */
255 in_dev_hold(in_dev);
256
257 err = devinet_sysctl_register(in_dev);
258 if (err) {
259 in_dev->dead = 1;
260 in_dev_put(in_dev);
261 in_dev = NULL;
262 goto out;
263 }
264 ip_mc_init_dev(in_dev);
265 if (dev->flags & IFF_UP)
266 ip_mc_up(in_dev);
267
268 /* we can receive as soon as ip_ptr is set -- do this last */
269 rcu_assign_pointer(dev->ip_ptr, in_dev);
270out:
271 return in_dev ?: ERR_PTR(err);
272out_kfree:
273 kfree(in_dev);
274 in_dev = NULL;
275 goto out;
276}
277
278static void in_dev_rcu_put(struct rcu_head *head)
279{
280 struct in_device *idev = container_of(head, struct in_device, rcu_head);
281 in_dev_put(idev);
282}
283
284static void inetdev_destroy(struct in_device *in_dev)
285{
286 struct in_ifaddr *ifa;
287 struct net_device *dev;
288
289 ASSERT_RTNL();
290
291 dev = in_dev->dev;
292
293 in_dev->dead = 1;
294
295 ip_mc_destroy_dev(in_dev);
296
297 while ((ifa = in_dev->ifa_list) != NULL) {
298 inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
299 inet_free_ifa(ifa);
300 }
301
302 RCU_INIT_POINTER(dev->ip_ptr, NULL);
303
304 devinet_sysctl_unregister(in_dev);
305 neigh_parms_release(&arp_tbl, in_dev->arp_parms);
306 arp_ifdown(dev);
307
308 call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
309}
310
311int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
312{
313 rcu_read_lock();
314 for_primary_ifa(in_dev) {
315 if (inet_ifa_match(a, ifa)) {
316 if (!b || inet_ifa_match(b, ifa)) {
317 rcu_read_unlock();
318 return 1;
319 }
320 }
321 } endfor_ifa(in_dev);
322 rcu_read_unlock();
323 return 0;
324}
325
326static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
327 int destroy, struct nlmsghdr *nlh, u32 portid)
328{
329 struct in_ifaddr *promote = NULL;
330 struct in_ifaddr *ifa, *ifa1 = *ifap;
331 struct in_ifaddr *last_prim = in_dev->ifa_list;
332 struct in_ifaddr *prev_prom = NULL;
333 int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
334
335 ASSERT_RTNL();
336
337 if (in_dev->dead)
338 goto no_promotions;
339
340 /* 1. Deleting a primary ifaddr forces deletion of all secondaries
341 * unless alias promotion is set.
342 */
343
344 if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
345 struct in_ifaddr **ifap1 = &ifa1->ifa_next;
346
347 while ((ifa = *ifap1) != NULL) {
348 if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
349 ifa1->ifa_scope <= ifa->ifa_scope)
350 last_prim = ifa;
351
352 if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
353 ifa1->ifa_mask != ifa->ifa_mask ||
354 !inet_ifa_match(ifa1->ifa_address, ifa)) {
355 ifap1 = &ifa->ifa_next;
356 prev_prom = ifa;
357 continue;
358 }
359
360 if (!do_promote) {
361 inet_hash_remove(ifa);
362 *ifap1 = ifa->ifa_next;
363
364 rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
365 blocking_notifier_call_chain(&inetaddr_chain,
366 NETDEV_DOWN, ifa);
367 inet_free_ifa(ifa);
368 } else {
369 promote = ifa;
370 break;
371 }
372 }
373 }
374
375 /* On promotion all secondaries from the subnet change
376 * their primary IP; we must remove all their routes silently
377 * and later add them back with the new prefsrc. Do this
378 * while all addresses are still on the device list.
379 */
380 for (ifa = promote; ifa; ifa = ifa->ifa_next) {
381 if (ifa1->ifa_mask == ifa->ifa_mask &&
382 inet_ifa_match(ifa1->ifa_address, ifa))
383 fib_del_ifaddr(ifa, ifa1);
384 }
385
386no_promotions:
387 /* 2. Unlink it */
388
389 *ifap = ifa1->ifa_next;
390 inet_hash_remove(ifa1);
391
392 /* 3. Announce address deletion */
393
394 /* Send the message first, then call the notifier.
395 At first sight, the FIB update triggered by the notifier
396 will refer to an already deleted ifaddr, which could confuse
397 netlink listeners. It is not true: look, gated sees
398 that the route was deleted and, if it still thinks the ifaddr
399 is valid, it will try to restore the deleted routes... Grr.
400 So this order is correct.
401 */
402 rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
403 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
404
405 if (promote) {
406 struct in_ifaddr *next_sec = promote->ifa_next;
407
408 if (prev_prom) {
409 prev_prom->ifa_next = promote->ifa_next;
410 promote->ifa_next = last_prim->ifa_next;
411 last_prim->ifa_next = promote;
412 }
413
414 promote->ifa_flags &= ~IFA_F_SECONDARY;
415 rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
416 blocking_notifier_call_chain(&inetaddr_chain,
417 NETDEV_UP, promote);
418 for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
419 if (ifa1->ifa_mask != ifa->ifa_mask ||
420 !inet_ifa_match(ifa1->ifa_address, ifa))
421 continue;
422 fib_add_ifaddr(ifa);
423 }
424
425 }
426 if (destroy)
427 inet_free_ifa(ifa1);
428}
429
430static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
431 int destroy)
432{
433 __inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
434}
435
436static void check_lifetime(struct work_struct *work);
437
438static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
439
440static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
441 u32 portid)
442{
443 struct in_device *in_dev = ifa->ifa_dev;
444 struct in_ifaddr *ifa1, **ifap, **last_primary;
445
446 ASSERT_RTNL();
447
448 if (!ifa->ifa_local) {
449 inet_free_ifa(ifa);
450 return 0;
451 }
452
453 ifa->ifa_flags &= ~IFA_F_SECONDARY;
454 last_primary = &in_dev->ifa_list;
455
456 for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
457 ifap = &ifa1->ifa_next) {
458 if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
459 ifa->ifa_scope <= ifa1->ifa_scope)
460 last_primary = &ifa1->ifa_next;
461 if (ifa1->ifa_mask == ifa->ifa_mask &&
462 inet_ifa_match(ifa1->ifa_address, ifa)) {
463 if (ifa1->ifa_local == ifa->ifa_local) {
464 inet_free_ifa(ifa);
465 return -EEXIST;
466 }
467 if (ifa1->ifa_scope != ifa->ifa_scope) {
468 inet_free_ifa(ifa);
469 return -EINVAL;
470 }
471 ifa->ifa_flags |= IFA_F_SECONDARY;
472 }
473 }
474
475 if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
476 prandom_seed((__force u32) ifa->ifa_local);
477 ifap = last_primary;
478 }
479
480 ifa->ifa_next = *ifap;
481 *ifap = ifa;
482
483 inet_hash_insert(dev_net(in_dev->dev), ifa);
484
485 cancel_delayed_work(&check_lifetime_work);
486 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
487
488 /* Send the message first, then call the notifier.
489 The notifier will trigger a FIB update, so that
490 netlink listeners will know about the new ifaddr. */
491 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
492 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
493
494 return 0;
495}
496
497static int inet_insert_ifa(struct in_ifaddr *ifa)
498{
499 return __inet_insert_ifa(ifa, NULL, 0);
500}
501
502static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
503{
504 struct in_device *in_dev = __in_dev_get_rtnl(dev);
505
506 ASSERT_RTNL();
507
508 if (!in_dev) {
509 inet_free_ifa(ifa);
510 return -ENOBUFS;
511 }
512 ipv4_devconf_setall(in_dev);
513 neigh_parms_data_state_setall(in_dev->arp_parms);
514 if (ifa->ifa_dev != in_dev) {
515 WARN_ON(ifa->ifa_dev);
516 in_dev_hold(in_dev);
517 ifa->ifa_dev = in_dev;
518 }
519 if (ipv4_is_loopback(ifa->ifa_local))
520 ifa->ifa_scope = RT_SCOPE_HOST;
521 return inet_insert_ifa(ifa);
522}
523
524/* Caller must hold RCU or RTNL:
525 * we don't take a reference on the found in_device.
526 */
527struct in_device *inetdev_by_index(struct net *net, int ifindex)
528{
529 struct net_device *dev;
530 struct in_device *in_dev = NULL;
531
532 rcu_read_lock();
533 dev = dev_get_by_index_rcu(net, ifindex);
534 if (dev)
535 in_dev = rcu_dereference_rtnl(dev->ip_ptr);
536 rcu_read_unlock();
537 return in_dev;
538}
539EXPORT_SYMBOL(inetdev_by_index);
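/*
 * Illustrative sketch (editorial addition): since no reference is taken,
 * a caller not holding the RTNL brackets the lookup with RCU:
 *
 *	rcu_read_lock();
 *	in_dev = inetdev_by_index(net, ifindex);
 *	if (in_dev)
 *		...;	// use in_dev only inside this RCU section
 *	rcu_read_unlock();
 */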
540
541/* Called only from RTNL semaphored context. No locks. */
542
543struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
544 __be32 mask)
545{
546 ASSERT_RTNL();
547
548 for_primary_ifa(in_dev) {
549 if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
550 return ifa;
551 } endfor_ifa(in_dev);
552 return NULL;
553}
554
555static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
556{
557 struct ip_mreqn mreq = {
558 .imr_multiaddr.s_addr = ifa->ifa_address,
559 .imr_ifindex = ifa->ifa_dev->dev->ifindex,
560 };
561 int ret;
562
563 ASSERT_RTNL();
564
565 lock_sock(sk);
566 if (join)
567 ret = ip_mc_join_group(sk, &mreq);
568 else
569 ret = ip_mc_leave_group(sk, &mreq);
570 release_sock(sk);
571
572 return ret;
573}
574
575static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
576{
577 struct net *net = sock_net(skb->sk);
578 struct nlattr *tb[IFA_MAX+1];
579 struct in_device *in_dev;
580 struct ifaddrmsg *ifm;
581 struct in_ifaddr *ifa, **ifap;
582 int err = -EINVAL;
583
584 ASSERT_RTNL();
585
586 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
587 if (err < 0)
588 goto errout;
589
590 ifm = nlmsg_data(nlh);
591 in_dev = inetdev_by_index(net, ifm->ifa_index);
592 if (!in_dev) {
593 err = -ENODEV;
594 goto errout;
595 }
596
597 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
598 ifap = &ifa->ifa_next) {
599 if (tb[IFA_LOCAL] &&
600 ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
601 continue;
602
603 if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
604 continue;
605
606 if (tb[IFA_ADDRESS] &&
607 (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
608 !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
609 continue;
610
611 if (ipv4_is_multicast(ifa->ifa_address))
612 ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
613 __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
614 return 0;
615 }
616
617 err = -EADDRNOTAVAIL;
618errout:
619 return err;
620}
621
622#define INFINITY_LIFE_TIME 0xFFFFFFFF
623
624static void check_lifetime(struct work_struct *work)
625{
626 unsigned long now, next, next_sec, next_sched;
627 struct in_ifaddr *ifa;
628 struct hlist_node *n;
629 int i;
630
631 now = jiffies;
632 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
633
634 for (i = 0; i < IN4_ADDR_HSIZE; i++) {
635 bool change_needed = false;
636
637 rcu_read_lock();
638 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
639 unsigned long age;
640
641 if (ifa->ifa_flags & IFA_F_PERMANENT)
642 continue;
643
644 /* We try to batch several events at once. */
645 age = (now - ifa->ifa_tstamp +
646 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
647
648 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
649 age >= ifa->ifa_valid_lft) {
650 change_needed = true;
651 } else if (ifa->ifa_preferred_lft ==
652 INFINITY_LIFE_TIME) {
653 continue;
654 } else if (age >= ifa->ifa_preferred_lft) {
655 if (time_before(ifa->ifa_tstamp +
656 ifa->ifa_valid_lft * HZ, next))
657 next = ifa->ifa_tstamp +
658 ifa->ifa_valid_lft * HZ;
659
660 if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
661 change_needed = true;
662 } else if (time_before(ifa->ifa_tstamp +
663 ifa->ifa_preferred_lft * HZ,
664 next)) {
665 next = ifa->ifa_tstamp +
666 ifa->ifa_preferred_lft * HZ;
667 }
668 }
669 rcu_read_unlock();
670 if (!change_needed)
671 continue;
672 rtnl_lock();
673 hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
674 unsigned long age;
675
676 if (ifa->ifa_flags & IFA_F_PERMANENT)
677 continue;
678
679 /* We try to batch several events at once. */
680 age = (now - ifa->ifa_tstamp +
681 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
682
683 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
684 age >= ifa->ifa_valid_lft) {
685 struct in_ifaddr **ifap;
686
687 for (ifap = &ifa->ifa_dev->ifa_list;
688 *ifap != NULL; ifap = &(*ifap)->ifa_next) {
689 if (*ifap == ifa) {
690 inet_del_ifa(ifa->ifa_dev,
691 ifap, 1);
692 break;
693 }
694 }
695 } else if (ifa->ifa_preferred_lft !=
696 INFINITY_LIFE_TIME &&
697 age >= ifa->ifa_preferred_lft &&
698 !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
699 ifa->ifa_flags |= IFA_F_DEPRECATED;
700 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
701 }
702 }
703 rtnl_unlock();
704 }
705
706 next_sec = round_jiffies_up(next);
707 next_sched = next;
708
709 /* If rounded timeout is accurate enough, accept it. */
710 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
711 next_sched = next_sec;
712
713 now = jiffies;
714 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
715 if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
716 next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
717
718 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work,
719 next_sched - now);
720}
721
722static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
723 __u32 prefered_lft)
724{
725 unsigned long timeout;
726
727 ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
728
729 timeout = addrconf_timeout_fixup(valid_lft, HZ);
730 if (addrconf_finite_timeout(timeout))
731 ifa->ifa_valid_lft = timeout;
732 else
733 ifa->ifa_flags |= IFA_F_PERMANENT;
734
735 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
736 if (addrconf_finite_timeout(timeout)) {
737 if (timeout == 0)
738 ifa->ifa_flags |= IFA_F_DEPRECATED;
739 ifa->ifa_preferred_lft = timeout;
740 }
741 ifa->ifa_tstamp = jiffies;
742 if (!ifa->ifa_cstamp)
743 ifa->ifa_cstamp = ifa->ifa_tstamp;
744}
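/*
 * Worked example (editorial): with valid_lft = 3600 and prefered_lft = 0,
 * both timeouts are finite, so ifa_valid_lft becomes 3600 seconds,
 * IFA_F_DEPRECATED is set (the preferred time is already over) and
 * IFA_F_PERMANENT stays cleared; valid_lft = INFINITY_LIFE_TIME would
 * instead mark the address IFA_F_PERMANENT.
 */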
745
746static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
747 __u32 *pvalid_lft, __u32 *pprefered_lft)
748{
749 struct nlattr *tb[IFA_MAX+1];
750 struct in_ifaddr *ifa;
751 struct ifaddrmsg *ifm;
752 struct net_device *dev;
753 struct in_device *in_dev;
754 int err;
755
756 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
757 if (err < 0)
758 goto errout;
759
760 ifm = nlmsg_data(nlh);
761 err = -EINVAL;
762 if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
763 goto errout;
764
765 dev = __dev_get_by_index(net, ifm->ifa_index);
766 err = -ENODEV;
767 if (!dev)
768 goto errout;
769
770 in_dev = __in_dev_get_rtnl(dev);
771 err = -ENOBUFS;
772 if (!in_dev)
773 goto errout;
774
775 ifa = inet_alloc_ifa();
776 if (!ifa)
777 /*
778 * A potential indev allocation can be left alive; it stays
779 * assigned to its device and is destroyed with it.
780 */
781 goto errout;
782
783 ipv4_devconf_setall(in_dev);
784 neigh_parms_data_state_setall(in_dev->arp_parms);
785 in_dev_hold(in_dev);
786
787 if (!tb[IFA_ADDRESS])
788 tb[IFA_ADDRESS] = tb[IFA_LOCAL];
789
790 INIT_HLIST_NODE(&ifa->hash);
791 ifa->ifa_prefixlen = ifm->ifa_prefixlen;
792 ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
793 ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
794 ifm->ifa_flags;
795 ifa->ifa_scope = ifm->ifa_scope;
796 ifa->ifa_dev = in_dev;
797
798 ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
799 ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
800
801 if (tb[IFA_BROADCAST])
802 ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
803
804 if (tb[IFA_LABEL])
805 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
806 else
807 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
808
809 if (tb[IFA_CACHEINFO]) {
810 struct ifa_cacheinfo *ci;
811
812 ci = nla_data(tb[IFA_CACHEINFO]);
813 if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
814 err = -EINVAL;
815 goto errout_free;
816 }
817 *pvalid_lft = ci->ifa_valid;
818 *pprefered_lft = ci->ifa_prefered;
819 }
820
821 return ifa;
822
823errout_free:
824 inet_free_ifa(ifa);
825errout:
826 return ERR_PTR(err);
827}
828
829static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
830{
831 struct in_device *in_dev = ifa->ifa_dev;
832 struct in_ifaddr *ifa1, **ifap;
833
834 if (!ifa->ifa_local)
835 return NULL;
836
837 for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
838 ifap = &ifa1->ifa_next) {
839 if (ifa1->ifa_mask == ifa->ifa_mask &&
840 inet_ifa_match(ifa1->ifa_address, ifa) &&
841 ifa1->ifa_local == ifa->ifa_local)
842 return ifa1;
843 }
844 return NULL;
845}
846
847static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
848{
849 struct net *net = sock_net(skb->sk);
850 struct in_ifaddr *ifa;
851 struct in_ifaddr *ifa_existing;
852 __u32 valid_lft = INFINITY_LIFE_TIME;
853 __u32 prefered_lft = INFINITY_LIFE_TIME;
854
855 ASSERT_RTNL();
856
857 ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft);
858 if (IS_ERR(ifa))
859 return PTR_ERR(ifa);
860
861 ifa_existing = find_matching_ifa(ifa);
862 if (!ifa_existing) {
863 /* It would be best to check for !NLM_F_CREATE here but
864 * userspace already relies on not having to provide this.
865 */
866 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
867 if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
868 int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
869 true, ifa);
870
871 if (ret < 0) {
872 inet_free_ifa(ifa);
873 return ret;
874 }
875 }
876 return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
877 } else {
878 inet_free_ifa(ifa);
879
880 if (nlh->nlmsg_flags & NLM_F_EXCL ||
881 !(nlh->nlmsg_flags & NLM_F_REPLACE))
882 return -EEXIST;
883 ifa = ifa_existing;
884 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
885 cancel_delayed_work(&check_lifetime_work);
886 queue_delayed_work(system_power_efficient_wq,
887 &check_lifetime_work, 0);
888 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
889 }
890 return 0;
891}
892
893/*
894 * Determine a default network mask, based on the IP address.
895 */
896
897static int inet_abc_len(__be32 addr)
898{
899 int rc = -1; /* Something else, probably a multicast. */
900
901 if (ipv4_is_zeronet(addr))
902 rc = 0;
903 else {
904 __u32 haddr = ntohl(addr);
905
906 if (IN_CLASSA(haddr))
907 rc = 8;
908 else if (IN_CLASSB(haddr))
909 rc = 16;
910 else if (IN_CLASSC(haddr))
911 rc = 24;
912 }
913
914 return rc;
915}
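/*
 * Examples (editorial): the classful rules above give 10.1.2.3 -> 8,
 * 172.16.0.1 -> 16, 192.168.1.1 -> 24, 0.0.0.0 -> 0, and a multicast
 * address such as 224.0.0.1 -> -1.
 */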
916
917
918int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
919{
920 struct ifreq ifr;
921 struct sockaddr_in sin_orig;
922 struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
923 struct in_device *in_dev;
924 struct in_ifaddr **ifap = NULL;
925 struct in_ifaddr *ifa = NULL;
926 struct net_device *dev;
927 char *colon;
928 int ret = -EFAULT;
929 int tryaddrmatch = 0;
930
931 /*
932 * Fetch the caller's info block into kernel space
933 */
934
935 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
936 goto out;
937 ifr.ifr_name[IFNAMSIZ - 1] = 0;
938
939 /* save original address for comparison */
940 memcpy(&sin_orig, sin, sizeof(*sin));
941
942 colon = strchr(ifr.ifr_name, ':');
943 if (colon)
944 *colon = 0;
945
946 dev_load(net, ifr.ifr_name);
947
948 switch (cmd) {
949 case SIOCGIFADDR: /* Get interface address */
950 case SIOCGIFBRDADDR: /* Get the broadcast address */
951 case SIOCGIFDSTADDR: /* Get the destination address */
952 case SIOCGIFNETMASK: /* Get the netmask for the interface */
953 /* Note that these ioctls will not sleep,
954 so we do not impose a lock.
955 One day we will be forced to put a shlock here (I mean SMP).
956 */
957 tryaddrmatch = (sin_orig.sin_family == AF_INET);
958 memset(sin, 0, sizeof(*sin));
959 sin->sin_family = AF_INET;
960 break;
961
962 case SIOCSIFFLAGS:
963 ret = -EPERM;
964 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
965 goto out;
966 break;
967 case SIOCSIFADDR: /* Set interface address (and family) */
968 case SIOCSIFBRDADDR: /* Set the broadcast address */
969 case SIOCSIFDSTADDR: /* Set the destination address */
970 case SIOCSIFNETMASK: /* Set the netmask for the interface */
971 ret = -EPERM;
972 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
973 goto out;
974 ret = -EINVAL;
975 if (sin->sin_family != AF_INET)
976 goto out;
977 break;
978 default:
979 ret = -EINVAL;
980 goto out;
981 }
982
983 rtnl_lock();
984
985 ret = -ENODEV;
986 dev = __dev_get_by_name(net, ifr.ifr_name);
987 if (!dev)
988 goto done;
989
990 if (colon)
991 *colon = ':';
992
993 in_dev = __in_dev_get_rtnl(dev);
994 if (in_dev) {
995 if (tryaddrmatch) {
996 /* Matthias Andree */
997 /* compare label and address (4.4BSD style) */
998 /* note: we only do this for a limited set of ioctls
999 and only if the original address family was AF_INET.
1000 This is checked above. */
1001 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
1002 ifap = &ifa->ifa_next) {
1003 if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
1004 sin_orig.sin_addr.s_addr ==
1005 ifa->ifa_local) {
1006 break; /* found */
1007 }
1008 }
1009 }
1010 /* we didn't get a match; maybe the application is
1011 4.3BSD-style and passed in junk, so we fall back to
1012 comparing just the label */
1013 if (!ifa) {
1014 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
1015 ifap = &ifa->ifa_next)
1016 if (!strcmp(ifr.ifr_name, ifa->ifa_label))
1017 break;
1018 }
1019 }
1020
1021 ret = -EADDRNOTAVAIL;
1022 if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
1023 goto done;
1024
1025 switch (cmd) {
1026 case SIOCGIFADDR: /* Get interface address */
1027 sin->sin_addr.s_addr = ifa->ifa_local;
1028 goto rarok;
1029
1030 case SIOCGIFBRDADDR: /* Get the broadcast address */
1031 sin->sin_addr.s_addr = ifa->ifa_broadcast;
1032 goto rarok;
1033
1034 case SIOCGIFDSTADDR: /* Get the destination address */
1035 sin->sin_addr.s_addr = ifa->ifa_address;
1036 goto rarok;
1037
1038 case SIOCGIFNETMASK: /* Get the netmask for the interface */
1039 sin->sin_addr.s_addr = ifa->ifa_mask;
1040 goto rarok;
1041
1042 case SIOCSIFFLAGS:
1043 if (colon) {
1044 ret = -EADDRNOTAVAIL;
1045 if (!ifa)
1046 break;
1047 ret = 0;
1048 if (!(ifr.ifr_flags & IFF_UP))
1049 inet_del_ifa(in_dev, ifap, 1);
1050 break;
1051 }
1052 ret = dev_change_flags(dev, ifr.ifr_flags);
1053 break;
1054
1055 case SIOCSIFADDR: /* Set interface address (and family) */
1056 ret = -EINVAL;
1057 if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1058 break;
1059
1060 if (!ifa) {
1061 ret = -ENOBUFS;
1062 ifa = inet_alloc_ifa();
1063 if (!ifa)
1064 break;
1065 INIT_HLIST_NODE(&ifa->hash);
1066 if (colon)
1067 memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
1068 else
1069 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1070 } else {
1071 ret = 0;
1072 if (ifa->ifa_local == sin->sin_addr.s_addr)
1073 break;
1074 inet_del_ifa(in_dev, ifap, 0);
1075 ifa->ifa_broadcast = 0;
1076 ifa->ifa_scope = 0;
1077 }
1078
1079 ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
1080
1081 if (!(dev->flags & IFF_POINTOPOINT)) {
1082 ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
1083 ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
1084 if ((dev->flags & IFF_BROADCAST) &&
1085 ifa->ifa_prefixlen < 31)
1086 ifa->ifa_broadcast = ifa->ifa_address |
1087 ~ifa->ifa_mask;
1088 } else {
1089 ifa->ifa_prefixlen = 32;
1090 ifa->ifa_mask = inet_make_mask(32);
1091 }
1092 set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
1093 ret = inet_set_ifa(dev, ifa);
1094 break;
1095
1096 case SIOCSIFBRDADDR: /* Set the broadcast address */
1097 ret = 0;
1098 if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
1099 inet_del_ifa(in_dev, ifap, 0);
1100 ifa->ifa_broadcast = sin->sin_addr.s_addr;
1101 inet_insert_ifa(ifa);
1102 }
1103 break;
1104
1105 case SIOCSIFDSTADDR: /* Set the destination address */
1106 ret = 0;
1107 if (ifa->ifa_address == sin->sin_addr.s_addr)
1108 break;
1109 ret = -EINVAL;
1110 if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1111 break;
1112 ret = 0;
1113 inet_del_ifa(in_dev, ifap, 0);
1114 ifa->ifa_address = sin->sin_addr.s_addr;
1115 inet_insert_ifa(ifa);
1116 break;
1117
1118 case SIOCSIFNETMASK: /* Set the netmask for the interface */
1119
1120 /*
1121 * The mask we set must be legal.
1122 */
1123 ret = -EINVAL;
1124 if (bad_mask(sin->sin_addr.s_addr, 0))
1125 break;
1126 ret = 0;
1127 if (ifa->ifa_mask != sin->sin_addr.s_addr) {
1128 __be32 old_mask = ifa->ifa_mask;
1129 inet_del_ifa(in_dev, ifap, 0);
1130 ifa->ifa_mask = sin->sin_addr.s_addr;
1131 ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
1132
1133 /* If the current broadcast address matches
1134 * the current netmask, then recalculate
1135 * the broadcast address. Otherwise it's a
1136 * funny address, so don't touch it since
1137 * the user seems to know what (s)he's doing...
1138 */
1139 if ((dev->flags & IFF_BROADCAST) &&
1140 (ifa->ifa_prefixlen < 31) &&
1141 (ifa->ifa_broadcast ==
1142 (ifa->ifa_local|~old_mask))) {
1143 ifa->ifa_broadcast = (ifa->ifa_local |
1144 ~sin->sin_addr.s_addr);
1145 }
1146 inet_insert_ifa(ifa);
1147 }
1148 break;
1149 }
1150done:
1151 rtnl_unlock();
1152out:
1153 return ret;
1154rarok:
1155 rtnl_unlock();
1156 ret = copy_to_user(arg, &ifr, sizeof(struct ifreq)) ? -EFAULT : 0;
1157 goto out;
1158}
1159
1160static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
1161{
1162 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1163 struct in_ifaddr *ifa;
1164 struct ifreq ifr;
1165 int done = 0;
1166
1167 if (!in_dev)
1168 goto out;
1169
1170 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1171 if (!buf) {
1172 done += sizeof(ifr);
1173 continue;
1174 }
1175 if (len < (int) sizeof(ifr))
1176 break;
1177 memset(&ifr, 0, sizeof(struct ifreq));
1178 strcpy(ifr.ifr_name, ifa->ifa_label);
1179
1180 (*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
1181 (*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
1182 ifa->ifa_local;
1183
1184 if (copy_to_user(buf, &ifr, sizeof(struct ifreq))) {
1185 done = -EFAULT;
1186 break;
1187 }
1188 buf += sizeof(struct ifreq);
1189 len -= sizeof(struct ifreq);
1190 done += sizeof(struct ifreq);
1191 }
1192out:
1193 return done;
1194}
1195
1196__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
1197{
1198 __be32 addr = 0;
1199 struct in_device *in_dev;
1200 struct net *net = dev_net(dev);
1201 int master_idx;
1202
1203 rcu_read_lock();
1204 in_dev = __in_dev_get_rcu(dev);
1205 if (!in_dev)
1206 goto no_in_dev;
1207
1208 for_primary_ifa(in_dev) {
1209 if (ifa->ifa_scope > scope)
1210 continue;
1211 if (!dst || inet_ifa_match(dst, ifa)) {
1212 addr = ifa->ifa_local;
1213 break;
1214 }
1215 if (!addr)
1216 addr = ifa->ifa_local;
1217 } endfor_ifa(in_dev);
1218
1219 if (addr)
1220 goto out_unlock;
1221no_in_dev:
1222 master_idx = l3mdev_master_ifindex_rcu(dev);
1223
1224 /* For VRFs, the VRF device takes the place of the loopback device,
1225 * with addresses on it being preferred. Note in such cases the
1226 * loopback device will be among the devices that fail the master_idx
1227 * equality check in the loop below.
1228 */
1229 if (master_idx &&
1230 (dev = dev_get_by_index_rcu(net, master_idx)) &&
1231 (in_dev = __in_dev_get_rcu(dev))) {
1232 for_primary_ifa(in_dev) {
1233 if (ifa->ifa_scope != RT_SCOPE_LINK &&
1234 ifa->ifa_scope <= scope) {
1235 addr = ifa->ifa_local;
1236 goto out_unlock;
1237 }
1238 } endfor_ifa(in_dev);
1239 }
1240
1241 /* Non-loopback addresses on loopback should be preferred
1242 in this case. It is important that lo is the first interface
1243 in the dev_base list.
1244 */
1245 for_each_netdev_rcu(net, dev) {
1246 if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1247 continue;
1248
1249 in_dev = __in_dev_get_rcu(dev);
1250 if (!in_dev)
1251 continue;
1252
1253 for_primary_ifa(in_dev) {
1254 if (ifa->ifa_scope != RT_SCOPE_LINK &&
1255 ifa->ifa_scope <= scope) {
1256 addr = ifa->ifa_local;
1257 goto out_unlock;
1258 }
1259 } endfor_ifa(in_dev);
1260 }
1261out_unlock:
1262 rcu_read_unlock();
1263 return addr;
1264}
1265EXPORT_SYMBOL(inet_select_addr);
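/*
 * Illustrative sketch (editorial addition): picking a source address for
 * traffic towards dst on a given device might look like:
 *
 *	__be32 src = inet_select_addr(dev, dst, RT_SCOPE_UNIVERSE);
 *	if (!src)
 *		...;	// no suitable local address on dev (or its L3 master)
 */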
1266
1267static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
1268 __be32 local, int scope)
1269{
1270 int same = 0;
1271 __be32 addr = 0;
1272
1273 for_ifa(in_dev) {
1274 if (!addr &&
1275 (local == ifa->ifa_local || !local) &&
1276 ifa->ifa_scope <= scope) {
1277 addr = ifa->ifa_local;
1278 if (same)
1279 break;
1280 }
1281 if (!same) {
1282 same = (!local || inet_ifa_match(local, ifa)) &&
1283 (!dst || inet_ifa_match(dst, ifa));
1284 if (same && addr) {
1285 if (local || !dst)
1286 break;
1287 /* Is the selected addr in the dst subnet? */
1288 if (inet_ifa_match(addr, ifa))
1289 break;
1290 /* No, then can we use new local src? */
1291 if (ifa->ifa_scope <= scope) {
1292 addr = ifa->ifa_local;
1293 break;
1294 }
1295 /* search for a larger dst subnet for addr */
1296 same = 0;
1297 }
1298 }
1299 } endfor_ifa(in_dev);
1300
1301 return same ? addr : 0;
1302}
1303
1304/*
1305 * Confirm that local IP address exists using wildcards:
1306 * - net: netns to check, cannot be NULL
1307 * - in_dev: only on this interface, NULL=any interface
1308 * - dst: only in the same subnet as dst, 0=any dst
1309 * - local: address, 0=autoselect the local address
1310 * - scope: maximum allowed scope value for the local address
1311 */
1312__be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
1313 __be32 dst, __be32 local, int scope)
1314{
1315 __be32 addr = 0;
1316 struct net_device *dev;
1317
1318 if (in_dev)
1319 return confirm_addr_indev(in_dev, dst, local, scope);
1320
1321 rcu_read_lock();
1322 for_each_netdev_rcu(net, dev) {
1323 in_dev = __in_dev_get_rcu(dev);
1324 if (in_dev) {
1325 addr = confirm_addr_indev(in_dev, dst, local, scope);
1326 if (addr)
1327 break;
1328 }
1329 }
1330 rcu_read_unlock();
1331
1332 return addr;
1333}
1334EXPORT_SYMBOL(inet_confirm_addr);
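/*
 * Usage note (editorial): per the comment above, in_dev == NULL widens
 * the search to every interface in the namespace, e.g.
 *
 *	if (inet_confirm_addr(net, NULL, 0, local, RT_SCOPE_HOST))
 *		...;	// 'local' is configured somewhere in this netns
 */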
1335
1336/*
1337 * Device notifier
1338 */
1339
1340int register_inetaddr_notifier(struct notifier_block *nb)
1341{
1342 return blocking_notifier_chain_register(&inetaddr_chain, nb);
1343}
1344EXPORT_SYMBOL(register_inetaddr_notifier);
1345
1346int unregister_inetaddr_notifier(struct notifier_block *nb)
1347{
1348 return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
1349}
1350EXPORT_SYMBOL(unregister_inetaddr_notifier);
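/*
 * Illustrative sketch (editorial addition): a subsystem interested in
 * IPv4 address changes registers a notifier block; the callback gets the
 * affected struct in_ifaddr as the data pointer for NETDEV_UP/NETDEV_DOWN:
 *
 *	static int my_inetaddr_event(struct notifier_block *nb,
 *				     unsigned long event, void *ptr)
 *	{
 *		struct in_ifaddr *ifa = ptr;
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_inetaddr_event,
 *	};
 *	register_inetaddr_notifier(&my_nb);
 */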
1351
1352/* Rename ifa_labels for a device name change. Make some effort to preserve
1353 * existing alias numbering and to create unique labels if possible.
1354*/
1355static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1356{
1357 struct in_ifaddr *ifa;
1358 int named = 0;
1359
1360 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1361 char old[IFNAMSIZ], *dot;
1362
1363 memcpy(old, ifa->ifa_label, IFNAMSIZ);
1364 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1365 if (named++ == 0)
1366 goto skip;
1367 dot = strchr(old, ':');
1368 if (!dot) {
1369 sprintf(old, ":%d", named);
1370 dot = old;
1371 }
1372 if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
1373 strcat(ifa->ifa_label, dot);
1374 else
1375 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1376skip:
1377 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
1378 }
1379}
1380
1381static bool inetdev_valid_mtu(unsigned int mtu)
1382{
1383 return mtu >= 68;
1384}
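/* Editorial note: 68 is the minimum IPv4 MTU required by RFC 791. */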
1385
1386static void inetdev_send_gratuitous_arp(struct net_device *dev,
1387 struct in_device *in_dev)
1388
1389{
1390 struct in_ifaddr *ifa;
1391
1392 for (ifa = in_dev->ifa_list; ifa;
1393 ifa = ifa->ifa_next) {
1394 arp_send(ARPOP_REQUEST, ETH_P_ARP,
1395 ifa->ifa_local, dev,
1396 ifa->ifa_local, NULL,
1397 dev->dev_addr, NULL);
1398 }
1399}
1400
1401/* Called only under RTNL semaphore */
1402
1403static int inetdev_event(struct notifier_block *this, unsigned long event,
1404 void *ptr)
1405{
1406 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1407 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1408
1409 ASSERT_RTNL();
1410
1411 if (!in_dev) {
1412 if (event == NETDEV_REGISTER) {
1413 in_dev = inetdev_init(dev);
1414 if (IS_ERR(in_dev))
1415 return notifier_from_errno(PTR_ERR(in_dev));
1416 if (dev->flags & IFF_LOOPBACK) {
1417 IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
1418 IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
1419 }
1420 } else if (event == NETDEV_CHANGEMTU) {
1421 /* Re-enabling IP */
1422 if (inetdev_valid_mtu(dev->mtu))
1423 in_dev = inetdev_init(dev);
1424 }
1425 goto out;
1426 }
1427
1428 switch (event) {
1429 case NETDEV_REGISTER:
1430 pr_debug("%s: bug\n", __func__);
1431 RCU_INIT_POINTER(dev->ip_ptr, NULL);
1432 break;
1433 case NETDEV_UP:
1434 if (!inetdev_valid_mtu(dev->mtu))
1435 break;
1436 if (dev->flags & IFF_LOOPBACK) {
1437 struct in_ifaddr *ifa = inet_alloc_ifa();
1438
1439 if (ifa) {
1440 INIT_HLIST_NODE(&ifa->hash);
1441 ifa->ifa_local =
1442 ifa->ifa_address = htonl(INADDR_LOOPBACK);
1443 ifa->ifa_prefixlen = 8;
1444 ifa->ifa_mask = inet_make_mask(8);
1445 in_dev_hold(in_dev);
1446 ifa->ifa_dev = in_dev;
1447 ifa->ifa_scope = RT_SCOPE_HOST;
1448 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1449 set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
1450 INFINITY_LIFE_TIME);
1451 ipv4_devconf_setall(in_dev);
1452 neigh_parms_data_state_setall(in_dev->arp_parms);
1453 inet_insert_ifa(ifa);
1454 }
1455 }
1456 ip_mc_up(in_dev);
1457 /* fall through */
1458 case NETDEV_CHANGEADDR:
1459 if (!IN_DEV_ARP_NOTIFY(in_dev))
1460 break;
1461 /* fall through */
1462 case NETDEV_NOTIFY_PEERS:
1463 /* Send gratuitous ARP to notify of link change */
1464 inetdev_send_gratuitous_arp(dev, in_dev);
1465 break;
1466 case NETDEV_DOWN:
1467 ip_mc_down(in_dev);
1468 break;
1469 case NETDEV_PRE_TYPE_CHANGE:
1470 ip_mc_unmap(in_dev);
1471 break;
1472 case NETDEV_POST_TYPE_CHANGE:
1473 ip_mc_remap(in_dev);
1474 break;
1475 case NETDEV_CHANGEMTU:
1476 if (inetdev_valid_mtu(dev->mtu))
1477 break;
1478 /* disable IP when MTU is not enough */
1479 case NETDEV_UNREGISTER:
1480 inetdev_destroy(in_dev);
1481 break;
1482 case NETDEV_CHANGENAME:
1483 /* Do not notify about the label change; this event is
1484 * not interesting to applications using netlink.
1485 */
1486 inetdev_changename(dev, in_dev);
1487
1488 devinet_sysctl_unregister(in_dev);
1489 devinet_sysctl_register(in_dev);
1490 break;
1491 }
1492out:
1493 return NOTIFY_DONE;
1494}
1495
1496static struct notifier_block ip_netdev_notifier = {
1497 .notifier_call = inetdev_event,
1498};
1499
1500static size_t inet_nlmsg_size(void)
1501{
1502 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
1503 + nla_total_size(4) /* IFA_ADDRESS */
1504 + nla_total_size(4) /* IFA_LOCAL */
1505 + nla_total_size(4) /* IFA_BROADCAST */
1506 + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
1507 + nla_total_size(4) /* IFA_FLAGS */
1508 + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
1509}
1510
1511static inline u32 cstamp_delta(unsigned long cstamp)
1512{
1513 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
1514}
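/*
 * Worked example (editorial): cstamp_delta() converts a jiffies stamp to
 * hundredths of a second since boot; with HZ = 1000, a stamp taken 5000
 * jiffies after INITIAL_JIFFIES yields 5000 * 100 / 1000 = 500, i.e.
 * 5.00 s, the unit expected in struct ifa_cacheinfo.
 */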
1515
1516static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
1517 unsigned long tstamp, u32 preferred, u32 valid)
1518{
1519 struct ifa_cacheinfo ci;
1520
1521 ci.cstamp = cstamp_delta(cstamp);
1522 ci.tstamp = cstamp_delta(tstamp);
1523 ci.ifa_prefered = preferred;
1524 ci.ifa_valid = valid;
1525
1526 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
1527}
1528
1529static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1530 u32 portid, u32 seq, int event, unsigned int flags)
1531{
1532 struct ifaddrmsg *ifm;
1533 struct nlmsghdr *nlh;
1534 u32 preferred, valid;
1535
1536 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
1537 if (!nlh)
1538 return -EMSGSIZE;
1539
1540 ifm = nlmsg_data(nlh);
1541 ifm->ifa_family = AF_INET;
1542 ifm->ifa_prefixlen = ifa->ifa_prefixlen;
1543 ifm->ifa_flags = ifa->ifa_flags;
1544 ifm->ifa_scope = ifa->ifa_scope;
1545 ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
1546
1547 if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
1548 preferred = ifa->ifa_preferred_lft;
1549 valid = ifa->ifa_valid_lft;
1550 if (preferred != INFINITY_LIFE_TIME) {
1551 long tval = (jiffies - ifa->ifa_tstamp) / HZ;
1552
1553 if (preferred > tval)
1554 preferred -= tval;
1555 else
1556 preferred = 0;
1557 if (valid != INFINITY_LIFE_TIME) {
1558 if (valid > tval)
1559 valid -= tval;
1560 else
1561 valid = 0;
1562 }
1563 }
1564 } else {
1565 preferred = INFINITY_LIFE_TIME;
1566 valid = INFINITY_LIFE_TIME;
1567 }
1568 if ((ifa->ifa_address &&
1569 nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
1570 (ifa->ifa_local &&
1571 nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
1572 (ifa->ifa_broadcast &&
1573 nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
1574 (ifa->ifa_label[0] &&
1575 nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
1576 nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
1577 put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
1578 preferred, valid))
1579 goto nla_put_failure;
1580
1581 nlmsg_end(skb, nlh);
1582 return 0;
1583
1584nla_put_failure:
1585 nlmsg_cancel(skb, nlh);
1586 return -EMSGSIZE;
1587}
1588
1589static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1590{
1591 struct net *net = sock_net(skb->sk);
1592 int h, s_h;
1593 int idx, s_idx;
1594 int ip_idx, s_ip_idx;
1595 struct net_device *dev;
1596 struct in_device *in_dev;
1597 struct in_ifaddr *ifa;
1598 struct hlist_head *head;
1599
1600 s_h = cb->args[0];
1601 s_idx = idx = cb->args[1];
1602 s_ip_idx = ip_idx = cb->args[2];
1603
1604 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1605 idx = 0;
1606 head = &net->dev_index_head[h];
1607 rcu_read_lock();
1608 cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
1609 net->dev_base_seq;
1610 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1611 if (idx < s_idx)
1612 goto cont;
1613 if (h > s_h || idx > s_idx)
1614 s_ip_idx = 0;
1615 in_dev = __in_dev_get_rcu(dev);
1616 if (!in_dev)
1617 goto cont;
1618
1619 for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
1620 ifa = ifa->ifa_next, ip_idx++) {
1621 if (ip_idx < s_ip_idx)
1622 continue;
1623 if (inet_fill_ifaddr(skb, ifa,
1624 NETLINK_CB(cb->skb).portid,
1625 cb->nlh->nlmsg_seq,
1626 RTM_NEWADDR, NLM_F_MULTI) < 0) {
1627 rcu_read_unlock();
1628 goto done;
1629 }
1630 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1631 }
1632cont:
1633 idx++;
1634 }
1635 rcu_read_unlock();
1636 }
1637
1638done:
1639 cb->args[0] = h;
1640 cb->args[1] = idx;
1641 cb->args[2] = ip_idx;
1642
1643 return skb->len;
1644}
1645
1646static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
1647 u32 portid)
1648{
1649 struct sk_buff *skb;
1650 u32 seq = nlh ? nlh->nlmsg_seq : 0;
1651 int err = -ENOBUFS;
1652 struct net *net;
1653
1654 net = dev_net(ifa->ifa_dev->dev);
1655 skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
1656 if (!skb)
1657 goto errout;
1658
1659 err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
1660 if (err < 0) {
1661 /* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1662 WARN_ON(err == -EMSGSIZE);
1663 kfree_skb(skb);
1664 goto errout;
1665 }
1666 rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
1667 return;
1668errout:
1669 if (err < 0)
1670 rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
1671}
1672
1673static size_t inet_get_link_af_size(const struct net_device *dev,
1674 u32 ext_filter_mask)
1675{
1676 struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1677
1678 if (!in_dev)
1679 return 0;
1680
1681 return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
1682}
1683
1684static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
1685 u32 ext_filter_mask)
1686{
1687 struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1688 struct nlattr *nla;
1689 int i;
1690
1691 if (!in_dev)
1692 return -ENODATA;
1693
1694 nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
1695 if (!nla)
1696 return -EMSGSIZE;
1697
1698 for (i = 0; i < IPV4_DEVCONF_MAX; i++)
1699 ((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
1700
1701 return 0;
1702}
1703
1704static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
1705 [IFLA_INET_CONF] = { .type = NLA_NESTED },
1706};
1707
1708static int inet_validate_link_af(const struct net_device *dev,
1709 const struct nlattr *nla)
1710{
1711 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1712 int err, rem;
1713
1714 if (dev && !__in_dev_get_rtnl(dev))
1715 return -EAFNOSUPPORT;
1716
1717 err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
1718 if (err < 0)
1719 return err;
1720
1721 if (tb[IFLA_INET_CONF]) {
1722 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
1723 int cfgid = nla_type(a);
1724
1725 if (nla_len(a) < 4)
1726 return -EINVAL;
1727
1728 if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
1729 return -EINVAL;
1730 }
1731 }
1732
1733 return 0;
1734}
1735
1736static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
1737{
1738 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1739 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1740 int rem;
1741
1742 if (!in_dev)
1743 return -EAFNOSUPPORT;
1744
1745 if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0)
1746 BUG();
1747
1748 if (tb[IFLA_INET_CONF]) {
1749 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
1750 ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
1751 }
1752
1753 return 0;
1754}
1755
1756static int inet_netconf_msgsize_devconf(int type)
1757{
1758 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
1759 + nla_total_size(4); /* NETCONFA_IFINDEX */
1760 bool all = false;
1761
1762 if (type == NETCONFA_ALL)
1763 all = true;
1764
1765 if (all || type == NETCONFA_FORWARDING)
1766 size += nla_total_size(4);
1767 if (all || type == NETCONFA_RP_FILTER)
1768 size += nla_total_size(4);
1769 if (all || type == NETCONFA_MC_FORWARDING)
1770 size += nla_total_size(4);
1771 if (all || type == NETCONFA_PROXY_NEIGH)
1772 size += nla_total_size(4);
1773 if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
1774 size += nla_total_size(4);
1775
1776 return size;
1777}
1778
1779static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
1780 struct ipv4_devconf *devconf, u32 portid,
1781 u32 seq, int event, unsigned int flags,
1782 int type)
1783{
1784 struct nlmsghdr *nlh;
1785 struct netconfmsg *ncm;
1786 bool all = false;
1787
1788 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
1789 flags);
1790 if (!nlh)
1791 return -EMSGSIZE;
1792
1793 if (type == NETCONFA_ALL)
1794 all = true;
1795
1796 ncm = nlmsg_data(nlh);
1797 ncm->ncm_family = AF_INET;
1798
1799 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
1800 goto nla_put_failure;
1801
1802 if ((all || type == NETCONFA_FORWARDING) &&
1803 nla_put_s32(skb, NETCONFA_FORWARDING,
1804 IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
1805 goto nla_put_failure;
1806 if ((all || type == NETCONFA_RP_FILTER) &&
1807 nla_put_s32(skb, NETCONFA_RP_FILTER,
1808 IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
1809 goto nla_put_failure;
1810 if ((all || type == NETCONFA_MC_FORWARDING) &&
1811 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
1812 IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
1813 goto nla_put_failure;
1814 if ((all || type == NETCONFA_PROXY_NEIGH) &&
1815 nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
1816 IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
1817 goto nla_put_failure;
1818 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
1819 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
1820 IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
1821 goto nla_put_failure;
1822
1823 nlmsg_end(skb, nlh);
1824 return 0;
1825
1826nla_put_failure:
1827 nlmsg_cancel(skb, nlh);
1828 return -EMSGSIZE;
1829}
1830
1831void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
1832 struct ipv4_devconf *devconf)
1833{
1834 struct sk_buff *skb;
1835 int err = -ENOBUFS;
1836
1837 skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC);
1838 if (!skb)
1839 goto errout;
1840
1841 err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
1842 RTM_NEWNETCONF, 0, type);
1843 if (err < 0) {
1844 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1845 WARN_ON(err == -EMSGSIZE);
1846 kfree_skb(skb);
1847 goto errout;
1848 }
1849 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_ATOMIC);
1850 return;
1851errout:
1852 if (err < 0)
1853 rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
1854}
1855
1856static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
1857 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
1858 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
1859 [NETCONFA_RP_FILTER] = { .len = sizeof(int) },
1860 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
1861 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
1862};
1863
1864static int inet_netconf_get_devconf(struct sk_buff *in_skb,
1865 struct nlmsghdr *nlh)
1866{
1867 struct net *net = sock_net(in_skb->sk);
1868 struct nlattr *tb[NETCONFA_MAX+1];
1869 struct netconfmsg *ncm;
1870 struct sk_buff *skb;
1871 struct ipv4_devconf *devconf;
1872 struct in_device *in_dev;
1873 struct net_device *dev;
1874 int ifindex;
1875 int err;
1876
1877 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
1878 devconf_ipv4_policy);
1879 if (err < 0)
1880 goto errout;
1881
1882 err = -EINVAL;
1883 if (!tb[NETCONFA_IFINDEX])
1884 goto errout;
1885
1886 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
1887 switch (ifindex) {
1888 case NETCONFA_IFINDEX_ALL:
1889 devconf = net->ipv4.devconf_all;
1890 break;
1891 case NETCONFA_IFINDEX_DEFAULT:
1892 devconf = net->ipv4.devconf_dflt;
1893 break;
1894 default:
1895 dev = __dev_get_by_index(net, ifindex);
1896 if (!dev)
1897 goto errout;
1898 in_dev = __in_dev_get_rtnl(dev);
1899 if (!in_dev)
1900 goto errout;
1901 devconf = &in_dev->cnf;
1902 break;
1903 }
1904
1905 err = -ENOBUFS;
1906 skb = nlmsg_new(inet_netconf_msgsize_devconf(NETCONFA_ALL), GFP_ATOMIC);
1907 if (!skb)
1908 goto errout;
1909
1910 err = inet_netconf_fill_devconf(skb, ifindex, devconf,
1911 NETLINK_CB(in_skb).portid,
1912 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
1913 NETCONFA_ALL);
1914 if (err < 0) {
1915 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1916 WARN_ON(err == -EMSGSIZE);
1917 kfree_skb(skb);
1918 goto errout;
1919 }
1920 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
1921errout:
1922 return err;
1923}
1924
1925static int inet_netconf_dump_devconf(struct sk_buff *skb,
1926 struct netlink_callback *cb)
1927{
1928 struct net *net = sock_net(skb->sk);
1929 int h, s_h;
1930 int idx, s_idx;
1931 struct net_device *dev;
1932 struct in_device *in_dev;
1933 struct hlist_head *head;
1934
1935 s_h = cb->args[0];
1936 s_idx = idx = cb->args[1];
1937
1938 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1939 idx = 0;
1940 head = &net->dev_index_head[h];
1941 rcu_read_lock();
1942 cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
1943 net->dev_base_seq;
1944 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1945 if (idx < s_idx)
1946 goto cont;
1947 in_dev = __in_dev_get_rcu(dev);
1948 if (!in_dev)
1949 goto cont;
1950
1951 if (inet_netconf_fill_devconf(skb, dev->ifindex,
1952 &in_dev->cnf,
1953 NETLINK_CB(cb->skb).portid,
1954 cb->nlh->nlmsg_seq,
1955 RTM_NEWNETCONF,
1956 NLM_F_MULTI,
1957 NETCONFA_ALL) < 0) {
1958 rcu_read_unlock();
1959 goto done;
1960 }
1961 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1962cont:
1963 idx++;
1964 }
1965 rcu_read_unlock();
1966 }
1967 if (h == NETDEV_HASHENTRIES) {
1968 if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
1969 net->ipv4.devconf_all,
1970 NETLINK_CB(cb->skb).portid,
1971 cb->nlh->nlmsg_seq,
1972 RTM_NEWNETCONF, NLM_F_MULTI,
1973 NETCONFA_ALL) < 0)
1974 goto done;
1975 else
1976 h++;
1977 }
1978 if (h == NETDEV_HASHENTRIES + 1) {
1979 if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
1980 net->ipv4.devconf_dflt,
1981 NETLINK_CB(cb->skb).portid,
1982 cb->nlh->nlmsg_seq,
1983 RTM_NEWNETCONF, NLM_F_MULTI,
1984 NETCONFA_ALL) < 0)
1985 goto done;
1986 else
1987 h++;
1988 }
1989done:
1990 cb->args[0] = h;
1991 cb->args[1] = idx;
1992
1993 return skb->len;
1994}
1995
1996#ifdef CONFIG_SYSCTL
1997
1998static void devinet_copy_dflt_conf(struct net *net, int i)
1999{
2000 struct net_device *dev;
2001
2002 rcu_read_lock();
2003 for_each_netdev_rcu(net, dev) {
2004 struct in_device *in_dev;
2005
2006 in_dev = __in_dev_get_rcu(dev);
2007 if (in_dev && !test_bit(i, in_dev->cnf.state))
2008 in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
2009 }
2010 rcu_read_unlock();
2011}
2012
2013/* called with RTNL locked */
2014static void inet_forward_change(struct net *net)
2015{
2016 struct net_device *dev;
2017 int on = IPV4_DEVCONF_ALL(net, FORWARDING);
2018
2019 IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
2020 IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
2021 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2022 NETCONFA_IFINDEX_ALL,
2023 net->ipv4.devconf_all);
2024 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2025 NETCONFA_IFINDEX_DEFAULT,
2026 net->ipv4.devconf_dflt);
2027
2028 for_each_netdev(net, dev) {
2029 struct in_device *in_dev;
2030 if (on)
2031 dev_disable_lro(dev);
2032 rcu_read_lock();
2033 in_dev = __in_dev_get_rcu(dev);
2034 if (in_dev) {
2035 IN_DEV_CONF_SET(in_dev, FORWARDING, on);
2036 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2037 dev->ifindex, &in_dev->cnf);
2038 }
2039 rcu_read_unlock();
2040 }
2041}
2042
2043static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
2044{
2045 if (cnf == net->ipv4.devconf_dflt)
2046 return NETCONFA_IFINDEX_DEFAULT;
2047 else if (cnf == net->ipv4.devconf_all)
2048 return NETCONFA_IFINDEX_ALL;
2049 else {
2050 struct in_device *idev
2051 = container_of(cnf, struct in_device, cnf);
2052 return idev->dev->ifindex;
2053 }
2054}
2055
2056static int devinet_conf_proc(struct ctl_table *ctl, int write,
2057 void __user *buffer,
2058 size_t *lenp, loff_t *ppos)
2059{
2060 int old_value = *(int *)ctl->data;
2061 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2062 int new_value = *(int *)ctl->data;
2063
2064 if (write) {
2065 struct ipv4_devconf *cnf = ctl->extra1;
2066 struct net *net = ctl->extra2;
2067 int i = (int *)ctl->data - cnf->data;
2068 int ifindex;
2069
2070 set_bit(i, cnf->state);
2071
2072 if (cnf == net->ipv4.devconf_dflt)
2073 devinet_copy_dflt_conf(net, i);
2074 if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
2075 i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
2076 if ((new_value == 0) && (old_value != 0))
2077 rt_cache_flush(net);
2078
2079 if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
2080 new_value != old_value) {
2081 ifindex = devinet_conf_ifindex(net, cnf);
2082 inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER,
2083 ifindex, cnf);
2084 }
2085 if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
2086 new_value != old_value) {
2087 ifindex = devinet_conf_ifindex(net, cnf);
2088 inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
2089 ifindex, cnf);
2090 }
2091 if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
2092 new_value != old_value) {
2093 ifindex = devinet_conf_ifindex(net, cnf);
2094 inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
2095 ifindex, cnf);
2096 }
2097 }
2098
2099 return ret;
2100}
2101
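/* Handler for the forwarding sysctls.  Writes other than to the
 * "default" entry must take the RTNL; if it cannot be acquired, the
 * old value is restored and the syscall is restarted.  Enabling
 * forwarding also disables LRO on the affected device(s).
 */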
2102static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
2103 void __user *buffer,
2104 size_t *lenp, loff_t *ppos)
2105{
2106 int *valp = ctl->data;
2107 int val = *valp;
2108 loff_t pos = *ppos;
2109 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2110
2111 if (write && *valp != val) {
2112 struct net *net = ctl->extra2;
2113
2114 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
2115 if (!rtnl_trylock()) {
2116 /* Restore the original values before restarting */
2117 *valp = val;
2118 *ppos = pos;
2119 return restart_syscall();
2120 }
2121 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
2122 inet_forward_change(net);
2123 } else {
2124 struct ipv4_devconf *cnf = ctl->extra1;
2125 struct in_device *idev =
2126 container_of(cnf, struct in_device, cnf);
2127 if (*valp)
2128 dev_disable_lro(idev->dev);
2129 inet_netconf_notify_devconf(net,
2130 NETCONFA_FORWARDING,
2131 idev->dev->ifindex,
2132 cnf);
2133 }
2134 rtnl_unlock();
2135 rt_cache_flush(net);
2136 } else
2137 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2138 NETCONFA_IFINDEX_DEFAULT,
2139 net->ipv4.devconf_dflt);
2140 }
2141
2142 return ret;
2143}
2144
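/* proc_dointvec wrapper that flushes the route cache whenever the
 * value actually changes.
 */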
2145static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
2146 void __user *buffer,
2147 size_t *lenp, loff_t *ppos)
2148{
2149 int *valp = ctl->data;
2150 int val = *valp;
2151 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2152 struct net *net = ctl->extra2;
2153
2154 if (write && *valp != val)
2155 rt_cache_flush(net);
2156
2157 return ret;
2158}
2159
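/* The template entries below point at the static init_net ipv4_devconf;
 * __devinet_sysctl_register() rebases .data and fills in .extra1/.extra2
 * for each namespace and device.
 */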
2160#define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
2161 { \
2162 .procname = name, \
2163 .data = ipv4_devconf.data + \
2164 IPV4_DEVCONF_ ## attr - 1, \
2165 .maxlen = sizeof(int), \
2166 .mode = mval, \
2167 .proc_handler = proc, \
2168 .extra1 = &ipv4_devconf, \
2169 }
2170
2171#define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
2172 DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)
2173
2174#define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
2175 DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)
2176
2177#define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
2178 DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)
2179
2180#define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
2181 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
2182
2183static struct devinet_sysctl_table {
2184 struct ctl_table_header *sysctl_header;
2185 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
2186} devinet_sysctl = {
2187 .devinet_vars = {
2188 DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
2189 devinet_sysctl_forward),
2190 DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
2191
2192 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
2193 DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
2194 DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
2195 DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
2196 DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
2197 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
2198 "accept_source_route"),
2199 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
2200 DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
2201 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
2202 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
2203 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
2204 DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
2205 DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
2206 DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
2207 DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
2208 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
2209 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
2210 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
2211 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
2212 DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
2213 "force_igmp_version"),
2214 DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
2215 "igmpv2_unsolicited_report_interval"),
2216 DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
2217 "igmpv3_unsolicited_report_interval"),
2218 DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
2219 "ignore_routes_with_linkdown"),
2220 DEVINET_SYSCTL_RW_ENTRY(DROP_GRATUITOUS_ARP,
2221 "drop_gratuitous_arp"),
2222
2223 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
2224 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
2225 DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
2226 "promote_secondaries"),
2227 DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
2228 "route_localnet"),
2229 DEVINET_SYSCTL_FLUSHING_ENTRY(DROP_UNICAST_IN_L2_MULTICAST,
2230 "drop_unicast_in_l2_multicast"),
2231 },
2232};
2233
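/* Duplicate the template table, rebase it onto @p and register it
 * under net/ipv4/conf/<dev_name>.
 */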
2234static int __devinet_sysctl_register(struct net *net, char *dev_name,
2235 struct ipv4_devconf *p)
2236{
2237 int i;
2238 struct devinet_sysctl_table *t;
2239 char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
2240
2241 t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
2242 if (!t)
2243 goto out;
2244
2245 for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
2246 t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
2247 t->devinet_vars[i].extra1 = p;
2248 t->devinet_vars[i].extra2 = net;
2249 }
2250
2251 snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
2252
2253 t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
2254 if (!t->sysctl_header)
2255 goto free;
2256
2257 p->sysctl = t;
2258 return 0;
2259
2260free:
2261 kfree(t);
2262out:
2263 return -ENOBUFS;
2264}
2265
2266static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
2267{
2268 struct devinet_sysctl_table *t = cnf->sysctl;
2269
2270 if (!t)
2271 return;
2272
2273 cnf->sysctl = NULL;
2274 unregister_net_sysctl_table(t->sysctl_header);
2275 kfree(t);
2276}
2277
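/* Register both the neigh (ARP) and the devinet sysctl tables for a
 * device, unwinding the neigh registration if the second step fails.
 * Device names that are not allowed as sysctl entries are rejected.
 */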
2278static int devinet_sysctl_register(struct in_device *idev)
2279{
2280 int err;
2281
2282 if (!sysctl_dev_name_is_allowed(idev->dev->name))
2283 return -EINVAL;
2284
2285 err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
2286 if (err)
2287 return err;
2288 err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
2289 &idev->cnf);
2290 if (err)
2291 neigh_sysctl_unregister(idev->arp_parms);
2292 return err;
2293}
2294
2295static void devinet_sysctl_unregister(struct in_device *idev)
2296{
2297 __devinet_sysctl_unregister(&idev->cnf);
2298 neigh_sysctl_unregister(idev->arp_parms);
2299}
2300
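/* The global net.ipv4.ip_forward knob; it shares storage with the
 * FORWARDING entry of the namespace's devconf_all (rebased in
 * devinet_init_net() for non-init namespaces).
 */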
2301static struct ctl_table ctl_forward_entry[] = {
2302 {
2303 .procname = "ip_forward",
2304 .data = &ipv4_devconf.data[
2305 IPV4_DEVCONF_FORWARDING - 1],
2306 .maxlen = sizeof(int),
2307 .mode = 0644,
2308 .proc_handler = devinet_sysctl_forward,
2309 .extra1 = &ipv4_devconf,
2310 .extra2 = &init_net,
2311 },
2312 { },
2313};
2314#endif
2315
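/* Per-namespace setup: init_net uses the static tables above, every
 * other namespace works on kmemdup'd copies so its settings stay
 * independent.
 */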
2316static __net_init int devinet_init_net(struct net *net)
2317{
2318 int err;
2319 struct ipv4_devconf *all, *dflt;
2320#ifdef CONFIG_SYSCTL
2321 struct ctl_table *tbl = ctl_forward_entry;
2322 struct ctl_table_header *forw_hdr;
2323#endif
2324
2325 err = -ENOMEM;
2326 all = &ipv4_devconf;
2327 dflt = &ipv4_devconf_dflt;
2328
2329 if (!net_eq(net, &init_net)) {
2330 all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
2331 if (!all)
2332 goto err_alloc_all;
2333
2334 dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
2335 if (!dflt)
2336 goto err_alloc_dflt;
2337
2338#ifdef CONFIG_SYSCTL
2339 tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
2340 if (!tbl)
2341 goto err_alloc_ctl;
2342
2343 tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
2344 tbl[0].extra1 = all;
2345 tbl[0].extra2 = net;
2346#endif
2347 }
2348
2349#ifdef CONFIG_SYSCTL
2350 err = __devinet_sysctl_register(net, "all", all);
2351 if (err < 0)
2352 goto err_reg_all;
2353
2354 err = __devinet_sysctl_register(net, "default", dflt);
2355 if (err < 0)
2356 goto err_reg_dflt;
2357
2358 err = -ENOMEM;
2359 forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
2360 if (!forw_hdr)
2361 goto err_reg_ctl;
2362 net->ipv4.forw_hdr = forw_hdr;
2363#endif
2364
2365 net->ipv4.devconf_all = all;
2366 net->ipv4.devconf_dflt = dflt;
2367 return 0;
2368
2369#ifdef CONFIG_SYSCTL
2370err_reg_ctl:
2371 __devinet_sysctl_unregister(dflt);
2372err_reg_dflt:
2373 __devinet_sysctl_unregister(all);
2374err_reg_all:
2375 if (tbl != ctl_forward_entry)
2376 kfree(tbl);
2377err_alloc_ctl:
2378#endif
2379 if (dflt != &ipv4_devconf_dflt)
2380 kfree(dflt);
2381err_alloc_dflt:
2382 if (all != &ipv4_devconf)
2383 kfree(all);
2384err_alloc_all:
2385 return err;
2386}
2387
2388static __net_exit void devinet_exit_net(struct net *net)
2389{
2390#ifdef CONFIG_SYSCTL
2391 struct ctl_table *tbl;
2392
2393 tbl = net->ipv4.forw_hdr->ctl_table_arg;
2394 unregister_net_sysctl_table(net->ipv4.forw_hdr);
2395 __devinet_sysctl_unregister(net->ipv4.devconf_dflt);
2396 __devinet_sysctl_unregister(net->ipv4.devconf_all);
2397 kfree(tbl);
2398#endif
2399 kfree(net->ipv4.devconf_dflt);
2400 kfree(net->ipv4.devconf_all);
2401}
2402
2403static __net_initdata struct pernet_operations devinet_ops = {
2404 .init = devinet_init_net,
2405 .exit = devinet_exit_net,
2406};
2407
2408static struct rtnl_af_ops inet_af_ops __read_mostly = {
2409 .family = AF_INET,
2410 .fill_link_af = inet_fill_link_af,
2411 .get_link_af_size = inet_get_link_af_size,
2412 .validate_link_af = inet_validate_link_af,
2413 .set_link_af = inet_set_link_af,
2414};
2415
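/* Boot-time initialisation: set up the address hash, the pernet and
 * netdevice hooks, the address lifetime worker and the rtnetlink
 * handlers for IPv4 addresses and netconf.
 */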
2416void __init devinet_init(void)
2417{
2418 int i;
2419
2420 for (i = 0; i < IN4_ADDR_HSIZE; i++)
2421 INIT_HLIST_HEAD(&inet_addr_lst[i]);
2422
2423 register_pernet_subsys(&devinet_ops);
2424
2425 register_gifconf(PF_INET, inet_gifconf);
2426 register_netdevice_notifier(&ip_netdev_notifier);
2427
2428 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
2429
2430 rtnl_af_register(&inet_af_ops);
2431
2432 rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
2433 rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
2434 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
2435 rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
2436 inet_netconf_dump_devconf, NULL);
2437}