net/ipv6/sit.c (Linux v3.1)
   1/*
   2 *	IPv6 over IPv4 tunnel device - Simple Internet Transition (SIT)
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
   8 *
   9 *	This program is free software; you can redistribute it and/or
  10 *      modify it under the terms of the GNU General Public License
  11 *      as published by the Free Software Foundation; either version
  12 *      2 of the License, or (at your option) any later version.
  13 *
  14 *	Changes:
  15 * Roger Venning <r.venning@telstra.com>:	6to4 support
  16 * Nate Thompson <nate@thebog.net>:		6to4 support
  17 * Fred Templin <fred.l.templin@boeing.com>:	isatap support
  18 */
   19
  20#include <linux/module.h>
  21#include <linux/capability.h>
  22#include <linux/errno.h>
  23#include <linux/types.h>
  24#include <linux/socket.h>
  25#include <linux/sockios.h>
  26#include <linux/net.h>
  27#include <linux/in6.h>
  28#include <linux/netdevice.h>
  29#include <linux/if_arp.h>
  30#include <linux/icmp.h>
  31#include <linux/slab.h>
  32#include <asm/uaccess.h>
  33#include <linux/init.h>
  34#include <linux/netfilter_ipv4.h>
  35#include <linux/if_ether.h>
  36
  37#include <net/sock.h>
  38#include <net/snmp.h>
  39
  40#include <net/ipv6.h>
  41#include <net/protocol.h>
  42#include <net/transp_v6.h>
  43#include <net/ip6_fib.h>
  44#include <net/ip6_route.h>
  45#include <net/ndisc.h>
  46#include <net/addrconf.h>
  47#include <net/ip.h>
  48#include <net/udp.h>
  49#include <net/icmp.h>
  50#include <net/ipip.h>
  51#include <net/inet_ecn.h>
  52#include <net/xfrm.h>
  53#include <net/dsfield.h>
  54#include <net/net_namespace.h>
  55#include <net/netns/generic.h>
  56
  57/*
  58   This version of net/ipv6/sit.c is cloned of net/ipv4/ip_gre.c
  59
  60   For comments look at net/ipv4/ip_gre.c --ANK
  61 */
  62
  63#define HASH_SIZE  16
  64#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
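A worked example of the bucket hash (illustrative value only, not taken from the original source): the macro xors the second-lowest nibble of the raw 32-bit address into the lowest one and masks to 4 bits, so every address maps to one of the HASH_SIZE (16) buckets.

/*
 * e.g. for a raw value 0x0a000001:
 *   0x0a000001 ^ (0x0a000001 >> 4) = 0x0a000001 ^ 0x00a00000 = 0x0aa00001
 *   0x0aa00001 & 0xF               = 1    ->  bucket 1 of 16
 */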
  65
  66static int ipip6_tunnel_init(struct net_device *dev);
  67static void ipip6_tunnel_setup(struct net_device *dev);
  68static void ipip6_dev_free(struct net_device *dev);
  69
  70static int sit_net_id __read_mostly;
  71struct sit_net {
  72	struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
  73	struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
  74	struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
  75	struct ip_tunnel __rcu *tunnels_wc[1];
  76	struct ip_tunnel __rcu **tunnels[4];
  77
  78	struct net_device *fb_tunnel_dev;
  79};
  80
  81/*
  82 * Locking : hash tables are protected by RCU and RTNL
  83 */
  84
  85#define for_each_ip_tunnel_rcu(start) \
  86	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
  87
  88/* often modified stats are per cpu, other are shared (netdev->stats) */
  89struct pcpu_tstats {
  90	unsigned long	rx_packets;
  91	unsigned long	rx_bytes;
  92	unsigned long	tx_packets;
   93	unsigned long	tx_bytes;
  94};
  95
   96static struct net_device_stats *ipip6_get_stats(struct net_device *dev)
  97{
  98	struct pcpu_tstats sum = { 0 };
  99	int i;
 100
 101	for_each_possible_cpu(i) {
  102		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
 103
 104		sum.rx_packets += tstats->rx_packets;
 105		sum.rx_bytes   += tstats->rx_bytes;
 106		sum.tx_packets += tstats->tx_packets;
 107		sum.tx_bytes   += tstats->tx_bytes;
 108	}
 109	dev->stats.rx_packets = sum.rx_packets;
 110	dev->stats.rx_bytes   = sum.rx_bytes;
 111	dev->stats.tx_packets = sum.tx_packets;
 112	dev->stats.tx_bytes   = sum.tx_bytes;
 113	return &dev->stats;
  114}
 115/*
 116 * Must be invoked with rcu_read_lock
 117 */
 118static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
 119		struct net_device *dev, __be32 remote, __be32 local)
 120{
 121	unsigned int h0 = HASH(remote);
 122	unsigned int h1 = HASH(local);
 123	struct ip_tunnel *t;
 124	struct sit_net *sitn = net_generic(net, sit_net_id);
 125
 126	for_each_ip_tunnel_rcu(sitn->tunnels_r_l[h0 ^ h1]) {
 127		if (local == t->parms.iph.saddr &&
 128		    remote == t->parms.iph.daddr &&
 129		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
 130		    (t->dev->flags & IFF_UP))
 131			return t;
 132	}
 133	for_each_ip_tunnel_rcu(sitn->tunnels_r[h0]) {
 134		if (remote == t->parms.iph.daddr &&
 135		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
 136		    (t->dev->flags & IFF_UP))
 137			return t;
 138	}
 139	for_each_ip_tunnel_rcu(sitn->tunnels_l[h1]) {
 140		if (local == t->parms.iph.saddr &&
 141		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
 142		    (t->dev->flags & IFF_UP))
 143			return t;
 144	}
 145	t = rcu_dereference(sitn->tunnels_wc[0]);
 146	if ((t != NULL) && (t->dev->flags & IFF_UP))
 147		return t;
 148	return NULL;
 149}
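/* Note: the lookup above is most-specific-first: a tunnel matching both
 * endpoints wins over a remote-only match, which wins over a local-only
 * match, with the wildcard fallback device (sit0) tried last. */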
 150
 151static struct ip_tunnel __rcu **__ipip6_bucket(struct sit_net *sitn,
 152		struct ip_tunnel_parm *parms)
 153{
 154	__be32 remote = parms->iph.daddr;
 155	__be32 local = parms->iph.saddr;
 156	unsigned int h = 0;
 157	int prio = 0;
 158
 159	if (remote) {
 160		prio |= 2;
 161		h ^= HASH(remote);
 162	}
 163	if (local) {
 164		prio |= 1;
 165		h ^= HASH(local);
 166	}
 167	return &sitn->tunnels[prio][h];
 168}
 169
 170static inline struct ip_tunnel __rcu **ipip6_bucket(struct sit_net *sitn,
 171		struct ip_tunnel *t)
 172{
 173	return __ipip6_bucket(sitn, &t->parms);
 174}
 175
 176static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
 177{
 178	struct ip_tunnel __rcu **tp;
 179	struct ip_tunnel *iter;
 180
 181	for (tp = ipip6_bucket(sitn, t);
 182	     (iter = rtnl_dereference(*tp)) != NULL;
 183	     tp = &iter->next) {
 184		if (t == iter) {
 185			rcu_assign_pointer(*tp, t->next);
 186			break;
 187		}
 188	}
 189}
 190
 191static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
 192{
 193	struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
 194
 195	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
 196	rcu_assign_pointer(*tp, t);
 197}
 198
 199static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
 200{
 201#ifdef CONFIG_IPV6_SIT_6RD
 202	struct ip_tunnel *t = netdev_priv(dev);
 203
 204	if (t->dev == sitn->fb_tunnel_dev) {
 205		ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
 206		t->ip6rd.relay_prefix = 0;
 207		t->ip6rd.prefixlen = 16;
 208		t->ip6rd.relay_prefixlen = 0;
 209	} else {
 210		struct ip_tunnel *t0 = netdev_priv(sitn->fb_tunnel_dev);
 211		memcpy(&t->ip6rd, &t0->ip6rd, sizeof(t->ip6rd));
 212	}
 213#endif
 214}
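/* Note: the fallback device is seeded with the 6to4 prefix 2002::/16
 * (0x20020000, prefixlen 16) and no relay prefix, so 6rd behaves like
 * plain 6to4 until SIOCADD6RD/SIOCCHG6RD configures something else;
 * every other tunnel simply inherits the fallback device's 6rd settings. */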
 215
 216static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
 217		struct ip_tunnel_parm *parms, int create)
 218{
 219	__be32 remote = parms->iph.daddr;
 220	__be32 local = parms->iph.saddr;
 221	struct ip_tunnel *t, *nt;
 222	struct ip_tunnel __rcu **tp;
 223	struct net_device *dev;
 224	char name[IFNAMSIZ];
 225	struct sit_net *sitn = net_generic(net, sit_net_id);
 226
 227	for (tp = __ipip6_bucket(sitn, parms);
 228	    (t = rtnl_dereference(*tp)) != NULL;
 229	     tp = &t->next) {
 230		if (local == t->parms.iph.saddr &&
 231		    remote == t->parms.iph.daddr &&
 232		    parms->link == t->parms.link) {
 233			if (create)
 234				return NULL;
 235			else
 236				return t;
 237		}
 238	}
 239	if (!create)
 240		goto failed;
 241
 242	if (parms->name[0])
 243		strlcpy(name, parms->name, IFNAMSIZ);
 244	else
 245		strcpy(name, "sit%d");
 246
 247	dev = alloc_netdev(sizeof(*t), name, ipip6_tunnel_setup);
 248	if (dev == NULL)
 249		return NULL;
 250
 251	dev_net_set(dev, net);
 252
 253	nt = netdev_priv(dev);
 254
 255	nt->parms = *parms;
 256	if (ipip6_tunnel_init(dev) < 0)
 257		goto failed_free;
 258	ipip6_tunnel_clone_6rd(dev, sitn);
 259
 260	if (parms->i_flags & SIT_ISATAP)
 261		dev->priv_flags |= IFF_ISATAP;
 262
 263	if (register_netdevice(dev) < 0)
 264		goto failed_free;
  265
 266	dev_hold(dev);
 267
 268	ipip6_tunnel_link(sitn, nt);
 269	return nt;
 270
 271failed_free:
 272	ipip6_dev_free(dev);
 273failed:
 274	return NULL;
 275}
 276
 277#define for_each_prl_rcu(start)			\
 278	for (prl = rcu_dereference(start);	\
 279	     prl;				\
 280	     prl = rcu_dereference(prl->next))
 281
 282static struct ip_tunnel_prl_entry *
 283__ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr)
 284{
 285	struct ip_tunnel_prl_entry *prl;
 286
 287	for_each_prl_rcu(t->prl)
 288		if (prl->addr == addr)
 289			break;
 290	return prl;
 291
 292}
 293
 294static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
 295				struct ip_tunnel_prl __user *a)
 296{
 297	struct ip_tunnel_prl kprl, *kp;
 298	struct ip_tunnel_prl_entry *prl;
 299	unsigned int cmax, c = 0, ca, len;
 300	int ret = 0;
 301
 302	if (copy_from_user(&kprl, a, sizeof(kprl)))
 303		return -EFAULT;
 304	cmax = kprl.datalen / sizeof(kprl);
 305	if (cmax > 1 && kprl.addr != htonl(INADDR_ANY))
 306		cmax = 1;
 307
 308	/* For simple GET or for root users,
 309	 * we try harder to allocate.
 310	 */
 311	kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ?
 312		kcalloc(cmax, sizeof(*kp), GFP_KERNEL) :
 313		NULL;
 314
 315	rcu_read_lock();
 316
 317	ca = t->prl_count < cmax ? t->prl_count : cmax;
 318
 319	if (!kp) {
 320		/* We don't try hard to allocate much memory for
 321		 * non-root users.
 322		 * For root users, retry allocating enough memory for
 323		 * the answer.
 324		 */
 325		kp = kcalloc(ca, sizeof(*kp), GFP_ATOMIC);
 326		if (!kp) {
 327			ret = -ENOMEM;
 328			goto out;
 329		}
 330	}
 331
 332	c = 0;
 333	for_each_prl_rcu(t->prl) {
 334		if (c >= cmax)
 335			break;
 336		if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr)
 337			continue;
 338		kp[c].addr = prl->addr;
 339		kp[c].flags = prl->flags;
 340		c++;
 341		if (kprl.addr != htonl(INADDR_ANY))
 342			break;
 343	}
 344out:
 345	rcu_read_unlock();
 346
 347	len = sizeof(*kp) * c;
 348	ret = 0;
 349	if ((len && copy_to_user(a + 1, kp, len)) || put_user(len, &a->datalen))
 350		ret = -EFAULT;
 351
 352	kfree(kp);
 353
 354	return ret;
 355}
 356
 357static int
 358ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
 359{
 360	struct ip_tunnel_prl_entry *p;
 361	int err = 0;
 362
 363	if (a->addr == htonl(INADDR_ANY))
 364		return -EINVAL;
 365
 366	ASSERT_RTNL();
 367
 368	for (p = rtnl_dereference(t->prl); p; p = rtnl_dereference(p->next)) {
 369		if (p->addr == a->addr) {
 370			if (chg) {
 371				p->flags = a->flags;
 372				goto out;
 373			}
 374			err = -EEXIST;
 375			goto out;
 376		}
 377	}
 378
 379	if (chg) {
 380		err = -ENXIO;
 381		goto out;
 382	}
 383
 384	p = kzalloc(sizeof(struct ip_tunnel_prl_entry), GFP_KERNEL);
 385	if (!p) {
 386		err = -ENOBUFS;
 387		goto out;
 388	}
 389
 390	p->next = t->prl;
 391	p->addr = a->addr;
 392	p->flags = a->flags;
 393	t->prl_count++;
 394	rcu_assign_pointer(t->prl, p);
 395out:
 396	return err;
 397}
 398
 399static void prl_list_destroy_rcu(struct rcu_head *head)
 400{
 401	struct ip_tunnel_prl_entry *p, *n;
 402
 403	p = container_of(head, struct ip_tunnel_prl_entry, rcu_head);
 404	do {
 405		n = rcu_dereference_protected(p->next, 1);
 406		kfree(p);
 407		p = n;
 408	} while (p);
 409}
 410
 411static int
 412ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
 413{
 414	struct ip_tunnel_prl_entry *x;
 415	struct ip_tunnel_prl_entry __rcu **p;
 416	int err = 0;
 417
 418	ASSERT_RTNL();
 419
 420	if (a && a->addr != htonl(INADDR_ANY)) {
 421		for (p = &t->prl;
 422		     (x = rtnl_dereference(*p)) != NULL;
 423		     p = &x->next) {
 424			if (x->addr == a->addr) {
 425				*p = x->next;
 426				kfree_rcu(x, rcu_head);
 427				t->prl_count--;
 428				goto out;
 429			}
 430		}
 431		err = -ENXIO;
 432	} else {
 433		x = rtnl_dereference(t->prl);
 434		if (x) {
 435			t->prl_count = 0;
 436			call_rcu(&x->rcu_head, prl_list_destroy_rcu);
 437			t->prl = NULL;
 438		}
 439	}
 440out:
 441	return err;
 442}
 443
 444static int
 445isatap_chksrc(struct sk_buff *skb, const struct iphdr *iph, struct ip_tunnel *t)
 446{
 447	struct ip_tunnel_prl_entry *p;
 448	int ok = 1;
 449
 450	rcu_read_lock();
 451	p = __ipip6_tunnel_locate_prl(t, iph->saddr);
 452	if (p) {
 453		if (p->flags & PRL_DEFAULT)
 454			skb->ndisc_nodetype = NDISC_NODETYPE_DEFAULT;
 455		else
 456			skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT;
 457	} else {
 458		const struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr;
 459
 460		if (ipv6_addr_is_isatap(addr6) &&
 461		    (addr6->s6_addr32[3] == iph->saddr) &&
 462		    ipv6_chk_prefix(addr6, t->dev))
 463			skb->ndisc_nodetype = NDISC_NODETYPE_HOST;
 464		else
 465			ok = 0;
 466	}
 467	rcu_read_unlock();
 468	return ok;
 469}
 470
 471static void ipip6_tunnel_uninit(struct net_device *dev)
 472{
 473	struct net *net = dev_net(dev);
 474	struct sit_net *sitn = net_generic(net, sit_net_id);
 475
 476	if (dev == sitn->fb_tunnel_dev) {
 477		rcu_assign_pointer(sitn->tunnels_wc[0], NULL);
 478	} else {
 479		ipip6_tunnel_unlink(sitn, netdev_priv(dev));
 480		ipip6_tunnel_del_prl(netdev_priv(dev), NULL);
 481	}
 482	dev_put(dev);
 483}
 484
 485
 486static int ipip6_err(struct sk_buff *skb, u32 info)
 487{
 488
 489/* All the routers (except for Linux) return only
 490   8 bytes of packet payload. It means, that precise relaying of
 491   ICMP in the real Internet is absolutely infeasible.
 492 */
 493	const struct iphdr *iph = (const struct iphdr *)skb->data;
 494	const int type = icmp_hdr(skb)->type;
 495	const int code = icmp_hdr(skb)->code;
 496	struct ip_tunnel *t;
 497	int err;
 498
 499	switch (type) {
 500	default:
 501	case ICMP_PARAMETERPROB:
 502		return 0;
 503
 504	case ICMP_DEST_UNREACH:
 505		switch (code) {
 506		case ICMP_SR_FAILED:
 507		case ICMP_PORT_UNREACH:
 508			/* Impossible event. */
 509			return 0;
 510		case ICMP_FRAG_NEEDED:
 511			/* Soft state for pmtu is maintained by IP core. */
 512			return 0;
 513		default:
 514			/* All others are translated to HOST_UNREACH.
 515			   rfc2003 contains "deep thoughts" about NET_UNREACH,
 516			   I believe they are just ether pollution. --ANK
 517			 */
 518			break;
 519		}
 520		break;
 521	case ICMP_TIME_EXCEEDED:
 522		if (code != ICMP_EXC_TTL)
 523			return 0;
 524		break;
 525	}
 526
 527	err = -ENOENT;
 528
 529	rcu_read_lock();
 530	t = ipip6_tunnel_lookup(dev_net(skb->dev),
 531				skb->dev,
 532				iph->daddr,
 533				iph->saddr);
 534	if (t == NULL || t->parms.iph.daddr == 0)
 535		goto out;
 536
 537	err = 0;
 538	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
 539		goto out;
 540
 541	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
 542		t->err_count++;
 543	else
 544		t->err_count = 1;
 545	t->err_time = jiffies;
 546out:
 547	rcu_read_unlock();
 548	return err;
 549}
 550
 551static inline void ipip6_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
 552{
 553	if (INET_ECN_is_ce(iph->tos))
 554		IP6_ECN_set_ce(ipv6_hdr(skb));
 555}
 556
 557static int ipip6_rcv(struct sk_buff *skb)
 558{
 559	const struct iphdr *iph;
 560	struct ip_tunnel *tunnel;
 561
 562	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
 563		goto out;
 564
 565	iph = ip_hdr(skb);
 566
 567	rcu_read_lock();
 568	tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
 569				     iph->saddr, iph->daddr);
 570	if (tunnel != NULL) {
 571		struct pcpu_tstats *tstats;
 572
 573		secpath_reset(skb);
 574		skb->mac_header = skb->network_header;
 575		skb_reset_network_header(skb);
 576		IPCB(skb)->flags = 0;
 577		skb->protocol = htons(ETH_P_IPV6);
 578		skb->pkt_type = PACKET_HOST;
 579
 580		if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
 581		    !isatap_chksrc(skb, iph, tunnel)) {
 582			tunnel->dev->stats.rx_errors++;
 583			rcu_read_unlock();
 584			kfree_skb(skb);
 585			return 0;
 586		}
 587
 588		tstats = this_cpu_ptr(tunnel->dev->tstats);
 589		tstats->rx_packets++;
 590		tstats->rx_bytes += skb->len;
 591
 592		__skb_tunnel_rx(skb, tunnel->dev);
 593
 594		ipip6_ecn_decapsulate(iph, skb);
 595
 596		netif_rx(skb);
 597
 598		rcu_read_unlock();
 599		return 0;
 600	}
 601
 602	/* no tunnel matched,  let upstream know, ipsec may handle it */
 603	rcu_read_unlock();
 604	return 1;
 605out:
 606	kfree_skb(skb);
 607	return 0;
 608}
 609
 610/*
 611 * Returns the embedded IPv4 address if the IPv6 address
 612 * comes from 6rd / 6to4 (RFC 3056) addr space.
 613 */
 614static inline
 615__be32 try_6rd(const struct in6_addr *v6dst, struct ip_tunnel *tunnel)
 616{
 617	__be32 dst = 0;
 618
 619#ifdef CONFIG_IPV6_SIT_6RD
 620	if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix,
 621			      tunnel->ip6rd.prefixlen)) {
 622		unsigned int pbw0, pbi0;
 623		int pbi1;
 624		u32 d;
 625
 626		pbw0 = tunnel->ip6rd.prefixlen >> 5;
 627		pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
 628
 629		d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
 630		    tunnel->ip6rd.relay_prefixlen;
 631
 632		pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
 633		if (pbi1 > 0)
 634			d |= ntohl(v6dst->s6_addr32[pbw0 + 1]) >>
 635			     (32 - pbi1);
 636
 637		dst = tunnel->ip6rd.relay_prefix | htonl(d);
 638	}
 639#else
 640	if (v6dst->s6_addr16[0] == htons(0x2002)) {
 641		/* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */
 642		memcpy(&dst, &v6dst->s6_addr16[1], 4);
 643	}
 644#endif
 645	return dst;
 646}
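Two worked examples of the address decoding above, using illustrative values from the documentation ranges (2001:db8::/32 and 198.51.100.0/24), not values taken from the original source:

/*
 * 6to4 branch:  v6dst = 2002:c633:6401::1
 *               s6_addr16[1..2] = c633 6401      ->  dst = 198.51.100.1
 *
 * 6rd branch:   ip6rd.prefix = 2001:db8::/32, relay_prefixlen = 0
 *               v6dst = 2001:db8:c633:6401::1
 *               pbw0 = 32 >> 5 = 1,  pbi0 = 32 & 0x1f = 0
 *               d = ntohl(v6dst->s6_addr32[1]) = 0xc6336401
 *               dst = relay_prefix | htonl(d)    ->  198.51.100.1
 */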
 647
 648/*
 649 *	This function assumes it is being called from dev_queue_xmit()
 650 *	and that skb is filled properly by that function.
 651 */
 652
 653static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 654				     struct net_device *dev)
 655{
 656	struct ip_tunnel *tunnel = netdev_priv(dev);
 657	struct pcpu_tstats *tstats;
 658	const struct iphdr  *tiph = &tunnel->parms.iph;
 659	const struct ipv6hdr *iph6 = ipv6_hdr(skb);
 660	u8     tos = tunnel->parms.iph.tos;
 661	__be16 df = tiph->frag_off;
 662	struct rtable *rt;     			/* Route to the other host */
 663	struct net_device *tdev;		/* Device to other host */
 664	struct iphdr  *iph;			/* Our new IP header */
 665	unsigned int max_headroom;		/* The extra header space needed */
 666	__be32 dst = tiph->daddr;
 667	struct flowi4 fl4;
 668	int    mtu;
 669	const struct in6_addr *addr6;
 670	int addr_type;
 671
 672	if (skb->protocol != htons(ETH_P_IPV6))
 673		goto tx_error;
 674
 675	if (tos == 1)
 676		tos = ipv6_get_dsfield(iph6);
 677
 678	/* ISATAP (RFC4214) - must come before 6to4 */
 679	if (dev->priv_flags & IFF_ISATAP) {
  680		struct neighbour *neigh = NULL;
 681
 682		if (skb_dst(skb))
 683			neigh = dst_get_neighbour(skb_dst(skb));
 684
 685		if (neigh == NULL) {
 686			if (net_ratelimit())
 687				printk(KERN_DEBUG "sit: nexthop == NULL\n");
 688			goto tx_error;
 689		}
 690
 691		addr6 = (const struct in6_addr*)&neigh->primary_key;
 692		addr_type = ipv6_addr_type(addr6);
 693
 694		if ((addr_type & IPV6_ADDR_UNICAST) &&
 695		     ipv6_addr_is_isatap(addr6))
 696			dst = addr6->s6_addr32[3];
  697		else
 698			goto tx_error;
 699	}
 700
 701	if (!dst)
 702		dst = try_6rd(&iph6->daddr, tunnel);
 703
 704	if (!dst) {
  705		struct neighbour *neigh = NULL;
 706
 707		if (skb_dst(skb))
 708			neigh = dst_get_neighbour(skb_dst(skb));
 709
 710		if (neigh == NULL) {
 711			if (net_ratelimit())
 712				printk(KERN_DEBUG "sit: nexthop == NULL\n");
 713			goto tx_error;
 714		}
 715
 716		addr6 = (const struct in6_addr*)&neigh->primary_key;
 717		addr_type = ipv6_addr_type(addr6);
 718
 719		if (addr_type == IPV6_ADDR_ANY) {
 720			addr6 = &ipv6_hdr(skb)->daddr;
 721			addr_type = ipv6_addr_type(addr6);
 722		}
 723
 724		if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
  725			goto tx_error_icmp;
 726
  727		dst = addr6->s6_addr32[3];
 728	}
 729
 730	rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
 731				   dst, tiph->saddr,
 732				   0, 0,
 733				   IPPROTO_IPV6, RT_TOS(tos),
 734				   tunnel->parms.link);
 735	if (IS_ERR(rt)) {
 736		dev->stats.tx_carrier_errors++;
 737		goto tx_error_icmp;
 738	}
 739	if (rt->rt_type != RTN_UNICAST) {
 740		ip_rt_put(rt);
 741		dev->stats.tx_carrier_errors++;
 742		goto tx_error_icmp;
 743	}
 744	tdev = rt->dst.dev;
 745
 746	if (tdev == dev) {
 747		ip_rt_put(rt);
 748		dev->stats.collisions++;
 749		goto tx_error;
 750	}
 751
 752	if (df) {
 753		mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
 754
 755		if (mtu < 68) {
 756			dev->stats.collisions++;
 757			ip_rt_put(rt);
 758			goto tx_error;
 759		}
 760
 761		if (mtu < IPV6_MIN_MTU) {
 762			mtu = IPV6_MIN_MTU;
 763			df = 0;
 764		}
 765
 766		if (tunnel->parms.iph.daddr && skb_dst(skb))
 767			skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
 768
 769		if (skb->len > mtu) {
 770			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 771			ip_rt_put(rt);
 772			goto tx_error;
 773		}
 774	}
 775
 776	if (tunnel->err_count > 0) {
 777		if (time_before(jiffies,
 778				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
 779			tunnel->err_count--;
 780			dst_link_failure(skb);
 781		} else
 782			tunnel->err_count = 0;
 783	}
 784
 785	/*
 786	 * Okay, now see if we can stuff it in the buffer as-is.
 787	 */
 788	max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr);
 789
 790	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
 791	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
 792		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
 793		if (!new_skb) {
 794			ip_rt_put(rt);
 795			dev->stats.tx_dropped++;
 796			dev_kfree_skb(skb);
 797			return NETDEV_TX_OK;
 798		}
 799		if (skb->sk)
 800			skb_set_owner_w(new_skb, skb->sk);
 801		dev_kfree_skb(skb);
 802		skb = new_skb;
 803		iph6 = ipv6_hdr(skb);
 804	}
 805
 806	skb->transport_header = skb->network_header;
 807	skb_push(skb, sizeof(struct iphdr));
 808	skb_reset_network_header(skb);
 809	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 810	IPCB(skb)->flags = 0;
 811	skb_dst_drop(skb);
 812	skb_dst_set(skb, &rt->dst);
 813
 814	/*
 815	 *	Push down and install the IPIP header.
 816	 */
 817
 818	iph 			=	ip_hdr(skb);
 819	iph->version		=	4;
 820	iph->ihl		=	sizeof(struct iphdr)>>2;
 821	iph->frag_off		=	df;
 822	iph->protocol		=	IPPROTO_IPV6;
 823	iph->tos		=	INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
 824	iph->daddr		=	fl4.daddr;
 825	iph->saddr		=	fl4.saddr;
 826
 827	if ((iph->ttl = tiph->ttl) == 0)
 828		iph->ttl	=	iph6->hop_limit;
 829
 830	nf_reset(skb);
 831	tstats = this_cpu_ptr(dev->tstats);
 832	__IPTUNNEL_XMIT(tstats, &dev->stats);
 833	return NETDEV_TX_OK;
 834
 835tx_error_icmp:
 836	dst_link_failure(skb);
 837tx_error:
 838	dev->stats.tx_errors++;
 839	dev_kfree_skb(skb);
 840	return NETDEV_TX_OK;
 841}
 842
 843static void ipip6_tunnel_bind_dev(struct net_device *dev)
 844{
 845	struct net_device *tdev = NULL;
 846	struct ip_tunnel *tunnel;
 847	const struct iphdr *iph;
 848	struct flowi4 fl4;
 849
 850	tunnel = netdev_priv(dev);
 851	iph = &tunnel->parms.iph;
 852
 853	if (iph->daddr) {
 854		struct rtable *rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
 855							  iph->daddr, iph->saddr,
 856							  0, 0,
 857							  IPPROTO_IPV6,
 858							  RT_TOS(iph->tos),
 859							  tunnel->parms.link);
 860
 861		if (!IS_ERR(rt)) {
 862			tdev = rt->dst.dev;
 863			ip_rt_put(rt);
 864		}
 865		dev->flags |= IFF_POINTOPOINT;
 866	}
 867
 868	if (!tdev && tunnel->parms.link)
 869		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
 870
 871	if (tdev) {
 872		dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
 873		dev->mtu = tdev->mtu - sizeof(struct iphdr);
 874		if (dev->mtu < IPV6_MIN_MTU)
 875			dev->mtu = IPV6_MIN_MTU;
 876	}
 877	dev->iflink = tunnel->parms.link;
 878}
 879
 880static int
 881ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
 882{
 883	int err = 0;
 884	struct ip_tunnel_parm p;
 885	struct ip_tunnel_prl prl;
 886	struct ip_tunnel *t;
 887	struct net *net = dev_net(dev);
 888	struct sit_net *sitn = net_generic(net, sit_net_id);
 889#ifdef CONFIG_IPV6_SIT_6RD
 890	struct ip_tunnel_6rd ip6rd;
 891#endif
 892
 893	switch (cmd) {
 894	case SIOCGETTUNNEL:
 895#ifdef CONFIG_IPV6_SIT_6RD
 896	case SIOCGET6RD:
 897#endif
 898		t = NULL;
 899		if (dev == sitn->fb_tunnel_dev) {
 900			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
 901				err = -EFAULT;
 902				break;
 903			}
 904			t = ipip6_tunnel_locate(net, &p, 0);
 905		}
 906		if (t == NULL)
 907			t = netdev_priv(dev);
 908
 909		err = -EFAULT;
 910		if (cmd == SIOCGETTUNNEL) {
 911			memcpy(&p, &t->parms, sizeof(p));
 912			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p,
 913					 sizeof(p)))
 914				goto done;
 915#ifdef CONFIG_IPV6_SIT_6RD
 916		} else {
 917			ipv6_addr_copy(&ip6rd.prefix, &t->ip6rd.prefix);
 918			ip6rd.relay_prefix = t->ip6rd.relay_prefix;
 919			ip6rd.prefixlen = t->ip6rd.prefixlen;
 920			ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen;
 921			if (copy_to_user(ifr->ifr_ifru.ifru_data, &ip6rd,
 922					 sizeof(ip6rd)))
 923				goto done;
 924#endif
 925		}
 926		err = 0;
 927		break;
 928
 929	case SIOCADDTUNNEL:
 930	case SIOCCHGTUNNEL:
 931		err = -EPERM;
 932		if (!capable(CAP_NET_ADMIN))
 933			goto done;
 934
 935		err = -EFAULT;
 936		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
 937			goto done;
 938
 939		err = -EINVAL;
 940		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPV6 ||
 941		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
 942			goto done;
 943		if (p.iph.ttl)
 944			p.iph.frag_off |= htons(IP_DF);
 945
 946		t = ipip6_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
 947
 948		if (dev != sitn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
 949			if (t != NULL) {
 950				if (t->dev != dev) {
 951					err = -EEXIST;
 952					break;
 953				}
 954			} else {
 955				if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
 956				    (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
 957					err = -EINVAL;
 958					break;
 959				}
 960				t = netdev_priv(dev);
 961				ipip6_tunnel_unlink(sitn, t);
 962				synchronize_net();
 963				t->parms.iph.saddr = p.iph.saddr;
 964				t->parms.iph.daddr = p.iph.daddr;
 965				memcpy(dev->dev_addr, &p.iph.saddr, 4);
 966				memcpy(dev->broadcast, &p.iph.daddr, 4);
 967				ipip6_tunnel_link(sitn, t);
 968				netdev_state_change(dev);
 969			}
 970		}
 971
 972		if (t) {
 973			err = 0;
 974			if (cmd == SIOCCHGTUNNEL) {
 975				t->parms.iph.ttl = p.iph.ttl;
 976				t->parms.iph.tos = p.iph.tos;
 977				if (t->parms.link != p.link) {
 978					t->parms.link = p.link;
 979					ipip6_tunnel_bind_dev(dev);
 980					netdev_state_change(dev);
 981				}
 982			}
 983			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
 984				err = -EFAULT;
 985		} else
 986			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
 987		break;
 988
 989	case SIOCDELTUNNEL:
 990		err = -EPERM;
 991		if (!capable(CAP_NET_ADMIN))
 992			goto done;
 993
 994		if (dev == sitn->fb_tunnel_dev) {
 995			err = -EFAULT;
 996			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
 997				goto done;
 998			err = -ENOENT;
 999			if ((t = ipip6_tunnel_locate(net, &p, 0)) == NULL)
1000				goto done;
1001			err = -EPERM;
1002			if (t == netdev_priv(sitn->fb_tunnel_dev))
1003				goto done;
1004			dev = t->dev;
1005		}
1006		unregister_netdevice(dev);
1007		err = 0;
1008		break;
1009
1010	case SIOCGETPRL:
1011		err = -EINVAL;
1012		if (dev == sitn->fb_tunnel_dev)
1013			goto done;
1014		err = -ENOENT;
1015		if (!(t = netdev_priv(dev)))
1016			goto done;
1017		err = ipip6_tunnel_get_prl(t, ifr->ifr_ifru.ifru_data);
1018		break;
1019
1020	case SIOCADDPRL:
1021	case SIOCDELPRL:
1022	case SIOCCHGPRL:
1023		err = -EPERM;
1024		if (!capable(CAP_NET_ADMIN))
1025			goto done;
1026		err = -EINVAL;
1027		if (dev == sitn->fb_tunnel_dev)
1028			goto done;
1029		err = -EFAULT;
1030		if (copy_from_user(&prl, ifr->ifr_ifru.ifru_data, sizeof(prl)))
1031			goto done;
1032		err = -ENOENT;
1033		if (!(t = netdev_priv(dev)))
1034			goto done;
1035
1036		switch (cmd) {
1037		case SIOCDELPRL:
1038			err = ipip6_tunnel_del_prl(t, &prl);
1039			break;
1040		case SIOCADDPRL:
1041		case SIOCCHGPRL:
1042			err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL);
1043			break;
1044		}
1045		netdev_state_change(dev);
1046		break;
1047
1048#ifdef CONFIG_IPV6_SIT_6RD
1049	case SIOCADD6RD:
1050	case SIOCCHG6RD:
1051	case SIOCDEL6RD:
1052		err = -EPERM;
1053		if (!capable(CAP_NET_ADMIN))
1054			goto done;
1055
1056		err = -EFAULT;
1057		if (copy_from_user(&ip6rd, ifr->ifr_ifru.ifru_data,
1058				   sizeof(ip6rd)))
1059			goto done;
1060
1061		t = netdev_priv(dev);
1062
1063		if (cmd != SIOCDEL6RD) {
1064			struct in6_addr prefix;
1065			__be32 relay_prefix;
1066
1067			err = -EINVAL;
1068			if (ip6rd.relay_prefixlen > 32 ||
1069			    ip6rd.prefixlen + (32 - ip6rd.relay_prefixlen) > 64)
1070				goto done;
1071
1072			ipv6_addr_prefix(&prefix, &ip6rd.prefix,
1073					 ip6rd.prefixlen);
1074			if (!ipv6_addr_equal(&prefix, &ip6rd.prefix))
1075				goto done;
1076			if (ip6rd.relay_prefixlen)
1077				relay_prefix = ip6rd.relay_prefix &
1078					       htonl(0xffffffffUL <<
1079						     (32 - ip6rd.relay_prefixlen));
1080			else
1081				relay_prefix = 0;
1082			if (relay_prefix != ip6rd.relay_prefix)
1083				goto done;
1084
1085			ipv6_addr_copy(&t->ip6rd.prefix, &prefix);
1086			t->ip6rd.relay_prefix = relay_prefix;
1087			t->ip6rd.prefixlen = ip6rd.prefixlen;
1088			t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen;
1089		} else
1090			ipip6_tunnel_clone_6rd(dev, sitn);
1091
1092		err = 0;
1093		break;
1094#endif
1095
1096	default:
1097		err = -EINVAL;
1098	}
1099
1100done:
1101	return err;
1102}
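As a usage sketch of the tunnel ioctls handled above, here is a minimal userspace fragment in the style of iproute2's `ip tunnel add <name> mode sit remote <addr> local <addr>`. It is an illustration under the assumption of the usual uapi headers; add_sit_tunnel() and the placeholder device/address names are hypothetical, not part of this file.

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/if.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>

static int add_sit_tunnel(const char *name, const char *local, const char *remote)
{
	struct ip_tunnel_parm p;
	struct ifreq ifr;
	int fd, err;

	memset(&p, 0, sizeof(p));
	strncpy(p.name, name, IFNAMSIZ - 1);
	p.iph.version  = 4;		/* SIOCADDTUNNEL above rejects anything else */
	p.iph.ihl      = 5;
	p.iph.protocol = IPPROTO_IPV6;
	p.iph.ttl      = 64;
	p.iph.saddr    = inet_addr(local);
	p.iph.daddr    = inet_addr(remote);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "sit0", IFNAMSIZ - 1);	/* request goes via the fallback device */
	ifr.ifr_ifru.ifru_data = (void *)&p;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	err = ioctl(fd, SIOCADDTUNNEL, &ifr);		/* requires CAP_NET_ADMIN */
	close(fd);
	return err;
}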
1103
1104static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1105{
1106	if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr))
1107		return -EINVAL;
1108	dev->mtu = new_mtu;
1109	return 0;
1110}
1111
1112static const struct net_device_ops ipip6_netdev_ops = {
1113	.ndo_uninit	= ipip6_tunnel_uninit,
1114	.ndo_start_xmit	= ipip6_tunnel_xmit,
1115	.ndo_do_ioctl	= ipip6_tunnel_ioctl,
1116	.ndo_change_mtu	= ipip6_tunnel_change_mtu,
1117	.ndo_get_stats	= ipip6_get_stats,
1118};
1119
1120static void ipip6_dev_free(struct net_device *dev)
1121{
1122	free_percpu(dev->tstats);
1123	free_netdev(dev);
1124}
1125
1126static void ipip6_tunnel_setup(struct net_device *dev)
1127{
1128	dev->netdev_ops		= &ipip6_netdev_ops;
1129	dev->destructor 	= ipip6_dev_free;
1130
1131	dev->type		= ARPHRD_SIT;
1132	dev->hard_header_len 	= LL_MAX_HEADER + sizeof(struct iphdr);
1133	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr);
1134	dev->flags		= IFF_NOARP;
1135	dev->priv_flags	       &= ~IFF_XMIT_DST_RELEASE;
1136	dev->iflink		= 0;
1137	dev->addr_len		= 4;
1138	dev->features		|= NETIF_F_NETNS_LOCAL;
1139	dev->features		|= NETIF_F_LLTX;
1140}
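/* Note on the defaults above: ETH_DATA_LEN (1500) minus the 20-byte outer
 * IPv4 header gives the 1480-byte tunnel MTU; ipip6_tunnel_bind_dev() and
 * ipip6_tunnel_change_mtu() keep it from ever dropping below IPV6_MIN_MTU
 * (1280). */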
1141
1142static int ipip6_tunnel_init(struct net_device *dev)
1143{
1144	struct ip_tunnel *tunnel = netdev_priv(dev);
1145
1146	tunnel->dev = dev;
1147	strcpy(tunnel->parms.name, dev->name);
1148
1149	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1150	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
1151
1152	ipip6_tunnel_bind_dev(dev);
1153	dev->tstats = alloc_percpu(struct pcpu_tstats);
1154	if (!dev->tstats)
1155		return -ENOMEM;
1156
1157	return 0;
1158}
1159
1160static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
1161{
1162	struct ip_tunnel *tunnel = netdev_priv(dev);
1163	struct iphdr *iph = &tunnel->parms.iph;
1164	struct net *net = dev_net(dev);
1165	struct sit_net *sitn = net_generic(net, sit_net_id);
1166
1167	tunnel->dev = dev;
1168	strcpy(tunnel->parms.name, dev->name);
1169
1170	iph->version		= 4;
1171	iph->protocol		= IPPROTO_IPV6;
1172	iph->ihl		= 5;
1173	iph->ttl		= 64;
1174
1175	dev->tstats = alloc_percpu(struct pcpu_tstats);
1176	if (!dev->tstats)
1177		return -ENOMEM;
1178	dev_hold(dev);
1179	rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
1180	return 0;
1181}
1182
1183static struct xfrm_tunnel sit_handler __read_mostly = {
1184	.handler	=	ipip6_rcv,
1185	.err_handler	=	ipip6_err,
1186	.priority	=	1,
1187};
1188
1189static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
1190{
1191	int prio;
1192
1193	for (prio = 1; prio < 4; prio++) {
1194		int h;
1195		for (h = 0; h < HASH_SIZE; h++) {
1196			struct ip_tunnel *t;
1197
1198			t = rtnl_dereference(sitn->tunnels[prio][h]);
1199			while (t != NULL) {
1200				unregister_netdevice_queue(t->dev, head);
1201				t = rtnl_dereference(t->next);
1202			}
1203		}
1204	}
1205}
1206
1207static int __net_init sit_init_net(struct net *net)
1208{
 1209	struct sit_net *sitn = net_generic(net, sit_net_id);
1210	int err;
1211
1212	sitn->tunnels[0] = sitn->tunnels_wc;
1213	sitn->tunnels[1] = sitn->tunnels_l;
1214	sitn->tunnels[2] = sitn->tunnels_r;
1215	sitn->tunnels[3] = sitn->tunnels_r_l;
1216
1217	sitn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "sit0",
1218					   ipip6_tunnel_setup);
1219	if (!sitn->fb_tunnel_dev) {
1220		err = -ENOMEM;
1221		goto err_alloc_dev;
1222	}
1223	dev_net_set(sitn->fb_tunnel_dev, net);
1224
1225	err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
1226	if (err)
1227		goto err_dev_free;
1228
1229	ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
1230
1231	if ((err = register_netdev(sitn->fb_tunnel_dev)))
1232		goto err_reg_dev;
 1233
1234	return 0;
1235
1236err_reg_dev:
1237	dev_put(sitn->fb_tunnel_dev);
1238err_dev_free:
1239	ipip6_dev_free(sitn->fb_tunnel_dev);
1240err_alloc_dev:
1241	return err;
1242}
1243
1244static void __net_exit sit_exit_net(struct net *net)
1245{
1246	struct sit_net *sitn = net_generic(net, sit_net_id);
1247	LIST_HEAD(list);
1248
1249	rtnl_lock();
1250	sit_destroy_tunnels(sitn, &list);
1251	unregister_netdevice_queue(sitn->fb_tunnel_dev, &list);
1252	unregister_netdevice_many(&list);
1253	rtnl_unlock();
1254}
1255
1256static struct pernet_operations sit_net_ops = {
1257	.init = sit_init_net,
1258	.exit = sit_exit_net,
1259	.id   = &sit_net_id,
1260	.size = sizeof(struct sit_net),
1261};
1262
1263static void __exit sit_cleanup(void)
1264{
1265	xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
1266
1267	unregister_pernet_device(&sit_net_ops);
1268	rcu_barrier(); /* Wait for completion of call_rcu()'s */
1269}
1270
1271static int __init sit_init(void)
1272{
1273	int err;
1274
1275	printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n");
1276
1277	err = register_pernet_device(&sit_net_ops);
1278	if (err < 0)
1279		return err;
1280	err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
1281	if (err < 0) {
1282		unregister_pernet_device(&sit_net_ops);
1283		printk(KERN_INFO "sit init: Can't add protocol\n");
1284	}
1285	return err;
1286}
1287
1288module_init(sit_init);
1289module_exit(sit_cleanup);
1290MODULE_LICENSE("GPL");
1291MODULE_ALIAS_NETDEV("sit0");
net/ipv6/sit.c (Linux v3.5.6)
   1/*
   2 *	IPv6 over IPv4 tunnel device - Simple Internet Transition (SIT)
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
   8 *
   9 *	This program is free software; you can redistribute it and/or
  10 *      modify it under the terms of the GNU General Public License
  11 *      as published by the Free Software Foundation; either version
  12 *      2 of the License, or (at your option) any later version.
  13 *
  14 *	Changes:
  15 * Roger Venning <r.venning@telstra.com>:	6to4 support
  16 * Nate Thompson <nate@thebog.net>:		6to4 support
  17 * Fred Templin <fred.l.templin@boeing.com>:	isatap support
  18 */
  19
  20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  21
  22#include <linux/module.h>
  23#include <linux/capability.h>
  24#include <linux/errno.h>
  25#include <linux/types.h>
  26#include <linux/socket.h>
  27#include <linux/sockios.h>
  28#include <linux/net.h>
  29#include <linux/in6.h>
  30#include <linux/netdevice.h>
  31#include <linux/if_arp.h>
  32#include <linux/icmp.h>
  33#include <linux/slab.h>
  34#include <asm/uaccess.h>
  35#include <linux/init.h>
  36#include <linux/netfilter_ipv4.h>
  37#include <linux/if_ether.h>
  38
  39#include <net/sock.h>
  40#include <net/snmp.h>
  41
  42#include <net/ipv6.h>
  43#include <net/protocol.h>
  44#include <net/transp_v6.h>
  45#include <net/ip6_fib.h>
  46#include <net/ip6_route.h>
  47#include <net/ndisc.h>
  48#include <net/addrconf.h>
  49#include <net/ip.h>
  50#include <net/udp.h>
  51#include <net/icmp.h>
  52#include <net/ipip.h>
  53#include <net/inet_ecn.h>
  54#include <net/xfrm.h>
  55#include <net/dsfield.h>
  56#include <net/net_namespace.h>
  57#include <net/netns/generic.h>
  58
  59/*
  60   This version of net/ipv6/sit.c is cloned of net/ipv4/ip_gre.c
  61
  62   For comments look at net/ipv4/ip_gre.c --ANK
  63 */
  64
  65#define HASH_SIZE  16
  66#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
  67
  68static int ipip6_tunnel_init(struct net_device *dev);
  69static void ipip6_tunnel_setup(struct net_device *dev);
  70static void ipip6_dev_free(struct net_device *dev);
  71
  72static int sit_net_id __read_mostly;
  73struct sit_net {
  74	struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
  75	struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
  76	struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
  77	struct ip_tunnel __rcu *tunnels_wc[1];
  78	struct ip_tunnel __rcu **tunnels[4];
  79
  80	struct net_device *fb_tunnel_dev;
  81};
  82
  83/*
  84 * Locking : hash tables are protected by RCU and RTNL
  85 */
  86
  87#define for_each_ip_tunnel_rcu(start) \
  88	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
  89
  90/* often modified stats are per cpu, other are shared (netdev->stats) */
  91struct pcpu_tstats {
  92	u64	rx_packets;
  93	u64	rx_bytes;
  94	u64	tx_packets;
  95	u64	tx_bytes;
  96	struct u64_stats_sync	syncp;
  97};
  98
  99static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev,
 100						   struct rtnl_link_stats64 *tot)
  101{
 102	int i;
 103
 104	for_each_possible_cpu(i) {
 105		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
 106		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
 107		unsigned int start;
 108
 109		do {
 110			start = u64_stats_fetch_begin_bh(&tstats->syncp);
 111			rx_packets = tstats->rx_packets;
 112			tx_packets = tstats->tx_packets;
 113			rx_bytes = tstats->rx_bytes;
 114			tx_bytes = tstats->tx_bytes;
 115		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
 116
 117		tot->rx_packets += rx_packets;
 118		tot->tx_packets += tx_packets;
 119		tot->rx_bytes   += rx_bytes;
 120		tot->tx_bytes   += tx_bytes;
 121	}
 122
 123	tot->rx_errors = dev->stats.rx_errors;
 124	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
 125	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
 126	tot->tx_dropped = dev->stats.tx_dropped;
 127	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
 128	tot->tx_errors = dev->stats.tx_errors;
 129
  130	return tot;
 131}
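Compared with ipip6_get_stats() in the v3.1 listing above, this version widens the per-cpu counters to u64 and reads them under a u64_stats_sync sequence, so 64-bit totals can be fetched without tearing on 32-bit SMP. For reference, the canonical writer side of that pattern looks like the sketch below; it is a general illustration (tstats_account_rx is a hypothetical helper), while the rx/tx hot paths in this file still bump the counters directly.

static void tstats_account_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}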
 132
 133/*
 134 * Must be invoked with rcu_read_lock
 135 */
 136static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
 137		struct net_device *dev, __be32 remote, __be32 local)
 138{
 139	unsigned int h0 = HASH(remote);
 140	unsigned int h1 = HASH(local);
 141	struct ip_tunnel *t;
 142	struct sit_net *sitn = net_generic(net, sit_net_id);
 143
 144	for_each_ip_tunnel_rcu(sitn->tunnels_r_l[h0 ^ h1]) {
 145		if (local == t->parms.iph.saddr &&
 146		    remote == t->parms.iph.daddr &&
 147		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
 148		    (t->dev->flags & IFF_UP))
 149			return t;
 150	}
 151	for_each_ip_tunnel_rcu(sitn->tunnels_r[h0]) {
 152		if (remote == t->parms.iph.daddr &&
 153		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
 154		    (t->dev->flags & IFF_UP))
 155			return t;
 156	}
 157	for_each_ip_tunnel_rcu(sitn->tunnels_l[h1]) {
 158		if (local == t->parms.iph.saddr &&
 159		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
 160		    (t->dev->flags & IFF_UP))
 161			return t;
 162	}
 163	t = rcu_dereference(sitn->tunnels_wc[0]);
 164	if ((t != NULL) && (t->dev->flags & IFF_UP))
 165		return t;
 166	return NULL;
 167}
 168
 169static struct ip_tunnel __rcu **__ipip6_bucket(struct sit_net *sitn,
 170		struct ip_tunnel_parm *parms)
 171{
 172	__be32 remote = parms->iph.daddr;
 173	__be32 local = parms->iph.saddr;
 174	unsigned int h = 0;
 175	int prio = 0;
 176
 177	if (remote) {
 178		prio |= 2;
 179		h ^= HASH(remote);
 180	}
 181	if (local) {
 182		prio |= 1;
 183		h ^= HASH(local);
 184	}
 185	return &sitn->tunnels[prio][h];
 186}
 187
 188static inline struct ip_tunnel __rcu **ipip6_bucket(struct sit_net *sitn,
 189		struct ip_tunnel *t)
 190{
 191	return __ipip6_bucket(sitn, &t->parms);
 192}
 193
 194static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
 195{
 196	struct ip_tunnel __rcu **tp;
 197	struct ip_tunnel *iter;
 198
 199	for (tp = ipip6_bucket(sitn, t);
 200	     (iter = rtnl_dereference(*tp)) != NULL;
 201	     tp = &iter->next) {
 202		if (t == iter) {
 203			rcu_assign_pointer(*tp, t->next);
 204			break;
 205		}
 206	}
 207}
 208
 209static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
 210{
 211	struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
 212
 213	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
 214	rcu_assign_pointer(*tp, t);
 215}
 216
 217static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
 218{
 219#ifdef CONFIG_IPV6_SIT_6RD
 220	struct ip_tunnel *t = netdev_priv(dev);
 221
 222	if (t->dev == sitn->fb_tunnel_dev) {
 223		ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
 224		t->ip6rd.relay_prefix = 0;
 225		t->ip6rd.prefixlen = 16;
 226		t->ip6rd.relay_prefixlen = 0;
 227	} else {
 228		struct ip_tunnel *t0 = netdev_priv(sitn->fb_tunnel_dev);
 229		memcpy(&t->ip6rd, &t0->ip6rd, sizeof(t->ip6rd));
 230	}
 231#endif
 232}
 233
 234static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
 235		struct ip_tunnel_parm *parms, int create)
 236{
 237	__be32 remote = parms->iph.daddr;
 238	__be32 local = parms->iph.saddr;
 239	struct ip_tunnel *t, *nt;
 240	struct ip_tunnel __rcu **tp;
 241	struct net_device *dev;
 242	char name[IFNAMSIZ];
 243	struct sit_net *sitn = net_generic(net, sit_net_id);
 244
 245	for (tp = __ipip6_bucket(sitn, parms);
 246	    (t = rtnl_dereference(*tp)) != NULL;
 247	     tp = &t->next) {
 248		if (local == t->parms.iph.saddr &&
 249		    remote == t->parms.iph.daddr &&
 250		    parms->link == t->parms.link) {
 251			if (create)
 252				return NULL;
 253			else
 254				return t;
 255		}
 256	}
 257	if (!create)
 258		goto failed;
 259
 260	if (parms->name[0])
 261		strlcpy(name, parms->name, IFNAMSIZ);
 262	else
 263		strcpy(name, "sit%d");
 264
 265	dev = alloc_netdev(sizeof(*t), name, ipip6_tunnel_setup);
 266	if (dev == NULL)
 267		return NULL;
 268
 269	dev_net_set(dev, net);
 270
 271	nt = netdev_priv(dev);
 272
 273	nt->parms = *parms;
 274	if (ipip6_tunnel_init(dev) < 0)
 275		goto failed_free;
 276	ipip6_tunnel_clone_6rd(dev, sitn);
 277
 278	if (parms->i_flags & SIT_ISATAP)
 279		dev->priv_flags |= IFF_ISATAP;
 280
 281	if (register_netdevice(dev) < 0)
 282		goto failed_free;
 283
 284	strcpy(nt->parms.name, dev->name);
 285
 286	dev_hold(dev);
 287
 288	ipip6_tunnel_link(sitn, nt);
 289	return nt;
 290
 291failed_free:
 292	ipip6_dev_free(dev);
 293failed:
 294	return NULL;
 295}
 296
 297#define for_each_prl_rcu(start)			\
 298	for (prl = rcu_dereference(start);	\
 299	     prl;				\
 300	     prl = rcu_dereference(prl->next))
 301
 302static struct ip_tunnel_prl_entry *
 303__ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr)
 304{
 305	struct ip_tunnel_prl_entry *prl;
 306
 307	for_each_prl_rcu(t->prl)
 308		if (prl->addr == addr)
 309			break;
 310	return prl;
 311
 312}
 313
 314static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
 315				struct ip_tunnel_prl __user *a)
 316{
 317	struct ip_tunnel_prl kprl, *kp;
 318	struct ip_tunnel_prl_entry *prl;
 319	unsigned int cmax, c = 0, ca, len;
 320	int ret = 0;
 321
 322	if (copy_from_user(&kprl, a, sizeof(kprl)))
 323		return -EFAULT;
 324	cmax = kprl.datalen / sizeof(kprl);
 325	if (cmax > 1 && kprl.addr != htonl(INADDR_ANY))
 326		cmax = 1;
 327
 328	/* For simple GET or for root users,
 329	 * we try harder to allocate.
 330	 */
 331	kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ?
 332		kcalloc(cmax, sizeof(*kp), GFP_KERNEL) :
 333		NULL;
 334
 335	rcu_read_lock();
 336
 337	ca = t->prl_count < cmax ? t->prl_count : cmax;
 338
 339	if (!kp) {
 340		/* We don't try hard to allocate much memory for
 341		 * non-root users.
 342		 * For root users, retry allocating enough memory for
 343		 * the answer.
 344		 */
 345		kp = kcalloc(ca, sizeof(*kp), GFP_ATOMIC);
 346		if (!kp) {
 347			ret = -ENOMEM;
 348			goto out;
 349		}
 350	}
 351
 352	c = 0;
 353	for_each_prl_rcu(t->prl) {
 354		if (c >= cmax)
 355			break;
 356		if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr)
 357			continue;
 358		kp[c].addr = prl->addr;
 359		kp[c].flags = prl->flags;
 360		c++;
 361		if (kprl.addr != htonl(INADDR_ANY))
 362			break;
 363	}
 364out:
 365	rcu_read_unlock();
 366
 367	len = sizeof(*kp) * c;
 368	ret = 0;
 369	if ((len && copy_to_user(a + 1, kp, len)) || put_user(len, &a->datalen))
 370		ret = -EFAULT;
 371
 372	kfree(kp);
 373
 374	return ret;
 375}
 376
 377static int
 378ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
 379{
 380	struct ip_tunnel_prl_entry *p;
 381	int err = 0;
 382
 383	if (a->addr == htonl(INADDR_ANY))
 384		return -EINVAL;
 385
 386	ASSERT_RTNL();
 387
 388	for (p = rtnl_dereference(t->prl); p; p = rtnl_dereference(p->next)) {
 389		if (p->addr == a->addr) {
 390			if (chg) {
 391				p->flags = a->flags;
 392				goto out;
 393			}
 394			err = -EEXIST;
 395			goto out;
 396		}
 397	}
 398
 399	if (chg) {
 400		err = -ENXIO;
 401		goto out;
 402	}
 403
 404	p = kzalloc(sizeof(struct ip_tunnel_prl_entry), GFP_KERNEL);
 405	if (!p) {
 406		err = -ENOBUFS;
 407		goto out;
 408	}
 409
 410	p->next = t->prl;
 411	p->addr = a->addr;
 412	p->flags = a->flags;
 413	t->prl_count++;
 414	rcu_assign_pointer(t->prl, p);
 415out:
 416	return err;
 417}
 418
 419static void prl_list_destroy_rcu(struct rcu_head *head)
 420{
 421	struct ip_tunnel_prl_entry *p, *n;
 422
 423	p = container_of(head, struct ip_tunnel_prl_entry, rcu_head);
 424	do {
 425		n = rcu_dereference_protected(p->next, 1);
 426		kfree(p);
 427		p = n;
 428	} while (p);
 429}
 430
 431static int
 432ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
 433{
 434	struct ip_tunnel_prl_entry *x;
 435	struct ip_tunnel_prl_entry __rcu **p;
 436	int err = 0;
 437
 438	ASSERT_RTNL();
 439
 440	if (a && a->addr != htonl(INADDR_ANY)) {
 441		for (p = &t->prl;
 442		     (x = rtnl_dereference(*p)) != NULL;
 443		     p = &x->next) {
 444			if (x->addr == a->addr) {
 445				*p = x->next;
 446				kfree_rcu(x, rcu_head);
 447				t->prl_count--;
 448				goto out;
 449			}
 450		}
 451		err = -ENXIO;
 452	} else {
 453		x = rtnl_dereference(t->prl);
 454		if (x) {
 455			t->prl_count = 0;
 456			call_rcu(&x->rcu_head, prl_list_destroy_rcu);
 457			t->prl = NULL;
 458		}
 459	}
 460out:
 461	return err;
 462}
 463
 464static int
 465isatap_chksrc(struct sk_buff *skb, const struct iphdr *iph, struct ip_tunnel *t)
 466{
 467	struct ip_tunnel_prl_entry *p;
 468	int ok = 1;
 469
 470	rcu_read_lock();
 471	p = __ipip6_tunnel_locate_prl(t, iph->saddr);
 472	if (p) {
 473		if (p->flags & PRL_DEFAULT)
 474			skb->ndisc_nodetype = NDISC_NODETYPE_DEFAULT;
 475		else
 476			skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT;
 477	} else {
 478		const struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr;
 479
 480		if (ipv6_addr_is_isatap(addr6) &&
 481		    (addr6->s6_addr32[3] == iph->saddr) &&
 482		    ipv6_chk_prefix(addr6, t->dev))
 483			skb->ndisc_nodetype = NDISC_NODETYPE_HOST;
 484		else
 485			ok = 0;
 486	}
 487	rcu_read_unlock();
 488	return ok;
 489}
 490
 491static void ipip6_tunnel_uninit(struct net_device *dev)
 492{
 493	struct net *net = dev_net(dev);
 494	struct sit_net *sitn = net_generic(net, sit_net_id);
 495
 496	if (dev == sitn->fb_tunnel_dev) {
 497		RCU_INIT_POINTER(sitn->tunnels_wc[0], NULL);
 498	} else {
 499		ipip6_tunnel_unlink(sitn, netdev_priv(dev));
 500		ipip6_tunnel_del_prl(netdev_priv(dev), NULL);
 501	}
 502	dev_put(dev);
 503}
 504
 505
 506static int ipip6_err(struct sk_buff *skb, u32 info)
 507{
 508
 509/* All the routers (except for Linux) return only
 510   8 bytes of packet payload. It means, that precise relaying of
 511   ICMP in the real Internet is absolutely infeasible.
 512 */
 513	const struct iphdr *iph = (const struct iphdr *)skb->data;
 514	const int type = icmp_hdr(skb)->type;
 515	const int code = icmp_hdr(skb)->code;
 516	struct ip_tunnel *t;
 517	int err;
 518
 519	switch (type) {
 520	default:
 521	case ICMP_PARAMETERPROB:
 522		return 0;
 523
 524	case ICMP_DEST_UNREACH:
 525		switch (code) {
 526		case ICMP_SR_FAILED:
 527		case ICMP_PORT_UNREACH:
 528			/* Impossible event. */
 529			return 0;
 530		case ICMP_FRAG_NEEDED:
 531			/* Soft state for pmtu is maintained by IP core. */
 532			return 0;
 533		default:
 534			/* All others are translated to HOST_UNREACH.
 535			   rfc2003 contains "deep thoughts" about NET_UNREACH,
 536			   I believe they are just ether pollution. --ANK
 537			 */
 538			break;
 539		}
 540		break;
 541	case ICMP_TIME_EXCEEDED:
 542		if (code != ICMP_EXC_TTL)
 543			return 0;
 544		break;
 545	}
 546
 547	err = -ENOENT;
 548
 549	rcu_read_lock();
 550	t = ipip6_tunnel_lookup(dev_net(skb->dev),
 551				skb->dev,
 552				iph->daddr,
 553				iph->saddr);
 554	if (t == NULL || t->parms.iph.daddr == 0)
 555		goto out;
 556
 557	err = 0;
 558	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
 559		goto out;
 560
 561	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
 562		t->err_count++;
 563	else
 564		t->err_count = 1;
 565	t->err_time = jiffies;
 566out:
 567	rcu_read_unlock();
 568	return err;
 569}
 570
 571static inline void ipip6_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
 572{
 573	if (INET_ECN_is_ce(iph->tos))
 574		IP6_ECN_set_ce(ipv6_hdr(skb));
 575}
 576
 577static int ipip6_rcv(struct sk_buff *skb)
 578{
 579	const struct iphdr *iph;
 580	struct ip_tunnel *tunnel;
 581
 582	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
 583		goto out;
 584
 585	iph = ip_hdr(skb);
 586
 587	rcu_read_lock();
 588	tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
 589				     iph->saddr, iph->daddr);
 590	if (tunnel != NULL) {
 591		struct pcpu_tstats *tstats;
 592
 593		secpath_reset(skb);
 594		skb->mac_header = skb->network_header;
 595		skb_reset_network_header(skb);
 596		IPCB(skb)->flags = 0;
 597		skb->protocol = htons(ETH_P_IPV6);
 598		skb->pkt_type = PACKET_HOST;
 599
 600		if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
 601		    !isatap_chksrc(skb, iph, tunnel)) {
 602			tunnel->dev->stats.rx_errors++;
 603			rcu_read_unlock();
 604			kfree_skb(skb);
 605			return 0;
 606		}
 607
 608		tstats = this_cpu_ptr(tunnel->dev->tstats);
 609		tstats->rx_packets++;
 610		tstats->rx_bytes += skb->len;
 611
 612		__skb_tunnel_rx(skb, tunnel->dev);
 613
 614		ipip6_ecn_decapsulate(iph, skb);
 615
 616		netif_rx(skb);
 617
 618		rcu_read_unlock();
 619		return 0;
 620	}
 621
 622	/* no tunnel matched,  let upstream know, ipsec may handle it */
 623	rcu_read_unlock();
 624	return 1;
 625out:
 626	kfree_skb(skb);
 627	return 0;
 628}
 629
 630/*
 631 * Returns the embedded IPv4 address if the IPv6 address
 632 * comes from 6rd / 6to4 (RFC 3056) addr space.
 633 */
 634static inline
 635__be32 try_6rd(const struct in6_addr *v6dst, struct ip_tunnel *tunnel)
 636{
 637	__be32 dst = 0;
 638
 639#ifdef CONFIG_IPV6_SIT_6RD
 640	if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix,
 641			      tunnel->ip6rd.prefixlen)) {
 642		unsigned int pbw0, pbi0;
 643		int pbi1;
 644		u32 d;
 645
 646		pbw0 = tunnel->ip6rd.prefixlen >> 5;
 647		pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
 648
 649		d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
 650		    tunnel->ip6rd.relay_prefixlen;
 651
 652		pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
 653		if (pbi1 > 0)
 654			d |= ntohl(v6dst->s6_addr32[pbw0 + 1]) >>
 655			     (32 - pbi1);
 656
 657		dst = tunnel->ip6rd.relay_prefix | htonl(d);
 658	}
 659#else
 660	if (v6dst->s6_addr16[0] == htons(0x2002)) {
 661		/* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */
 662		memcpy(&dst, &v6dst->s6_addr16[1], 4);
 663	}
 664#endif
 665	return dst;
 666}
 667
 668/*
 669 *	This function assumes it is being called from dev_queue_xmit()
 670 *	and that skb is filled properly by that function.
 671 */
 672
 673static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 674				     struct net_device *dev)
 675{
 676	struct ip_tunnel *tunnel = netdev_priv(dev);
 677	struct pcpu_tstats *tstats;
 678	const struct iphdr  *tiph = &tunnel->parms.iph;
 679	const struct ipv6hdr *iph6 = ipv6_hdr(skb);
 680	u8     tos = tunnel->parms.iph.tos;
 681	__be16 df = tiph->frag_off;
 682	struct rtable *rt;     			/* Route to the other host */
 683	struct net_device *tdev;		/* Device to other host */
 684	struct iphdr  *iph;			/* Our new IP header */
 685	unsigned int max_headroom;		/* The extra header space needed */
 686	__be32 dst = tiph->daddr;
 687	struct flowi4 fl4;
 688	int    mtu;
 689	const struct in6_addr *addr6;
 690	int addr_type;
 691
 692	if (skb->protocol != htons(ETH_P_IPV6))
 693		goto tx_error;
 694
 695	if (tos == 1)
 696		tos = ipv6_get_dsfield(iph6);
 697
 698	/* ISATAP (RFC4214) - must come before 6to4 */
 699	if (dev->priv_flags & IFF_ISATAP) {
 700		struct neighbour *neigh = NULL;
 701		bool do_tx_error = false;
 702
 703		if (skb_dst(skb))
 704			neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
 705
 706		if (neigh == NULL) {
  707			net_dbg_ratelimited("sit: nexthop == NULL\n");
 708			goto tx_error;
 709		}
 710
 711		addr6 = (const struct in6_addr *)&neigh->primary_key;
 712		addr_type = ipv6_addr_type(addr6);
 713
 714		if ((addr_type & IPV6_ADDR_UNICAST) &&
 715		     ipv6_addr_is_isatap(addr6))
 716			dst = addr6->s6_addr32[3];
 717		else
 718			do_tx_error = true;
 719
 720		neigh_release(neigh);
 721		if (do_tx_error)
 722			goto tx_error;
 723	}
 724
 725	if (!dst)
 726		dst = try_6rd(&iph6->daddr, tunnel);
 727
 728	if (!dst) {
 729		struct neighbour *neigh = NULL;
 730		bool do_tx_error = false;
 731
 732		if (skb_dst(skb))
 733			neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
 734
 735		if (neigh == NULL) {
  736			net_dbg_ratelimited("sit: nexthop == NULL\n");
 737			goto tx_error;
 738		}
 739
 740		addr6 = (const struct in6_addr *)&neigh->primary_key;
 741		addr_type = ipv6_addr_type(addr6);
 742
 743		if (addr_type == IPV6_ADDR_ANY) {
 744			addr6 = &ipv6_hdr(skb)->daddr;
 745			addr_type = ipv6_addr_type(addr6);
 746		}
 747
 748		if ((addr_type & IPV6_ADDR_COMPATv4) != 0)
 749			dst = addr6->s6_addr32[3];
 750		else
 751			do_tx_error = true;
 752
 753		neigh_release(neigh);
 754		if (do_tx_error)
 755			goto tx_error;
 756	}
 757
 758	rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
 759				   dst, tiph->saddr,
 760				   0, 0,
 761				   IPPROTO_IPV6, RT_TOS(tos),
 762				   tunnel->parms.link);
 763	if (IS_ERR(rt)) {
 764		dev->stats.tx_carrier_errors++;
 765		goto tx_error_icmp;
 766	}
 767	if (rt->rt_type != RTN_UNICAST) {
 768		ip_rt_put(rt);
 769		dev->stats.tx_carrier_errors++;
 770		goto tx_error_icmp;
 771	}
 772	tdev = rt->dst.dev;
 773
 774	if (tdev == dev) {
 775		ip_rt_put(rt);
 776		dev->stats.collisions++;
 777		goto tx_error;
 778	}
 779
 780	if (df) {
 781		mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
 782
 783		if (mtu < 68) {
 784			dev->stats.collisions++;
 785			ip_rt_put(rt);
 786			goto tx_error;
 787		}
 788
 789		if (mtu < IPV6_MIN_MTU) {
 790			mtu = IPV6_MIN_MTU;
 791			df = 0;
 792		}
 793
 794		if (tunnel->parms.iph.daddr && skb_dst(skb))
 795			skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
 796
 797		if (skb->len > mtu) {
 798			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 799			ip_rt_put(rt);
 800			goto tx_error;
 801		}
 802	}
 803
 804	if (tunnel->err_count > 0) {
 805		if (time_before(jiffies,
 806				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
 807			tunnel->err_count--;
 808			dst_link_failure(skb);
 809		} else
 810			tunnel->err_count = 0;
 811	}
 812
 813	/*
 814	 * Okay, now see if we can stuff it in the buffer as-is.
 815	 */
 816	max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr);
 817
 818	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
 819	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
 820		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
 821		if (!new_skb) {
 822			ip_rt_put(rt);
 823			dev->stats.tx_dropped++;
 824			dev_kfree_skb(skb);
 825			return NETDEV_TX_OK;
 826		}
 827		if (skb->sk)
 828			skb_set_owner_w(new_skb, skb->sk);
 829		dev_kfree_skb(skb);
 830		skb = new_skb;
 831		iph6 = ipv6_hdr(skb);
 832	}
 833
 834	skb->transport_header = skb->network_header;
 835	skb_push(skb, sizeof(struct iphdr));
 836	skb_reset_network_header(skb);
 837	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 838	IPCB(skb)->flags = 0;
 839	skb_dst_drop(skb);
 840	skb_dst_set(skb, &rt->dst);
 841
 842	/*
 843	 *	Push down and install the IPIP header.
 844	 */
 845
 846	iph 			=	ip_hdr(skb);
 847	iph->version		=	4;
 848	iph->ihl		=	sizeof(struct iphdr)>>2;
 849	iph->frag_off		=	df;
 850	iph->protocol		=	IPPROTO_IPV6;
 851	iph->tos		=	INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
 852	iph->daddr		=	fl4.daddr;
 853	iph->saddr		=	fl4.saddr;
 854
 855	if ((iph->ttl = tiph->ttl) == 0)
 856		iph->ttl	=	iph6->hop_limit;
 857
 858	nf_reset(skb);
 859	tstats = this_cpu_ptr(dev->tstats);
 860	__IPTUNNEL_XMIT(tstats, &dev->stats);
 861	return NETDEV_TX_OK;
 862
 863tx_error_icmp:
 864	dst_link_failure(skb);
 865tx_error:
 866	dev->stats.tx_errors++;
 867	dev_kfree_skb(skb);
 868	return NETDEV_TX_OK;
 869}
 870
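/* Resolve the underlying IPv4 device for the tunnel (via the route to
 * the remote endpoint, or via parms.link) and derive hard_header_len
 * and MTU from it.
 */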
 871static void ipip6_tunnel_bind_dev(struct net_device *dev)
 872{
 873	struct net_device *tdev = NULL;
 874	struct ip_tunnel *tunnel;
 875	const struct iphdr *iph;
 876	struct flowi4 fl4;
 877
 878	tunnel = netdev_priv(dev);
 879	iph = &tunnel->parms.iph;
 880
 881	if (iph->daddr) {
 882		struct rtable *rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
 883							  iph->daddr, iph->saddr,
 884							  0, 0,
 885							  IPPROTO_IPV6,
 886							  RT_TOS(iph->tos),
 887							  tunnel->parms.link);
 888
 889		if (!IS_ERR(rt)) {
 890			tdev = rt->dst.dev;
 891			ip_rt_put(rt);
 892		}
 893		dev->flags |= IFF_POINTOPOINT;
 894	}
 895
 896	if (!tdev && tunnel->parms.link)
 897		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
 898
 899	if (tdev) {
 900		dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
 901		dev->mtu = tdev->mtu - sizeof(struct iphdr);
 902		if (dev->mtu < IPV6_MIN_MTU)
 903			dev->mtu = IPV6_MIN_MTU;
 904	}
 905	dev->iflink = tunnel->parms.link;
 906}
 907
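/* Legacy configuration interface: get/add/change/delete tunnels, manage
 * the ISATAP potential router list (PRL) and, when enabled, the 6rd
 * parameters.  The add/change/delete, PRL and 6rd commands require
 * CAP_NET_ADMIN.
 *
 * Rough user-space sketch of SIOCADDTUNNEL against the fallback device;
 * the helper name and addresses are illustrative only, not part of this
 * file:
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tunnel.h>
 *
 *	static int add_sit_tunnel(__be32 local, __be32 remote)
 *	{
 *		struct ip_tunnel_parm p;
 *		struct ifreq ifr;
 *		int fd, err;
 *
 *		fd = socket(AF_INET, SOCK_DGRAM, 0);
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&p, 0, sizeof(p));
 *		strncpy(p.name, "sit1", IFNAMSIZ - 1);
 *		p.iph.version  = 4;
 *		p.iph.ihl      = 5;
 *		p.iph.protocol = IPPROTO_IPV6;
 *		p.iph.saddr    = local;		// network byte order
 *		p.iph.daddr    = remote;	// network byte order
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "sit0", IFNAMSIZ - 1);
 *		ifr.ifr_ifru.ifru_data = (void *)&p;
 *		err = ioctl(fd, SIOCADDTUNNEL, &ifr);
 *		close(fd);
 *		return err;
 *	}
 */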
 908static int
 909ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
 910{
 911	int err = 0;
 912	struct ip_tunnel_parm p;
 913	struct ip_tunnel_prl prl;
 914	struct ip_tunnel *t;
 915	struct net *net = dev_net(dev);
 916	struct sit_net *sitn = net_generic(net, sit_net_id);
 917#ifdef CONFIG_IPV6_SIT_6RD
 918	struct ip_tunnel_6rd ip6rd;
 919#endif
 920
 921	switch (cmd) {
 922	case SIOCGETTUNNEL:
 923#ifdef CONFIG_IPV6_SIT_6RD
 924	case SIOCGET6RD:
 925#endif
 926		t = NULL;
 927		if (dev == sitn->fb_tunnel_dev) {
 928			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
 929				err = -EFAULT;
 930				break;
 931			}
 932			t = ipip6_tunnel_locate(net, &p, 0);
 933		}
 934		if (t == NULL)
 935			t = netdev_priv(dev);
 936
 937		err = -EFAULT;
 938		if (cmd == SIOCGETTUNNEL) {
 939			memcpy(&p, &t->parms, sizeof(p));
 940			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p,
 941					 sizeof(p)))
 942				goto done;
 943#ifdef CONFIG_IPV6_SIT_6RD
 944		} else {
 945			ip6rd.prefix = t->ip6rd.prefix;
 946			ip6rd.relay_prefix = t->ip6rd.relay_prefix;
 947			ip6rd.prefixlen = t->ip6rd.prefixlen;
 948			ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen;
 949			if (copy_to_user(ifr->ifr_ifru.ifru_data, &ip6rd,
 950					 sizeof(ip6rd)))
 951				goto done;
 952#endif
 953		}
 954		err = 0;
 955		break;
 956
 957	case SIOCADDTUNNEL:
 958	case SIOCCHGTUNNEL:
 959		err = -EPERM;
 960		if (!capable(CAP_NET_ADMIN))
 961			goto done;
 962
 963		err = -EFAULT;
 964		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
 965			goto done;
 966
 967		err = -EINVAL;
 968		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPV6 ||
 969		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
 970			goto done;
 971		if (p.iph.ttl)
 972			p.iph.frag_off |= htons(IP_DF);
 973
 974		t = ipip6_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
 975
 976		if (dev != sitn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
 977			if (t != NULL) {
 978				if (t->dev != dev) {
 979					err = -EEXIST;
 980					break;
 981				}
 982			} else {
 983				if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
 984				    (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
 985					err = -EINVAL;
 986					break;
 987				}
 988				t = netdev_priv(dev);
 989				ipip6_tunnel_unlink(sitn, t);
 990				synchronize_net();
 991				t->parms.iph.saddr = p.iph.saddr;
 992				t->parms.iph.daddr = p.iph.daddr;
 993				memcpy(dev->dev_addr, &p.iph.saddr, 4);
 994				memcpy(dev->broadcast, &p.iph.daddr, 4);
 995				ipip6_tunnel_link(sitn, t);
 996				netdev_state_change(dev);
 997			}
 998		}
 999
1000		if (t) {
1001			err = 0;
1002			if (cmd == SIOCCHGTUNNEL) {
1003				t->parms.iph.ttl = p.iph.ttl;
1004				t->parms.iph.tos = p.iph.tos;
1005				if (t->parms.link != p.link) {
1006					t->parms.link = p.link;
1007					ipip6_tunnel_bind_dev(dev);
1008					netdev_state_change(dev);
1009				}
1010			}
1011			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
1012				err = -EFAULT;
1013		} else
1014			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1015		break;
1016
1017	case SIOCDELTUNNEL:
1018		err = -EPERM;
1019		if (!capable(CAP_NET_ADMIN))
1020			goto done;
1021
1022		if (dev == sitn->fb_tunnel_dev) {
1023			err = -EFAULT;
1024			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1025				goto done;
1026			err = -ENOENT;
1027			if ((t = ipip6_tunnel_locate(net, &p, 0)) == NULL)
1028				goto done;
1029			err = -EPERM;
1030			if (t == netdev_priv(sitn->fb_tunnel_dev))
1031				goto done;
1032			dev = t->dev;
1033		}
1034		unregister_netdevice(dev);
1035		err = 0;
1036		break;
1037
1038	case SIOCGETPRL:
1039		err = -EINVAL;
1040		if (dev == sitn->fb_tunnel_dev)
1041			goto done;
1042		err = -ENOENT;
1043		if (!(t = netdev_priv(dev)))
1044			goto done;
1045		err = ipip6_tunnel_get_prl(t, ifr->ifr_ifru.ifru_data);
1046		break;
1047
1048	case SIOCADDPRL:
1049	case SIOCDELPRL:
1050	case SIOCCHGPRL:
1051		err = -EPERM;
1052		if (!capable(CAP_NET_ADMIN))
1053			goto done;
1054		err = -EINVAL;
1055		if (dev == sitn->fb_tunnel_dev)
1056			goto done;
1057		err = -EFAULT;
1058		if (copy_from_user(&prl, ifr->ifr_ifru.ifru_data, sizeof(prl)))
1059			goto done;
1060		err = -ENOENT;
1061		if (!(t = netdev_priv(dev)))
1062			goto done;
1063
1064		switch (cmd) {
1065		case SIOCDELPRL:
1066			err = ipip6_tunnel_del_prl(t, &prl);
1067			break;
1068		case SIOCADDPRL:
1069		case SIOCCHGPRL:
1070			err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL);
1071			break;
1072		}
1073		netdev_state_change(dev);
1074		break;
1075
1076#ifdef CONFIG_IPV6_SIT_6RD
1077	case SIOCADD6RD:
1078	case SIOCCHG6RD:
1079	case SIOCDEL6RD:
1080		err = -EPERM;
1081		if (!capable(CAP_NET_ADMIN))
1082			goto done;
1083
1084		err = -EFAULT;
1085		if (copy_from_user(&ip6rd, ifr->ifr_ifru.ifru_data,
1086				   sizeof(ip6rd)))
1087			goto done;
1088
1089		t = netdev_priv(dev);
1090
1091		if (cmd != SIOCDEL6RD) {
1092			struct in6_addr prefix;
1093			__be32 relay_prefix;
1094
1095			err = -EINVAL;
1096			if (ip6rd.relay_prefixlen > 32 ||
1097			    ip6rd.prefixlen + (32 - ip6rd.relay_prefixlen) > 64)
1098				goto done;
1099
1100			ipv6_addr_prefix(&prefix, &ip6rd.prefix,
1101					 ip6rd.prefixlen);
1102			if (!ipv6_addr_equal(&prefix, &ip6rd.prefix))
1103				goto done;
1104			if (ip6rd.relay_prefixlen)
1105				relay_prefix = ip6rd.relay_prefix &
1106					       htonl(0xffffffffUL <<
1107						     (32 - ip6rd.relay_prefixlen));
1108			else
1109				relay_prefix = 0;
1110			if (relay_prefix != ip6rd.relay_prefix)
1111				goto done;
1112
1113			t->ip6rd.prefix = prefix;
1114			t->ip6rd.relay_prefix = relay_prefix;
1115			t->ip6rd.prefixlen = ip6rd.prefixlen;
1116			t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen;
1117		} else
1118			ipip6_tunnel_clone_6rd(dev, sitn);
1119
1120		err = 0;
1121		break;
1122#endif
1123
1124	default:
1125		err = -EINVAL;
1126	}
1127
1128done:
1129	return err;
1130}
1131
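/* The upper bound keeps the outer IPv4 datagram (tunnel MTU plus the
 * 20-byte encapsulation header) within the largest 8-byte aligned IPv4
 * packet size (0xFFF8).
 */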
1132static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1133{
1134	if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr))
1135		return -EINVAL;
1136	dev->mtu = new_mtu;
1137	return 0;
1138}
1139
1140static const struct net_device_ops ipip6_netdev_ops = {
1141	.ndo_uninit	= ipip6_tunnel_uninit,
1142	.ndo_start_xmit	= ipip6_tunnel_xmit,
1143	.ndo_do_ioctl	= ipip6_tunnel_ioctl,
1144	.ndo_change_mtu	= ipip6_tunnel_change_mtu,
 1145	.ndo_get_stats64 = ipip6_get_stats64,
1146};
1147
1148static void ipip6_dev_free(struct net_device *dev)
1149{
1150	free_percpu(dev->tstats);
1151	free_netdev(dev);
1152}
1153
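/* netdev defaults for a sit device: SIT link type, header room and MTU
 * accounting for the outer IPv4 header, no ARP, lockless transmit, and
 * confined to its network namespace.
 */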
1154static void ipip6_tunnel_setup(struct net_device *dev)
1155{
1156	dev->netdev_ops		= &ipip6_netdev_ops;
1157	dev->destructor 	= ipip6_dev_free;
1158
1159	dev->type		= ARPHRD_SIT;
1160	dev->hard_header_len 	= LL_MAX_HEADER + sizeof(struct iphdr);
1161	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr);
1162	dev->flags		= IFF_NOARP;
1163	dev->priv_flags	       &= ~IFF_XMIT_DST_RELEASE;
1164	dev->iflink		= 0;
1165	dev->addr_len		= 4;
1166	dev->features		|= NETIF_F_NETNS_LOCAL;
1167	dev->features		|= NETIF_F_LLTX;
1168}
1169
1170static int ipip6_tunnel_init(struct net_device *dev)
1171{
1172	struct ip_tunnel *tunnel = netdev_priv(dev);
1173
1174	tunnel->dev = dev;
1175
1176	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1177	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
1178
1179	ipip6_tunnel_bind_dev(dev);
1180	dev->tstats = alloc_percpu(struct pcpu_tstats);
1181	if (!dev->tstats)
1182		return -ENOMEM;
1183
1184	return 0;
1185}
1186
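/* The per-namespace fallback device ("sit0") anchors the wildcard hash
 * chain and receives tunnelled packets that match no specific tunnel.
 */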
1187static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
1188{
1189	struct ip_tunnel *tunnel = netdev_priv(dev);
1190	struct iphdr *iph = &tunnel->parms.iph;
1191	struct net *net = dev_net(dev);
1192	struct sit_net *sitn = net_generic(net, sit_net_id);
1193
1194	tunnel->dev = dev;
1195	strcpy(tunnel->parms.name, dev->name);
1196
1197	iph->version		= 4;
1198	iph->protocol		= IPPROTO_IPV6;
1199	iph->ihl		= 5;
1200	iph->ttl		= 64;
1201
1202	dev->tstats = alloc_percpu(struct pcpu_tstats);
1203	if (!dev->tstats)
1204		return -ENOMEM;
1205	dev_hold(dev);
1206	rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
1207	return 0;
1208}
1209
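/* IPv4 protocol handler for IPPROTO_IPV6 payloads, registered through
 * xfrm4_tunnel_register() in sit_init().
 */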
1210static struct xfrm_tunnel sit_handler __read_mostly = {
1211	.handler	=	ipip6_rcv,
1212	.err_handler	=	ipip6_err,
1213	.priority	=	1,
1214};
1215
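/* Queue every device in the per-address hash chains for unregistration;
 * the wildcard chain (prio 0) holds only the fallback device, which the
 * caller removes separately.
 */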
1216static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
1217{
1218	int prio;
1219
1220	for (prio = 1; prio < 4; prio++) {
1221		int h;
1222		for (h = 0; h < HASH_SIZE; h++) {
1223			struct ip_tunnel *t;
1224
1225			t = rtnl_dereference(sitn->tunnels[prio][h]);
1226			while (t != NULL) {
1227				unregister_netdevice_queue(t->dev, head);
1228				t = rtnl_dereference(t->next);
1229			}
1230		}
1231	}
1232}
1233
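/* Per-namespace setup: wire up the hash chain array and create and
 * register the fallback device.
 */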
1234static int __net_init sit_init_net(struct net *net)
1235{
1236	struct sit_net *sitn = net_generic(net, sit_net_id);
1237	struct ip_tunnel *t;
1238	int err;
1239
1240	sitn->tunnels[0] = sitn->tunnels_wc;
1241	sitn->tunnels[1] = sitn->tunnels_l;
1242	sitn->tunnels[2] = sitn->tunnels_r;
1243	sitn->tunnels[3] = sitn->tunnels_r_l;
1244
1245	sitn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "sit0",
1246					   ipip6_tunnel_setup);
1247	if (!sitn->fb_tunnel_dev) {
1248		err = -ENOMEM;
1249		goto err_alloc_dev;
1250	}
1251	dev_net_set(sitn->fb_tunnel_dev, net);
1252
1253	err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
1254	if (err)
1255		goto err_dev_free;
1256
1257	ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
1258
1259	if ((err = register_netdev(sitn->fb_tunnel_dev)))
1260		goto err_reg_dev;
1261
1262	t = netdev_priv(sitn->fb_tunnel_dev);
1263
1264	strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
1265	return 0;
1266
1267err_reg_dev:
1268	dev_put(sitn->fb_tunnel_dev);
1269err_dev_free:
1270	ipip6_dev_free(sitn->fb_tunnel_dev);
1271err_alloc_dev:
1272	return err;
1273}
1274
1275static void __net_exit sit_exit_net(struct net *net)
1276{
1277	struct sit_net *sitn = net_generic(net, sit_net_id);
1278	LIST_HEAD(list);
1279
1280	rtnl_lock();
1281	sit_destroy_tunnels(sitn, &list);
1282	unregister_netdevice_queue(sitn->fb_tunnel_dev, &list);
1283	unregister_netdevice_many(&list);
1284	rtnl_unlock();
1285}
1286
1287static struct pernet_operations sit_net_ops = {
1288	.init = sit_init_net,
1289	.exit = sit_exit_net,
1290	.id   = &sit_net_id,
1291	.size = sizeof(struct sit_net),
1292};
1293
1294static void __exit sit_cleanup(void)
1295{
1296	xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
1297
1298	unregister_pernet_device(&sit_net_ops);
1299	rcu_barrier(); /* Wait for completion of call_rcu()'s */
1300}
1301
1302static int __init sit_init(void)
1303{
1304	int err;
1305
1306	pr_info("IPv6 over IPv4 tunneling driver\n");
1307
1308	err = register_pernet_device(&sit_net_ops);
1309	if (err < 0)
1310		return err;
1311	err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
1312	if (err < 0) {
1313		unregister_pernet_device(&sit_net_ops);
1314		pr_info("%s: can't add protocol\n", __func__);
1315	}
1316	return err;
1317}
1318
1319module_init(sit_init);
1320module_exit(sit_cleanup);
1321MODULE_LICENSE("GPL");
1322MODULE_ALIAS_NETDEV("sit0");