net/ipv4/ip_tunnel.c (Linux v6.13.7)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2013 Nicira, Inc.
   4 */
   5
   6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   7
   8#include <linux/capability.h>
   9#include <linux/module.h>
  10#include <linux/types.h>
  11#include <linux/kernel.h>
  12#include <linux/slab.h>
  13#include <linux/uaccess.h>
  14#include <linux/skbuff.h>
  15#include <linux/netdevice.h>
  16#include <linux/in.h>
  17#include <linux/tcp.h>
  18#include <linux/udp.h>
  19#include <linux/if_arp.h>
  20#include <linux/init.h>
  21#include <linux/in6.h>
  22#include <linux/inetdevice.h>
  23#include <linux/igmp.h>
  24#include <linux/netfilter_ipv4.h>
  25#include <linux/etherdevice.h>
  26#include <linux/if_ether.h>
  27#include <linux/if_vlan.h>
  28#include <linux/rculist.h>
  29#include <linux/err.h>
  30
  31#include <net/sock.h>
  32#include <net/ip.h>
  33#include <net/icmp.h>
  34#include <net/protocol.h>
  35#include <net/ip_tunnels.h>
  36#include <net/arp.h>
  37#include <net/checksum.h>
  38#include <net/dsfield.h>
  39#include <net/inet_ecn.h>
  40#include <net/xfrm.h>
  41#include <net/net_namespace.h>
  42#include <net/netns/generic.h>
  43#include <net/rtnetlink.h>
  44#include <net/udp.h>
  45#include <net/dst_metadata.h>
  46#include <net/inet_dscp.h>
  47
  48#if IS_ENABLED(CONFIG_IPV6)
  49#include <net/ipv6.h>
  50#include <net/ip6_fib.h>
  51#include <net/ip6_route.h>
  52#endif
  53
  54static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
  55{
  56	return hash_32((__force u32)key ^ (__force u32)remote,
  57			 IP_TNL_HASH_BITS);
  58}
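/* This hash indexes the per-namespace tunnel table (itn->tunnels[], with
 * 1 << IP_TNL_HASH_BITS buckets). A rough sketch of how a bucket would be
 * picked for a keyed tunnel whose remote endpoint is 192.0.2.1 (the address
 * and key are made-up example values):
 *
 *	__be32 remote = htonl(0xc0000201);	/* 192.0.2.1 */
 *	__be32 key    = htonl(42);		/* parms.i_key */
 *	struct hlist_head *head = &itn->tunnels[ip_tunnel_hash(key, remote)];
 */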
  59
  60static bool ip_tunnel_key_match(const struct ip_tunnel_parm_kern *p,
  61				const unsigned long *flags, __be32 key)
  62{
  63	if (!test_bit(IP_TUNNEL_KEY_BIT, flags))
  64		return !test_bit(IP_TUNNEL_KEY_BIT, p->i_flags);
  65
  66	return test_bit(IP_TUNNEL_KEY_BIT, p->i_flags) && p->i_key == key;
  67}
  68
  69/* Fallback tunnel: no source, no destination, no key, no options
  70
  71   Tunnel hash table:
   72   We require an exact key match, i.e. if a key is present in the packet
   73   it will match only a tunnel with the same key; if it is not present,
   74   it will match only a keyless tunnel.
   75
   76   All keyless packets that do not match a configured keyless tunnel
   77   will match the fallback tunnel.
   78   Given src, dst and key, find the appropriate tunnel for the input packet.
  79*/
  80struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
  81				   int link, const unsigned long *flags,
  82				   __be32 remote, __be32 local,
  83				   __be32 key)
  84{
  85	struct ip_tunnel *t, *cand = NULL;
  86	struct hlist_head *head;
  87	struct net_device *ndev;
  88	unsigned int hash;
  89
  90	hash = ip_tunnel_hash(key, remote);
  91	head = &itn->tunnels[hash];
  92
  93	hlist_for_each_entry_rcu(t, head, hash_node) {
  94		if (local != t->parms.iph.saddr ||
  95		    remote != t->parms.iph.daddr ||
  96		    !(t->dev->flags & IFF_UP))
  97			continue;
  98
  99		if (!ip_tunnel_key_match(&t->parms, flags, key))
 100			continue;
 101
 102		if (READ_ONCE(t->parms.link) == link)
 103			return t;
 104		cand = t;
 105	}
 106
 107	hlist_for_each_entry_rcu(t, head, hash_node) {
 108		if (remote != t->parms.iph.daddr ||
 109		    t->parms.iph.saddr != 0 ||
 110		    !(t->dev->flags & IFF_UP))
 111			continue;
 112
 113		if (!ip_tunnel_key_match(&t->parms, flags, key))
 114			continue;
 115
 116		if (READ_ONCE(t->parms.link) == link)
 117			return t;
 118		if (!cand)
 119			cand = t;
 120	}
 121
 122	hash = ip_tunnel_hash(key, 0);
 123	head = &itn->tunnels[hash];
 124
 125	hlist_for_each_entry_rcu(t, head, hash_node) {
 126		if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
 127		    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
 128			continue;
 129
 130		if (!(t->dev->flags & IFF_UP))
 131			continue;
 132
 133		if (!ip_tunnel_key_match(&t->parms, flags, key))
 134			continue;
 135
 136		if (READ_ONCE(t->parms.link) == link)
 137			return t;
 138		if (!cand)
 139			cand = t;
 140	}
 141
 142	hlist_for_each_entry_rcu(t, head, hash_node) {
 143		if ((!test_bit(IP_TUNNEL_NO_KEY_BIT, flags) &&
 144		     t->parms.i_key != key) ||
 145		    t->parms.iph.saddr != 0 ||
 146		    t->parms.iph.daddr != 0 ||
 147		    !(t->dev->flags & IFF_UP))
 148			continue;
 149
 150		if (READ_ONCE(t->parms.link) == link)
 151			return t;
 152		if (!cand)
 153			cand = t;
 154	}
 155
 156	if (cand)
 157		return cand;
 158
 159	t = rcu_dereference(itn->collect_md_tun);
 160	if (t && t->dev->flags & IFF_UP)
 161		return t;
 162
 163	ndev = READ_ONCE(itn->fb_tunnel_dev);
 164	if (ndev && ndev->flags & IFF_UP)
 165		return netdev_priv(ndev);
 166
 167	return NULL;
 168}
 169EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
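/* Lookup precedence, from the loops above: exact (saddr, daddr) match,
 * then daddr-only (wildcard source), then local or multicast destination
 * with wildcard remote, then tunnels with both addresses wildcarded matched
 * on the key alone. Within each pass a tunnel whose parms.link equals the
 * ingress link is returned immediately, otherwise a match is remembered as
 * a candidate; failing all of that, the collect_md tunnel and finally the
 * per-netns fallback device are used.
 *
 * A minimal sketch of a caller, loosely modelled on a GRE-style receive
 * path (tpi, itn and skb as used elsewhere in this file, not lifted from a
 * real driver):
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *	struct ip_tunnel *tunnel;
 *
 *	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 *				  iph->saddr, iph->daddr, tpi->key);
 *	if (tunnel)
 *		return ip_tunnel_rcv(tunnel, skb, tpi, NULL, true);
 */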
 170
 171static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
 172				    struct ip_tunnel_parm_kern *parms)
 173{
 174	unsigned int h;
 175	__be32 remote;
 176	__be32 i_key = parms->i_key;
 177
 178	if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
 179		remote = parms->iph.daddr;
 180	else
 181		remote = 0;
 182
 183	if (!test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags) &&
 184	    test_bit(IP_TUNNEL_VTI_BIT, parms->i_flags))
 185		i_key = 0;
 186
 187	h = ip_tunnel_hash(i_key, remote);
 188	return &itn->tunnels[h];
 189}
 190
 191static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
 192{
 193	struct hlist_head *head = ip_bucket(itn, &t->parms);
 194
 195	if (t->collect_md)
 196		rcu_assign_pointer(itn->collect_md_tun, t);
 197	hlist_add_head_rcu(&t->hash_node, head);
 198}
 199
 200static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
 201{
 202	if (t->collect_md)
 203		rcu_assign_pointer(itn->collect_md_tun, NULL);
 204	hlist_del_init_rcu(&t->hash_node);
 205}
 206
 207static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
 208					struct ip_tunnel_parm_kern *parms,
 209					int type)
 210{
 211	__be32 remote = parms->iph.daddr;
 212	__be32 local = parms->iph.saddr;
 213	IP_TUNNEL_DECLARE_FLAGS(flags);
 214	__be32 key = parms->i_key;
 215	int link = parms->link;
 216	struct ip_tunnel *t = NULL;
 217	struct hlist_head *head = ip_bucket(itn, parms);
 218
 219	ip_tunnel_flags_copy(flags, parms->i_flags);
 220
 221	hlist_for_each_entry_rcu(t, head, hash_node, lockdep_rtnl_is_held()) {
 222		if (local == t->parms.iph.saddr &&
 223		    remote == t->parms.iph.daddr &&
 224		    link == READ_ONCE(t->parms.link) &&
 225		    type == t->dev->type &&
 226		    ip_tunnel_key_match(&t->parms, flags, key))
 227			break;
 228	}
 229	return t;
 230}
 231
 232static struct net_device *__ip_tunnel_create(struct net *net,
 233					     const struct rtnl_link_ops *ops,
 234					     struct ip_tunnel_parm_kern *parms)
 235{
 236	int err;
 237	struct ip_tunnel *tunnel;
 238	struct net_device *dev;
 239	char name[IFNAMSIZ];
 240
 241	err = -E2BIG;
 242	if (parms->name[0]) {
 243		if (!dev_valid_name(parms->name))
 244			goto failed;
 245		strscpy(name, parms->name, IFNAMSIZ);
 246	} else {
 247		if (strlen(ops->kind) > (IFNAMSIZ - 3))
 248			goto failed;
 249		strcpy(name, ops->kind);
 250		strcat(name, "%d");
 251	}
 252
 253	ASSERT_RTNL();
 254	dev = alloc_netdev(ops->priv_size, name, NET_NAME_UNKNOWN, ops->setup);
 255	if (!dev) {
 256		err = -ENOMEM;
 257		goto failed;
 258	}
 259	dev_net_set(dev, net);
 260
 261	dev->rtnl_link_ops = ops;
 262
 263	tunnel = netdev_priv(dev);
 264	tunnel->parms = *parms;
 265	tunnel->net = net;
 266
 267	err = register_netdevice(dev);
 268	if (err)
 269		goto failed_free;
 270
 271	return dev;
 272
 273failed_free:
 274	free_netdev(dev);
 275failed:
 276	return ERR_PTR(err);
 277}
 278
 279static int ip_tunnel_bind_dev(struct net_device *dev)
 280{
 281	struct net_device *tdev = NULL;
 282	struct ip_tunnel *tunnel = netdev_priv(dev);
 283	const struct iphdr *iph;
 284	int hlen = LL_MAX_HEADER;
 285	int mtu = ETH_DATA_LEN;
 286	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
 287
 288	iph = &tunnel->parms.iph;
 289
 290	/* Guess output device to choose reasonable mtu and needed_headroom */
 291	if (iph->daddr) {
 292		struct flowi4 fl4;
 293		struct rtable *rt;
 294
 295		ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
 296				    iph->saddr, tunnel->parms.o_key,
 297				    iph->tos & INET_DSCP_MASK, tunnel->net,
 298				    tunnel->parms.link, tunnel->fwmark, 0, 0);
 299		rt = ip_route_output_key(tunnel->net, &fl4);
 300
 301		if (!IS_ERR(rt)) {
 302			tdev = rt->dst.dev;
 303			ip_rt_put(rt);
 304		}
 305		if (dev->type != ARPHRD_ETHER)
 306			dev->flags |= IFF_POINTOPOINT;
 307
 308		dst_cache_reset(&tunnel->dst_cache);
 309	}
 310
 311	if (!tdev && tunnel->parms.link)
 312		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
 313
 314	if (tdev) {
 315		hlen = tdev->hard_header_len + tdev->needed_headroom;
 316		mtu = min(tdev->mtu, IP_MAX_MTU);
 317	}
 318
 319	dev->needed_headroom = t_hlen + hlen;
 320	mtu -= t_hlen + (dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0);
 321
 322	if (mtu < IPV4_MIN_MTU)
 323		mtu = IPV4_MIN_MTU;
 324
 325	return mtu;
 326}
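/* Worked example of the calculation above, assuming a plain GRE tunnel over
 * a 1500-byte Ethernet underlay: tunnel->hlen is 4 (basic GRE header, no
 * key/csum/seq options), so t_hlen = 4 + 20 = 24 and the suggested device
 * MTU is 1500 - 24 = 1476, the familiar default GRE tunnel MTU.
 */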
 327
 328static struct ip_tunnel *ip_tunnel_create(struct net *net,
 329					  struct ip_tunnel_net *itn,
 330					  struct ip_tunnel_parm_kern *parms)
 331{
 332	struct ip_tunnel *nt;
 333	struct net_device *dev;
 334	int t_hlen;
 335	int mtu;
 336	int err;
 337
 338	dev = __ip_tunnel_create(net, itn->rtnl_link_ops, parms);
 339	if (IS_ERR(dev))
 340		return ERR_CAST(dev);
 341
 342	mtu = ip_tunnel_bind_dev(dev);
 343	err = dev_set_mtu(dev, mtu);
 344	if (err)
 345		goto err_dev_set_mtu;
 346
 347	nt = netdev_priv(dev);
 348	t_hlen = nt->hlen + sizeof(struct iphdr);
 349	dev->min_mtu = ETH_MIN_MTU;
 350	dev->max_mtu = IP_MAX_MTU - t_hlen;
 351	if (dev->type == ARPHRD_ETHER)
 352		dev->max_mtu -= dev->hard_header_len;
 353
 354	ip_tunnel_add(itn, nt);
 355	return nt;
 356
 357err_dev_set_mtu:
 358	unregister_netdevice(dev);
 359	return ERR_PTR(err);
 360}
 361
 362void ip_tunnel_md_udp_encap(struct sk_buff *skb, struct ip_tunnel_info *info)
 363{
 364	const struct iphdr *iph = ip_hdr(skb);
 365	const struct udphdr *udph;
 366
 367	if (iph->protocol != IPPROTO_UDP)
 368		return;
 369
 370	udph = (struct udphdr *)((__u8 *)iph + (iph->ihl << 2));
 371	info->encap.sport = udph->source;
 372	info->encap.dport = udph->dest;
 373}
 374EXPORT_SYMBOL(ip_tunnel_md_udp_encap);
 375
 376int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
 377		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
 378		  bool log_ecn_error)
 379{
 380	const struct iphdr *iph = ip_hdr(skb);
 381	int nh, err;
 382
 383#ifdef CONFIG_NET_IPGRE_BROADCAST
 384	if (ipv4_is_multicast(iph->daddr)) {
 385		DEV_STATS_INC(tunnel->dev, multicast);
 386		skb->pkt_type = PACKET_BROADCAST;
 387	}
 388#endif
 389
 390	if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.i_flags) !=
 391	    test_bit(IP_TUNNEL_CSUM_BIT, tpi->flags)) {
 392		DEV_STATS_INC(tunnel->dev, rx_crc_errors);
 393		DEV_STATS_INC(tunnel->dev, rx_errors);
 394		goto drop;
 395	}
 396
 397	if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.i_flags)) {
 398		if (!test_bit(IP_TUNNEL_SEQ_BIT, tpi->flags) ||
 399		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
 400			DEV_STATS_INC(tunnel->dev, rx_fifo_errors);
 401			DEV_STATS_INC(tunnel->dev, rx_errors);
 402			goto drop;
 403		}
 404		tunnel->i_seqno = ntohl(tpi->seq) + 1;
 405	}
 406
 407	/* Save offset of outer header relative to skb->head,
 408	 * because we are going to reset the network header to the inner header
 409	 * and might change skb->head.
 410	 */
 411	nh = skb_network_header(skb) - skb->head;
 412
 413	skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
 414
 415	if (!pskb_inet_may_pull(skb)) {
 416		DEV_STATS_INC(tunnel->dev, rx_length_errors);
 417		DEV_STATS_INC(tunnel->dev, rx_errors);
 418		goto drop;
 419	}
 420	iph = (struct iphdr *)(skb->head + nh);
 421
 422	err = IP_ECN_decapsulate(iph, skb);
 423	if (unlikely(err)) {
 424		if (log_ecn_error)
 425			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
 426					&iph->saddr, iph->tos);
 427		if (err > 1) {
 428			DEV_STATS_INC(tunnel->dev, rx_frame_errors);
 429			DEV_STATS_INC(tunnel->dev, rx_errors);
 430			goto drop;
 431		}
 432	}
 433
 434	dev_sw_netstats_rx_add(tunnel->dev, skb->len);
 435	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
 436
 437	if (tunnel->dev->type == ARPHRD_ETHER) {
 438		skb->protocol = eth_type_trans(skb, tunnel->dev);
 439		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 440	} else {
 441		skb->dev = tunnel->dev;
 442	}
 443
 444	if (tun_dst)
 445		skb_dst_set(skb, (struct dst_entry *)tun_dst);
 446
 447	gro_cells_receive(&tunnel->gro_cells, skb);
 448	return 0;
 449
 450drop:
 451	if (tun_dst)
 452		dst_release((struct dst_entry *)tun_dst);
 453	kfree_skb(skb);
 454	return 0;
 455}
 456EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
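/* In short, ip_tunnel_rcv() checks the checksum and sequence-number
 * expectations recorded in the tunnel's i_flags against the parsed packet
 * (tpi), moves the network header to the inner packet, decapsulates ECN,
 * bumps the software stats, scrubs the skb when crossing namespaces,
 * attaches the optional metadata dst and hands the packet to the tunnel
 * device through the GRO cells.
 */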
 457
 458int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops,
 459			    unsigned int num)
 460{
 461	if (num >= MAX_IPTUN_ENCAP_OPS)
 462		return -ERANGE;
 463
 464	return !cmpxchg((const struct ip_tunnel_encap_ops **)
 465			&iptun_encaps[num],
 466			NULL, ops) ? 0 : -1;
 467}
 468EXPORT_SYMBOL(ip_tunnel_encap_add_ops);
 469
 470int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops,
 471			    unsigned int num)
 472{
 473	int ret;
 474
 475	if (num >= MAX_IPTUN_ENCAP_OPS)
 476		return -ERANGE;
 477
 478	ret = (cmpxchg((const struct ip_tunnel_encap_ops **)
 479		       &iptun_encaps[num],
 480		       ops, NULL) == ops) ? 0 : -1;
 481
 482	synchronize_net();
 483
 484	return ret;
 485}
 486EXPORT_SYMBOL(ip_tunnel_encap_del_ops);
 487
 488int ip_tunnel_encap_setup(struct ip_tunnel *t,
 489			  struct ip_tunnel_encap *ipencap)
 490{
 491	int hlen;
 492
 493	memset(&t->encap, 0, sizeof(t->encap));
 494
 495	hlen = ip_encap_hlen(ipencap);
 496	if (hlen < 0)
 497		return hlen;
 498
 499	t->encap.type = ipencap->type;
 500	t->encap.sport = ipencap->sport;
 501	t->encap.dport = ipencap->dport;
 502	t->encap.flags = ipencap->flags;
 503
 504	t->encap_hlen = hlen;
 505	t->hlen = t->encap_hlen + t->tun_hlen;
 506
 507	return 0;
 508}
 509EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);
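/* t->hlen is the total extra header past the outer IP header: the
 * encapsulation header (t->encap_hlen) plus the tunnel protocol's own
 * header (t->tun_hlen, set by the driver). A rough example, assuming FOU
 * encapsulation (TUNNEL_ENCAP_FOU) over basic GRE: encap_hlen = 8 (UDP
 * header) and tun_hlen = 4, so hlen = 12 and each packet grows by
 * 20 (outer IPv4) + 12 bytes on the wire.
 */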
 510
 511static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
 512			    struct rtable *rt, __be16 df,
 513			    const struct iphdr *inner_iph,
 514			    int tunnel_hlen, __be32 dst, bool md)
 515{
 516	struct ip_tunnel *tunnel = netdev_priv(dev);
 517	int pkt_size;
 518	int mtu;
 519
 520	tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
 521	pkt_size = skb->len - tunnel_hlen;
 522	pkt_size -= dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0;
 523
 524	if (df) {
 525		mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen);
 526		mtu -= dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0;
 527	} else {
 528		mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
 529	}
 530
 531	if (skb_valid_dst(skb))
 532		skb_dst_update_pmtu_no_confirm(skb, mtu);
 533
 534	if (skb->protocol == htons(ETH_P_IP)) {
 535		if (!skb_is_gso(skb) &&
 536		    (inner_iph->frag_off & htons(IP_DF)) &&
 537		    mtu < pkt_size) {
 538			icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 539			return -E2BIG;
 540		}
 541	}
 542#if IS_ENABLED(CONFIG_IPV6)
 543	else if (skb->protocol == htons(ETH_P_IPV6)) {
 544		struct rt6_info *rt6;
 545		__be32 daddr;
 546
 547		rt6 = skb_valid_dst(skb) ? dst_rt6_info(skb_dst(skb)) :
 548					   NULL;
 549		daddr = md ? dst : tunnel->parms.iph.daddr;
 550
 551		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
 552			   mtu >= IPV6_MIN_MTU) {
 553			if ((daddr && !ipv4_is_multicast(daddr)) ||
 554			    rt6->rt6i_dst.plen == 128) {
 555				rt6->rt6i_flags |= RTF_MODIFIED;
 556				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
 557			}
 558		}
 559
 560		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
 561					mtu < pkt_size) {
 562			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 563			return -E2BIG;
 564		}
 565	}
 566#endif
 567	return 0;
 568}
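/* Example of the DF branch above, assuming an underlay route MTU of 1500
 * and a plain IPIP tunnel (tunnel header length 0): mtu = 1500 - 20 = 1480.
 * An inner IPv4 packet bigger than that with DF set gets an ICMP
 * "fragmentation needed" advertising 1480, and -E2BIG tells the caller to
 * drop the packet.
 */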
 569
 570static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom)
 571{
  572	/* we must cap headroom to some upper limit, else pskb_expand_head
 573	 * will overflow header offsets in skb_headers_offset_update().
 574	 */
 575	static const unsigned int max_allowed = 512;
 576
 577	if (headroom > max_allowed)
 578		headroom = max_allowed;
 579
 580	if (headroom > READ_ONCE(dev->needed_headroom))
 581		WRITE_ONCE(dev->needed_headroom, headroom);
 582}
 583
 584void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 585		       u8 proto, int tunnel_hlen)
 586{
 587	struct ip_tunnel *tunnel = netdev_priv(dev);
 588	u32 headroom = sizeof(struct iphdr);
 589	struct ip_tunnel_info *tun_info;
 590	const struct ip_tunnel_key *key;
 591	const struct iphdr *inner_iph;
 592	struct rtable *rt = NULL;
 593	struct flowi4 fl4;
 594	__be16 df = 0;
 595	u8 tos, ttl;
 596	bool use_cache;
 597
 598	tun_info = skb_tunnel_info(skb);
 599	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 600		     ip_tunnel_info_af(tun_info) != AF_INET))
 601		goto tx_error;
 602	key = &tun_info->key;
 603	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 604	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
 605	tos = key->tos;
 606	if (tos == 1) {
 607		if (skb->protocol == htons(ETH_P_IP))
 608			tos = inner_iph->tos;
 609		else if (skb->protocol == htons(ETH_P_IPV6))
 610			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
 611	}
 612	ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
 613			    tunnel_id_to_key32(key->tun_id),
 614			    tos & INET_DSCP_MASK, tunnel->net, 0, skb->mark,
 615			    skb_get_hash(skb), key->flow_flags);
 616
 617	if (!tunnel_hlen)
 618		tunnel_hlen = ip_encap_hlen(&tun_info->encap);
 619
 620	if (ip_tunnel_encap(skb, &tun_info->encap, &proto, &fl4) < 0)
 621		goto tx_error;
 622
 623	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
 624	if (use_cache)
 625		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl4.saddr);
 626	if (!rt) {
 627		rt = ip_route_output_key(tunnel->net, &fl4);
 628		if (IS_ERR(rt)) {
 629			DEV_STATS_INC(dev, tx_carrier_errors);
 630			goto tx_error;
 631		}
 632		if (use_cache)
 633			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
 634					  fl4.saddr);
 635	}
 636	if (rt->dst.dev == dev) {
 637		ip_rt_put(rt);
 638		DEV_STATS_INC(dev, collisions);
 639		goto tx_error;
 640	}
 641
 642	if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags))
 643		df = htons(IP_DF);
 644	if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, tunnel_hlen,
 645			    key->u.ipv4.dst, true)) {
 646		ip_rt_put(rt);
 647		goto tx_error;
 648	}
 649
 650	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
 651	ttl = key->ttl;
 652	if (ttl == 0) {
 653		if (skb->protocol == htons(ETH_P_IP))
 654			ttl = inner_iph->ttl;
 655		else if (skb->protocol == htons(ETH_P_IPV6))
 656			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
 657		else
 658			ttl = ip4_dst_hoplimit(&rt->dst);
 659	}
 660
 661	headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
 662	if (skb_cow_head(skb, headroom)) {
 663		ip_rt_put(rt);
 664		goto tx_dropped;
 665	}
 666
 667	ip_tunnel_adj_headroom(dev, headroom);
 668
 669	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
 670		      df, !net_eq(tunnel->net, dev_net(dev)));
 671	return;
 672tx_error:
 673	DEV_STATS_INC(dev, tx_errors);
 674	goto kfree;
 675tx_dropped:
 676	DEV_STATS_INC(dev, tx_dropped);
 677kfree:
 678	kfree_skb(skb);
 679}
 680EXPORT_SYMBOL_GPL(ip_md_tunnel_xmit);
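/* ip_md_tunnel_xmit() is the transmit path for "external" (collect_md)
 * tunnels: rather than the parameters configured on the device, it uses
 * the per-packet metadata carried in struct ip_tunnel_info, for example
 * one attached to the skb as a metadata dst by an encapsulating caller.
 */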
 681
 682void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 683		    const struct iphdr *tnl_params, u8 protocol)
 684{
 685	struct ip_tunnel *tunnel = netdev_priv(dev);
 686	struct ip_tunnel_info *tun_info = NULL;
 687	const struct iphdr *inner_iph;
 688	unsigned int max_headroom;	/* The extra header space needed */
 689	struct rtable *rt = NULL;		/* Route to the other host */
 690	__be16 payload_protocol;
 691	bool use_cache = false;
 692	struct flowi4 fl4;
 693	bool md = false;
 694	bool connected;
 695	u8 tos, ttl;
 696	__be32 dst;
 697	__be16 df;
 698
 699	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
 700	connected = (tunnel->parms.iph.daddr != 0);
 701	payload_protocol = skb_protocol(skb, true);
 702
 703	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 704
 705	dst = tnl_params->daddr;
 706	if (dst == 0) {
 707		/* NBMA tunnel */
 708
 709		if (!skb_dst(skb)) {
 710			DEV_STATS_INC(dev, tx_fifo_errors);
 711			goto tx_error;
 712		}
 713
 714		tun_info = skb_tunnel_info(skb);
 715		if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX) &&
 716		    ip_tunnel_info_af(tun_info) == AF_INET &&
 717		    tun_info->key.u.ipv4.dst) {
 718			dst = tun_info->key.u.ipv4.dst;
 719			md = true;
 720			connected = true;
 721		} else if (payload_protocol == htons(ETH_P_IP)) {
 722			rt = skb_rtable(skb);
 723			dst = rt_nexthop(rt, inner_iph->daddr);
 724		}
 725#if IS_ENABLED(CONFIG_IPV6)
 726		else if (payload_protocol == htons(ETH_P_IPV6)) {
 727			const struct in6_addr *addr6;
 728			struct neighbour *neigh;
 729			bool do_tx_error_icmp;
 730			int addr_type;
 731
 732			neigh = dst_neigh_lookup(skb_dst(skb),
 733						 &ipv6_hdr(skb)->daddr);
 734			if (!neigh)
 735				goto tx_error;
 736
 737			addr6 = (const struct in6_addr *)&neigh->primary_key;
 738			addr_type = ipv6_addr_type(addr6);
 739
 740			if (addr_type == IPV6_ADDR_ANY) {
 741				addr6 = &ipv6_hdr(skb)->daddr;
 742				addr_type = ipv6_addr_type(addr6);
 743			}
 744
 745			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
 746				do_tx_error_icmp = true;
 747			else {
 748				do_tx_error_icmp = false;
 749				dst = addr6->s6_addr32[3];
 750			}
 751			neigh_release(neigh);
 752			if (do_tx_error_icmp)
 753				goto tx_error_icmp;
 754		}
 755#endif
 756		else
 757			goto tx_error;
 758
 759		if (!md)
 760			connected = false;
 761	}
 762
 763	tos = tnl_params->tos;
 764	if (tos & 0x1) {
 765		tos &= ~0x1;
 766		if (payload_protocol == htons(ETH_P_IP)) {
 767			tos = inner_iph->tos;
 768			connected = false;
 769		} else if (payload_protocol == htons(ETH_P_IPV6)) {
 770			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
 771			connected = false;
 772		}
 773	}
 774
 775	ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
 776			    tunnel->parms.o_key, tos & INET_DSCP_MASK,
 777			    tunnel->net, READ_ONCE(tunnel->parms.link),
 778			    tunnel->fwmark, skb_get_hash(skb), 0);
 779
 780	if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0)
 781		goto tx_error;
 782
 783	if (connected && md) {
 784		use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
 785		if (use_cache)
 786			rt = dst_cache_get_ip4(&tun_info->dst_cache,
 787					       &fl4.saddr);
 788	} else {
 789		rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache,
 790						&fl4.saddr) : NULL;
 791	}
 792
 793	if (!rt) {
 794		rt = ip_route_output_key(tunnel->net, &fl4);
 795
 796		if (IS_ERR(rt)) {
 797			DEV_STATS_INC(dev, tx_carrier_errors);
 798			goto tx_error;
 799		}
 800		if (use_cache)
 801			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
 802					  fl4.saddr);
 803		else if (!md && connected)
 804			dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
 805					  fl4.saddr);
 806	}
 807
 808	if (rt->dst.dev == dev) {
 809		ip_rt_put(rt);
 810		DEV_STATS_INC(dev, collisions);
 811		goto tx_error;
 812	}
 813
 814	df = tnl_params->frag_off;
 815	if (payload_protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
 816		df |= (inner_iph->frag_off & htons(IP_DF));
 817
 818	if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, 0, 0, false)) {
 819		ip_rt_put(rt);
 820		goto tx_error;
 821	}
 822
 823	if (tunnel->err_count > 0) {
 824		if (time_before(jiffies,
 825				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
 826			tunnel->err_count--;
 827
 828			dst_link_failure(skb);
 829		} else
 830			tunnel->err_count = 0;
 831	}
 832
 833	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
 834	ttl = tnl_params->ttl;
 835	if (ttl == 0) {
 836		if (payload_protocol == htons(ETH_P_IP))
 837			ttl = inner_iph->ttl;
 838#if IS_ENABLED(CONFIG_IPV6)
 839		else if (payload_protocol == htons(ETH_P_IPV6))
 840			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
 841#endif
 842		else
 843			ttl = ip4_dst_hoplimit(&rt->dst);
 844	}
 845
 846	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
 847			+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
 848
 849	if (skb_cow_head(skb, max_headroom)) {
 850		ip_rt_put(rt);
 851		DEV_STATS_INC(dev, tx_dropped);
 852		kfree_skb(skb);
 853		return;
 854	}
 855
 856	ip_tunnel_adj_headroom(dev, max_headroom);
 857
 858	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
 859		      df, !net_eq(tunnel->net, dev_net(dev)));
 860	return;
 861
 862#if IS_ENABLED(CONFIG_IPV6)
 863tx_error_icmp:
 864	dst_link_failure(skb);
 865#endif
 866tx_error:
 867	DEV_STATS_INC(dev, tx_errors);
 868	kfree_skb(skb);
 869}
 870EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
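/* Transmit in a nutshell: resolve the outer destination (fixed daddr, or
 * derived per packet for NBMA and metadata tunnels), build the flowi4 and
 * apply any configured encapsulation, reuse a cached route or look one up,
 * enforce PMTU, derive TOS/TTL/DF from the tunnel parameters or the inner
 * packet, grow headroom if needed and finally hand off to iptunnel_xmit().
 */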
 871
 872static void ip_tunnel_update(struct ip_tunnel_net *itn,
 873			     struct ip_tunnel *t,
 874			     struct net_device *dev,
 875			     struct ip_tunnel_parm_kern *p,
 876			     bool set_mtu,
 877			     __u32 fwmark)
 878{
 879	ip_tunnel_del(itn, t);
 880	t->parms.iph.saddr = p->iph.saddr;
 881	t->parms.iph.daddr = p->iph.daddr;
 882	t->parms.i_key = p->i_key;
 883	t->parms.o_key = p->o_key;
 884	if (dev->type != ARPHRD_ETHER) {
 885		__dev_addr_set(dev, &p->iph.saddr, 4);
 886		memcpy(dev->broadcast, &p->iph.daddr, 4);
 887	}
 888	ip_tunnel_add(itn, t);
 889
 890	t->parms.iph.ttl = p->iph.ttl;
 891	t->parms.iph.tos = p->iph.tos;
 892	t->parms.iph.frag_off = p->iph.frag_off;
 893
 894	if (t->parms.link != p->link || t->fwmark != fwmark) {
 895		int mtu;
 896
 897		WRITE_ONCE(t->parms.link, p->link);
 898		t->fwmark = fwmark;
 899		mtu = ip_tunnel_bind_dev(dev);
 900		if (set_mtu)
 901			WRITE_ONCE(dev->mtu, mtu);
 902	}
 903	dst_cache_reset(&t->dst_cache);
 904	netdev_state_change(dev);
 905}
 906
 907int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p,
 908		  int cmd)
 909{
 910	int err = 0;
 911	struct ip_tunnel *t = netdev_priv(dev);
 912	struct net *net = t->net;
 913	struct ip_tunnel_net *itn = net_generic(net, t->ip_tnl_net_id);
 914
  914
 915	switch (cmd) {
 916	case SIOCGETTUNNEL:
 917		if (dev == itn->fb_tunnel_dev) {
 918			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
 919			if (!t)
 920				t = netdev_priv(dev);
 921		}
 922		memcpy(p, &t->parms, sizeof(*p));
 923		break;
 924
 925	case SIOCADDTUNNEL:
 926	case SIOCCHGTUNNEL:
 927		err = -EPERM;
 928		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 929			goto done;
 930		if (p->iph.ttl)
 931			p->iph.frag_off |= htons(IP_DF);
 932		if (!test_bit(IP_TUNNEL_VTI_BIT, p->i_flags)) {
 933			if (!test_bit(IP_TUNNEL_KEY_BIT, p->i_flags))
 934				p->i_key = 0;
 935			if (!test_bit(IP_TUNNEL_KEY_BIT, p->o_flags))
 936				p->o_key = 0;
 937		}
 938
 939		t = ip_tunnel_find(itn, p, itn->type);
 940
 941		if (cmd == SIOCADDTUNNEL) {
 942			if (!t) {
 943				t = ip_tunnel_create(net, itn, p);
 944				err = PTR_ERR_OR_ZERO(t);
 945				break;
 946			}
 947
 948			err = -EEXIST;
 949			break;
 950		}
 951		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
 952			if (t) {
 953				if (t->dev != dev) {
 954					err = -EEXIST;
 955					break;
 956				}
 957			} else {
 958				unsigned int nflags = 0;
 959
 960				if (ipv4_is_multicast(p->iph.daddr))
 961					nflags = IFF_BROADCAST;
 962				else if (p->iph.daddr)
 963					nflags = IFF_POINTOPOINT;
 964
 965				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
 966					err = -EINVAL;
 967					break;
 968				}
 969
 970				t = netdev_priv(dev);
 971			}
 972		}
 973
 974		if (t) {
 975			err = 0;
 976			ip_tunnel_update(itn, t, dev, p, true, 0);
 977		} else {
 978			err = -ENOENT;
 979		}
 980		break;
 981
 982	case SIOCDELTUNNEL:
 983		err = -EPERM;
 984		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 985			goto done;
 986
 987		if (dev == itn->fb_tunnel_dev) {
 988			err = -ENOENT;
 989			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
 990			if (!t)
 991				goto done;
 992			err = -EPERM;
 993			if (t == netdev_priv(itn->fb_tunnel_dev))
 994				goto done;
 995			dev = t->dev;
 996		}
 997		unregister_netdevice(dev);
 998		err = 0;
 999		break;
1000
1001	default:
1002		err = -EINVAL;
1003	}
1004
1005done:
1006	return err;
1007}
1008EXPORT_SYMBOL_GPL(ip_tunnel_ctl);
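/* These are the legacy ioctl commands (SIOCGETTUNNEL, SIOCADDTUNNEL,
 * SIOCCHGTUNNEL, SIOCDELTUNNEL) historically issued by the classic
 * "ip tunnel" utility; they reach this function through
 * ip_tunnel_siocdevprivate() below. Netlink-based management goes through
 * ip_tunnel_newlink()/ip_tunnel_changelink() further down instead.
 */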
1009
1010bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp,
1011			      const void __user *data)
1012{
1013	struct ip_tunnel_parm p;
1014
1015	if (copy_from_user(&p, data, sizeof(p)))
1016		return false;
1017
1018	strscpy(kp->name, p.name);
1019	kp->link = p.link;
1020	ip_tunnel_flags_from_be16(kp->i_flags, p.i_flags);
1021	ip_tunnel_flags_from_be16(kp->o_flags, p.o_flags);
1022	kp->i_key = p.i_key;
1023	kp->o_key = p.o_key;
1024	memcpy(&kp->iph, &p.iph, min(sizeof(kp->iph), sizeof(p.iph)));
1025
1026	return true;
1027}
1028EXPORT_SYMBOL_GPL(ip_tunnel_parm_from_user);
1029
1030bool ip_tunnel_parm_to_user(void __user *data, struct ip_tunnel_parm_kern *kp)
1031{
1032	struct ip_tunnel_parm p;
1033
1034	if (!ip_tunnel_flags_is_be16_compat(kp->i_flags) ||
1035	    !ip_tunnel_flags_is_be16_compat(kp->o_flags))
1036		return false;
1037
1038	memset(&p, 0, sizeof(p));
1039
1040	strscpy(p.name, kp->name);
1041	p.link = kp->link;
1042	p.i_flags = ip_tunnel_flags_to_be16(kp->i_flags);
1043	p.o_flags = ip_tunnel_flags_to_be16(kp->o_flags);
1044	p.i_key = kp->i_key;
1045	p.o_key = kp->o_key;
1046	memcpy(&p.iph, &kp->iph, min(sizeof(p.iph), sizeof(kp->iph)));
1047
1048	return !copy_to_user(data, &p, sizeof(p));
1049}
1050EXPORT_SYMBOL_GPL(ip_tunnel_parm_to_user);
1051
1052int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
1053			     void __user *data, int cmd)
1054{
1055	struct ip_tunnel_parm_kern p;
1056	int err;
1057
1058	if (!ip_tunnel_parm_from_user(&p, data))
1059		return -EFAULT;
1060	err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, cmd);
1061	if (!err && !ip_tunnel_parm_to_user(data, &p))
1062		return -EFAULT;
1063	return err;
1064}
1065EXPORT_SYMBOL_GPL(ip_tunnel_siocdevprivate);
1066
1067int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
1068{
1069	struct ip_tunnel *tunnel = netdev_priv(dev);
1070	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
1071	int max_mtu = IP_MAX_MTU - t_hlen;
1072
1073	if (dev->type == ARPHRD_ETHER)
1074		max_mtu -= dev->hard_header_len;
1075
1076	if (new_mtu < ETH_MIN_MTU)
1077		return -EINVAL;
1078
1079	if (new_mtu > max_mtu) {
1080		if (strict)
1081			return -EINVAL;
1082
1083		new_mtu = max_mtu;
1084	}
1085
1086	WRITE_ONCE(dev->mtu, new_mtu);
1087	return 0;
1088}
1089EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);
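/* For example, with a 4-byte GRE header t_hlen = 4 + 20 = 24, so a
 * requested MTU above IP_MAX_MTU - 24 = 65511 is rejected when strict and
 * clamped to 65511 otherwise.
 */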
1090
1091int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1092{
1093	return __ip_tunnel_change_mtu(dev, new_mtu, true);
1094}
1095EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
1096
1097static void ip_tunnel_dev_free(struct net_device *dev)
1098{
1099	struct ip_tunnel *tunnel = netdev_priv(dev);
1100
1101	gro_cells_destroy(&tunnel->gro_cells);
1102	dst_cache_destroy(&tunnel->dst_cache);
1103}
1104
1105void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
1106{
1107	struct ip_tunnel *tunnel = netdev_priv(dev);
1108	struct ip_tunnel_net *itn;
1109
1110	itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);
1111
1112	if (itn->fb_tunnel_dev != dev) {
1113		ip_tunnel_del(itn, netdev_priv(dev));
1114		unregister_netdevice_queue(dev, head);
1115	}
1116}
1117EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
1118
1119struct net *ip_tunnel_get_link_net(const struct net_device *dev)
1120{
1121	struct ip_tunnel *tunnel = netdev_priv(dev);
1122
1123	return READ_ONCE(tunnel->net);
1124}
1125EXPORT_SYMBOL(ip_tunnel_get_link_net);
1126
1127int ip_tunnel_get_iflink(const struct net_device *dev)
1128{
1129	const struct ip_tunnel *tunnel = netdev_priv(dev);
1130
1131	return READ_ONCE(tunnel->parms.link);
1132}
1133EXPORT_SYMBOL(ip_tunnel_get_iflink);
1134
1135int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
1136				  struct rtnl_link_ops *ops, char *devname)
1137{
1138	struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
1139	struct ip_tunnel_parm_kern parms;
1140	unsigned int i;
1141
1142	itn->rtnl_link_ops = ops;
1143	for (i = 0; i < IP_TNL_HASH_SIZE; i++)
1144		INIT_HLIST_HEAD(&itn->tunnels[i]);
1145
1146	if (!ops || !net_has_fallback_tunnels(net)) {
1147		struct ip_tunnel_net *it_init_net;
1148
1149		it_init_net = net_generic(&init_net, ip_tnl_net_id);
1150		itn->type = it_init_net->type;
1151		itn->fb_tunnel_dev = NULL;
1152		return 0;
1153	}
1154
1155	memset(&parms, 0, sizeof(parms));
1156	if (devname)
1157		strscpy(parms.name, devname, IFNAMSIZ);
1158
1159	rtnl_lock();
1160	itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
1161	/* FB netdevice is special: we have one, and only one per netns.
 1162	 * Allowing it to be moved to another netns is clearly unsafe.
1163	 */
1164	if (!IS_ERR(itn->fb_tunnel_dev)) {
1165		itn->fb_tunnel_dev->netns_local = true;
1166		itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
1167		ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
1168		itn->type = itn->fb_tunnel_dev->type;
1169	}
1170	rtnl_unlock();
1171
1172	return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
1173}
1174EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
1175
1176static void ip_tunnel_destroy(struct net *net, struct ip_tunnel_net *itn,
1177			      struct list_head *head,
1178			      struct rtnl_link_ops *ops)
1179{
1180	struct net_device *dev, *aux;
1181	int h;
1182
1183	for_each_netdev_safe(net, dev, aux)
1184		if (dev->rtnl_link_ops == ops)
1185			unregister_netdevice_queue(dev, head);
1186
1187	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
1188		struct ip_tunnel *t;
1189		struct hlist_node *n;
1190		struct hlist_head *thead = &itn->tunnels[h];
1191
1192		hlist_for_each_entry_safe(t, n, thead, hash_node)
1193			/* If dev is in the same netns, it has already
1194			 * been added to the list by the previous loop.
1195			 */
1196			if (!net_eq(dev_net(t->dev), net))
1197				unregister_netdevice_queue(t->dev, head);
1198	}
1199}
1200
1201void ip_tunnel_delete_nets(struct list_head *net_list, unsigned int id,
1202			   struct rtnl_link_ops *ops,
1203			   struct list_head *dev_to_kill)
1204{
1205	struct ip_tunnel_net *itn;
1206	struct net *net;
1207
1208	ASSERT_RTNL();
1209	list_for_each_entry(net, net_list, exit_list) {
1210		itn = net_generic(net, id);
1211		ip_tunnel_destroy(net, itn, dev_to_kill, ops);
1212	}
1213}
1214EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets);
1215
1216int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
1217		      struct ip_tunnel_parm_kern *p, __u32 fwmark)
1218{
1219	struct ip_tunnel *nt;
1220	struct net *net = dev_net(dev);
1221	struct ip_tunnel_net *itn;
1222	int mtu;
1223	int err;
1224
1225	nt = netdev_priv(dev);
1226	itn = net_generic(net, nt->ip_tnl_net_id);
1227
1228	if (nt->collect_md) {
1229		if (rtnl_dereference(itn->collect_md_tun))
1230			return -EEXIST;
1231	} else {
1232		if (ip_tunnel_find(itn, p, dev->type))
1233			return -EEXIST;
1234	}
1235
1236	nt->net = net;
1237	nt->parms = *p;
1238	nt->fwmark = fwmark;
1239	err = register_netdevice(dev);
1240	if (err)
1241		goto err_register_netdevice;
1242
1243	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
1244		eth_hw_addr_random(dev);
1245
1246	mtu = ip_tunnel_bind_dev(dev);
1247	if (tb[IFLA_MTU]) {
1248		unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr));
1249
1250		if (dev->type == ARPHRD_ETHER)
1251			max -= dev->hard_header_len;
1252
1253		mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max);
1254	}
1255
1256	err = dev_set_mtu(dev, mtu);
1257	if (err)
1258		goto err_dev_set_mtu;
1259
1260	ip_tunnel_add(itn, nt);
1261	return 0;
1262
1263err_dev_set_mtu:
1264	unregister_netdevice(dev);
1265err_register_netdevice:
1266	return err;
1267}
1268EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
1269
1270int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
1271			 struct ip_tunnel_parm_kern *p, __u32 fwmark)
1272{
1273	struct ip_tunnel *t;
1274	struct ip_tunnel *tunnel = netdev_priv(dev);
1275	struct net *net = tunnel->net;
1276	struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);
1277
1278	if (dev == itn->fb_tunnel_dev)
1279		return -EINVAL;
1280
1281	t = ip_tunnel_find(itn, p, dev->type);
1282
1283	if (t) {
1284		if (t->dev != dev)
1285			return -EEXIST;
1286	} else {
1287		t = tunnel;
1288
1289		if (dev->type != ARPHRD_ETHER) {
1290			unsigned int nflags = 0;
1291
1292			if (ipv4_is_multicast(p->iph.daddr))
1293				nflags = IFF_BROADCAST;
1294			else if (p->iph.daddr)
1295				nflags = IFF_POINTOPOINT;
1296
1297			if ((dev->flags ^ nflags) &
1298			    (IFF_POINTOPOINT | IFF_BROADCAST))
1299				return -EINVAL;
1300		}
1301	}
1302
1303	ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU], fwmark);
1304	return 0;
1305}
1306EXPORT_SYMBOL_GPL(ip_tunnel_changelink);
1307
1308int ip_tunnel_init(struct net_device *dev)
1309{
1310	struct ip_tunnel *tunnel = netdev_priv(dev);
1311	struct iphdr *iph = &tunnel->parms.iph;
1312	int err;
1313
1314	dev->needs_free_netdev = true;
1315	dev->priv_destructor = ip_tunnel_dev_free;
1316	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
1317
1318	err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
1319	if (err)
1320		return err;
1321
1322	err = gro_cells_init(&tunnel->gro_cells, dev);
1323	if (err) {
1324		dst_cache_destroy(&tunnel->dst_cache);
1325		return err;
1326	}
1327
1328	tunnel->dev = dev;
1329	tunnel->net = dev_net(dev);
1330	strscpy(tunnel->parms.name, dev->name);
1331	iph->version		= 4;
1332	iph->ihl		= 5;
1333
1334	if (tunnel->collect_md)
1335		netif_keep_dst(dev);
1336	netdev_lockdep_set_classes(dev);
1337	return 0;
1338}
1339EXPORT_SYMBOL_GPL(ip_tunnel_init);
1340
1341void ip_tunnel_uninit(struct net_device *dev)
1342{
1343	struct ip_tunnel *tunnel = netdev_priv(dev);
1344	struct net *net = tunnel->net;
1345	struct ip_tunnel_net *itn;
1346
1347	itn = net_generic(net, tunnel->ip_tnl_net_id);
1348	ip_tunnel_del(itn, netdev_priv(dev));
1349	if (itn->fb_tunnel_dev == dev)
1350		WRITE_ONCE(itn->fb_tunnel_dev, NULL);
1351
1352	dst_cache_reset(&tunnel->dst_cache);
1353}
1354EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
1355
 1356/* Do the least required initialization; the rest of the init is done in the tunnel_init call */
1357void ip_tunnel_setup(struct net_device *dev, unsigned int net_id)
1358{
1359	struct ip_tunnel *tunnel = netdev_priv(dev);
1360	tunnel->ip_tnl_net_id = net_id;
1361}
1362EXPORT_SYMBOL_GPL(ip_tunnel_setup);
1363
1364MODULE_DESCRIPTION("IPv4 tunnel implementation library");
1365MODULE_LICENSE("GPL");
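/* How a tunnel driver typically consumes this library: its net_device_ops
 * are filled almost entirely with the ip_tunnel_* helpers exported above,
 * and only the encapsulation step in ndo_start_xmit is its own. A
 * condensed, hypothetical sketch, loosely modelled on the in-tree ipip
 * driver ("mytnl_" names are placeholders, and real drivers do more work
 * in their xmit and init paths):
 *
 *	static netdev_tx_t mytnl_xmit(struct sk_buff *skb,
 *				      struct net_device *dev)
 *	{
 *		struct ip_tunnel *tunnel = netdev_priv(dev);
 *
 *		/* Encapsulate with the parameters configured on the tunnel. */
 *		ip_tunnel_xmit(skb, dev, &tunnel->parms.iph, IPPROTO_IPIP);
 *		return NETDEV_TX_OK;
 *	}
 *
 *	static const struct net_device_ops mytnl_netdev_ops = {
 *		.ndo_init		= ip_tunnel_init,
 *		.ndo_uninit		= ip_tunnel_uninit,
 *		.ndo_start_xmit		= mytnl_xmit,
 *		.ndo_siocdevprivate	= ip_tunnel_siocdevprivate,
 *		.ndo_change_mtu		= ip_tunnel_change_mtu,
 *		.ndo_get_iflink		= ip_tunnel_get_iflink,
 *		.ndo_tunnel_ctl		= ip_tunnel_ctl,
 *	};
 */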
v3.15
 
   1/*
   2 * Copyright (c) 2013 Nicira, Inc.
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of version 2 of the GNU General Public
   6 * License as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful, but
   9 * WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11 * General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public License
  14 * along with this program; if not, write to the Free Software
  15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  16 * 02110-1301, USA
  17 */
  18
  19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  20
  21#include <linux/capability.h>
  22#include <linux/module.h>
  23#include <linux/types.h>
  24#include <linux/kernel.h>
  25#include <linux/slab.h>
  26#include <linux/uaccess.h>
  27#include <linux/skbuff.h>
  28#include <linux/netdevice.h>
  29#include <linux/in.h>
  30#include <linux/tcp.h>
  31#include <linux/udp.h>
  32#include <linux/if_arp.h>
  33#include <linux/mroute.h>
  34#include <linux/init.h>
  35#include <linux/in6.h>
  36#include <linux/inetdevice.h>
  37#include <linux/igmp.h>
  38#include <linux/netfilter_ipv4.h>
  39#include <linux/etherdevice.h>
  40#include <linux/if_ether.h>
  41#include <linux/if_vlan.h>
  42#include <linux/rculist.h>
  43#include <linux/err.h>
  44
  45#include <net/sock.h>
  46#include <net/ip.h>
  47#include <net/icmp.h>
  48#include <net/protocol.h>
  49#include <net/ip_tunnels.h>
  50#include <net/arp.h>
  51#include <net/checksum.h>
  52#include <net/dsfield.h>
  53#include <net/inet_ecn.h>
  54#include <net/xfrm.h>
  55#include <net/net_namespace.h>
  56#include <net/netns/generic.h>
  57#include <net/rtnetlink.h>
 
 
 
  58
  59#if IS_ENABLED(CONFIG_IPV6)
  60#include <net/ipv6.h>
  61#include <net/ip6_fib.h>
  62#include <net/ip6_route.h>
  63#endif
  64
  65static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
  66{
  67	return hash_32((__force u32)key ^ (__force u32)remote,
  68			 IP_TNL_HASH_BITS);
  69}
  70
  71static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
  72			     struct dst_entry *dst)
  73{
  74	struct dst_entry *old_dst;
 
  75
  76	if (dst) {
  77		if (dst->flags & DST_NOCACHE)
  78			dst = NULL;
  79		else
  80			dst_clone(dst);
  81	}
  82	old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
  83	dst_release(old_dst);
  84}
  85
  86static void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
  87{
  88	__tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);
  89}
  90
  91static void tunnel_dst_reset(struct ip_tunnel *t)
  92{
  93	tunnel_dst_set(t, NULL);
  94}
  95
  96void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
  97{
  98	int i;
  99
 100	for_each_possible_cpu(i)
 101		__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
 102}
 103EXPORT_SYMBOL(ip_tunnel_dst_reset_all);
 104
 105static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
 106{
 107	struct dst_entry *dst;
 108
 109	rcu_read_lock();
 110	dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
 111	if (dst) {
 112		if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
 113			rcu_read_unlock();
 114			tunnel_dst_reset(t);
 115			return NULL;
 116		}
 117		dst_hold(dst);
 118	}
 119	rcu_read_unlock();
 120	return (struct rtable *)dst;
 121}
 122
 123static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
 124				__be16 flags, __be32 key)
 125{
 126	if (p->i_flags & TUNNEL_KEY) {
 127		if (flags & TUNNEL_KEY)
 128			return key == p->i_key;
 129		else
 130			/* key expected, none present */
 131			return false;
 132	} else
 133		return !(flags & TUNNEL_KEY);
 134}
 135
 136/* Fallback tunnel: no source, no destination, no key, no options
 137
 138   Tunnel hash table:
 139   We require exact key match i.e. if a key is present in packet
 140   it will match only tunnel with the same key; if it is not present,
 141   it will match only keyless tunnel.
 142
 143   All keysless packets, if not matched configured keyless tunnels
 144   will match fallback tunnel.
 145   Given src, dst and key, find appropriate for input tunnel.
 146*/
 147struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 148				   int link, __be16 flags,
 149				   __be32 remote, __be32 local,
 150				   __be32 key)
 151{
 152	unsigned int hash;
 153	struct ip_tunnel *t, *cand = NULL;
 154	struct hlist_head *head;
 
 
 155
 156	hash = ip_tunnel_hash(key, remote);
 157	head = &itn->tunnels[hash];
 158
 159	hlist_for_each_entry_rcu(t, head, hash_node) {
 160		if (local != t->parms.iph.saddr ||
 161		    remote != t->parms.iph.daddr ||
 162		    !(t->dev->flags & IFF_UP))
 163			continue;
 164
 165		if (!ip_tunnel_key_match(&t->parms, flags, key))
 166			continue;
 167
 168		if (t->parms.link == link)
 169			return t;
 170		else
 171			cand = t;
 172	}
 173
 174	hlist_for_each_entry_rcu(t, head, hash_node) {
 175		if (remote != t->parms.iph.daddr ||
 
 176		    !(t->dev->flags & IFF_UP))
 177			continue;
 178
 179		if (!ip_tunnel_key_match(&t->parms, flags, key))
 180			continue;
 181
 182		if (t->parms.link == link)
 183			return t;
 184		else if (!cand)
 185			cand = t;
 186	}
 187
 188	hash = ip_tunnel_hash(key, 0);
 189	head = &itn->tunnels[hash];
 190
 191	hlist_for_each_entry_rcu(t, head, hash_node) {
 192		if ((local != t->parms.iph.saddr &&
 193		     (local != t->parms.iph.daddr ||
 194		      !ipv4_is_multicast(local))) ||
 195		    !(t->dev->flags & IFF_UP))
 
 196			continue;
 197
 198		if (!ip_tunnel_key_match(&t->parms, flags, key))
 199			continue;
 200
 201		if (t->parms.link == link)
 202			return t;
 203		else if (!cand)
 204			cand = t;
 205	}
 206
 207	if (flags & TUNNEL_NO_KEY)
 208		goto skip_key_lookup;
 209
 210	hlist_for_each_entry_rcu(t, head, hash_node) {
 211		if (t->parms.i_key != key ||
 
 
 
 212		    !(t->dev->flags & IFF_UP))
 213			continue;
 214
 215		if (t->parms.link == link)
 216			return t;
 217		else if (!cand)
 218			cand = t;
 219	}
 220
 221skip_key_lookup:
 222	if (cand)
 223		return cand;
 224
 225	if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
 226		return netdev_priv(itn->fb_tunnel_dev);
 227
 
 
 
 
 228
 229	return NULL;
 230}
 231EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
 232
 233static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
 234				    struct ip_tunnel_parm *parms)
 235{
 236	unsigned int h;
 237	__be32 remote;
 238	__be32 i_key = parms->i_key;
 239
 240	if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
 241		remote = parms->iph.daddr;
 242	else
 243		remote = 0;
 244
 245	if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
 
 246		i_key = 0;
 247
 248	h = ip_tunnel_hash(i_key, remote);
 249	return &itn->tunnels[h];
 250}
 251
 252static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
 253{
 254	struct hlist_head *head = ip_bucket(itn, &t->parms);
 255
 
 
 256	hlist_add_head_rcu(&t->hash_node, head);
 257}
 258
 259static void ip_tunnel_del(struct ip_tunnel *t)
 260{
 
 
 261	hlist_del_init_rcu(&t->hash_node);
 262}
 263
 264static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
 265					struct ip_tunnel_parm *parms,
 266					int type)
 267{
 268	__be32 remote = parms->iph.daddr;
 269	__be32 local = parms->iph.saddr;
 
 270	__be32 key = parms->i_key;
 271	int link = parms->link;
 272	struct ip_tunnel *t = NULL;
 273	struct hlist_head *head = ip_bucket(itn, parms);
 274
 275	hlist_for_each_entry_rcu(t, head, hash_node) {
 
 
 276		if (local == t->parms.iph.saddr &&
 277		    remote == t->parms.iph.daddr &&
 278		    key == t->parms.i_key &&
 279		    link == t->parms.link &&
 280		    type == t->dev->type)
 281			break;
 282	}
 283	return t;
 284}
 285
 286static struct net_device *__ip_tunnel_create(struct net *net,
 287					     const struct rtnl_link_ops *ops,
 288					     struct ip_tunnel_parm *parms)
 289{
 290	int err;
 291	struct ip_tunnel *tunnel;
 292	struct net_device *dev;
 293	char name[IFNAMSIZ];
 294
 295	if (parms->name[0])
 296		strlcpy(name, parms->name, IFNAMSIZ);
 297	else {
 298		if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
 299			err = -E2BIG;
 300			goto failed;
 301		}
 302		strlcpy(name, ops->kind, IFNAMSIZ);
 303		strncat(name, "%d", 2);
 
 
 
 304	}
 305
 306	ASSERT_RTNL();
 307	dev = alloc_netdev(ops->priv_size, name, ops->setup);
 308	if (!dev) {
 309		err = -ENOMEM;
 310		goto failed;
 311	}
 312	dev_net_set(dev, net);
 313
 314	dev->rtnl_link_ops = ops;
 315
 316	tunnel = netdev_priv(dev);
 317	tunnel->parms = *parms;
 318	tunnel->net = net;
 319
 320	err = register_netdevice(dev);
 321	if (err)
 322		goto failed_free;
 323
 324	return dev;
 325
 326failed_free:
 327	free_netdev(dev);
 328failed:
 329	return ERR_PTR(err);
 330}
 331
 332static inline void init_tunnel_flow(struct flowi4 *fl4,
 333				    int proto,
 334				    __be32 daddr, __be32 saddr,
 335				    __be32 key, __u8 tos, int oif)
 336{
 337	memset(fl4, 0, sizeof(*fl4));
 338	fl4->flowi4_oif = oif;
 339	fl4->daddr = daddr;
 340	fl4->saddr = saddr;
 341	fl4->flowi4_tos = tos;
 342	fl4->flowi4_proto = proto;
 343	fl4->fl4_gre_key = key;
 344}
 345
 346static int ip_tunnel_bind_dev(struct net_device *dev)
 347{
 348	struct net_device *tdev = NULL;
 349	struct ip_tunnel *tunnel = netdev_priv(dev);
 350	const struct iphdr *iph;
 351	int hlen = LL_MAX_HEADER;
 352	int mtu = ETH_DATA_LEN;
 353	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
 354
 355	iph = &tunnel->parms.iph;
 356
 357	/* Guess output device to choose reasonable mtu and needed_headroom */
 358	if (iph->daddr) {
 359		struct flowi4 fl4;
 360		struct rtable *rt;
 361
 362		init_tunnel_flow(&fl4, iph->protocol, iph->daddr,
 363				 iph->saddr, tunnel->parms.o_key,
 364				 RT_TOS(iph->tos), tunnel->parms.link);
 
 365		rt = ip_route_output_key(tunnel->net, &fl4);
 366
 367		if (!IS_ERR(rt)) {
 368			tdev = rt->dst.dev;
 369			tunnel_dst_set(tunnel, &rt->dst);
 370			ip_rt_put(rt);
 371		}
 372		if (dev->type != ARPHRD_ETHER)
 373			dev->flags |= IFF_POINTOPOINT;
 
 
 374	}
 375
 376	if (!tdev && tunnel->parms.link)
 377		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
 378
 379	if (tdev) {
 380		hlen = tdev->hard_header_len + tdev->needed_headroom;
 381		mtu = tdev->mtu;
 382	}
 383	dev->iflink = tunnel->parms.link;
 384
 385	dev->needed_headroom = t_hlen + hlen;
 386	mtu -= (dev->hard_header_len + t_hlen);
 387
 388	if (mtu < 68)
 389		mtu = 68;
 390
 391	return mtu;
 392}
 393
 394static struct ip_tunnel *ip_tunnel_create(struct net *net,
 395					  struct ip_tunnel_net *itn,
 396					  struct ip_tunnel_parm *parms)
 397{
 398	struct ip_tunnel *nt, *fbt;
 399	struct net_device *dev;
 
 
 
 400
 401	BUG_ON(!itn->fb_tunnel_dev);
 402	fbt = netdev_priv(itn->fb_tunnel_dev);
 403	dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
 404	if (IS_ERR(dev))
 405		return ERR_CAST(dev);
 406
 407	dev->mtu = ip_tunnel_bind_dev(dev);
 
 
 
 408
 409	nt = netdev_priv(dev);
 
 
 
 
 
 
 410	ip_tunnel_add(itn, nt);
 411	return nt;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 412}
 
 413
 414int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
 415		  const struct tnl_ptk_info *tpi, bool log_ecn_error)
 
 416{
 417	struct pcpu_sw_netstats *tstats;
 418	const struct iphdr *iph = ip_hdr(skb);
 419	int err;
 420
 421#ifdef CONFIG_NET_IPGRE_BROADCAST
 422	if (ipv4_is_multicast(iph->daddr)) {
 423		tunnel->dev->stats.multicast++;
 424		skb->pkt_type = PACKET_BROADCAST;
 425	}
 426#endif
 427
 428	if ((!(tpi->flags&TUNNEL_CSUM) &&  (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
 429	     ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
 430		tunnel->dev->stats.rx_crc_errors++;
 431		tunnel->dev->stats.rx_errors++;
 432		goto drop;
 433	}
 434
 435	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
 436		if (!(tpi->flags&TUNNEL_SEQ) ||
 437		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
 438			tunnel->dev->stats.rx_fifo_errors++;
 439			tunnel->dev->stats.rx_errors++;
 440			goto drop;
 441		}
 442		tunnel->i_seqno = ntohl(tpi->seq) + 1;
 443	}
 444
 445	skb_reset_network_header(skb);
 
 
 
 
 
 
 
 
 
 
 
 
 
 446
 447	err = IP_ECN_decapsulate(iph, skb);
 448	if (unlikely(err)) {
 449		if (log_ecn_error)
 450			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
 451					&iph->saddr, iph->tos);
 452		if (err > 1) {
 453			++tunnel->dev->stats.rx_frame_errors;
 454			++tunnel->dev->stats.rx_errors;
 455			goto drop;
 456		}
 457	}
 458
 459	tstats = this_cpu_ptr(tunnel->dev->tstats);
 460	u64_stats_update_begin(&tstats->syncp);
 461	tstats->rx_packets++;
 462	tstats->rx_bytes += skb->len;
 463	u64_stats_update_end(&tstats->syncp);
 464
 465	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
 466
 467	if (tunnel->dev->type == ARPHRD_ETHER) {
 468		skb->protocol = eth_type_trans(skb, tunnel->dev);
 469		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 470	} else {
 471		skb->dev = tunnel->dev;
 472	}
 473
 
 
 
 474	gro_cells_receive(&tunnel->gro_cells, skb);
 475	return 0;
 476
 477drop:
 
 
 478	kfree_skb(skb);
 479	return 0;
 480}
 481EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
 482
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 483static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
 484			    struct rtable *rt, __be16 df)
 
 
 485{
 486	struct ip_tunnel *tunnel = netdev_priv(dev);
 487	int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
 488	int mtu;
 489
 490	if (df)
 491		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
 492					- sizeof(struct iphdr) - tunnel->hlen;
 493	else
 494		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
 
 
 
 
 
 495
 496	if (skb_dst(skb))
 497		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
 498
 499	if (skb->protocol == htons(ETH_P_IP)) {
 500		if (!skb_is_gso(skb) &&
 501		    (df & htons(IP_DF)) && mtu < pkt_size) {
 502			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 503			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 504			return -E2BIG;
 505		}
 506	}
 507#if IS_ENABLED(CONFIG_IPV6)
 508	else if (skb->protocol == htons(ETH_P_IPV6)) {
 509		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
 
 
 
 
 
 510
 511		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
 512			   mtu >= IPV6_MIN_MTU) {
 513			if ((tunnel->parms.iph.daddr &&
 514			    !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
 515			    rt6->rt6i_dst.plen == 128) {
 516				rt6->rt6i_flags |= RTF_MODIFIED;
 517				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
 518			}
 519		}
 520
 521		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
 522					mtu < pkt_size) {
 523			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 524			return -E2BIG;
 525		}
 526	}
 527#endif
 528	return 0;
 529}
 530
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 531void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 532		    const struct iphdr *tnl_params, const u8 protocol)
 533{
 534	struct ip_tunnel *tunnel = netdev_priv(dev);
 
 535	const struct iphdr *inner_iph;
 
 
 
 
 536	struct flowi4 fl4;
 537	u8     tos, ttl;
 
 
 
 538	__be16 df;
 539	struct rtable *rt;		/* Route to the other host */
 540	unsigned int max_headroom;	/* The extra header space needed */
 541	__be32 dst;
 542	int err;
 543	bool connected;
 544
 545	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
 546	connected = (tunnel->parms.iph.daddr != 0);
 
 
 
 547
 548	dst = tnl_params->daddr;
 549	if (dst == 0) {
 550		/* NBMA tunnel */
 551
 552		if (skb_dst(skb) == NULL) {
 553			dev->stats.tx_fifo_errors++;
 554			goto tx_error;
 555		}
 556
 557		if (skb->protocol == htons(ETH_P_IP)) {
 
 
 
 
 
 
 
 558			rt = skb_rtable(skb);
 559			dst = rt_nexthop(rt, inner_iph->daddr);
 560		}
 561#if IS_ENABLED(CONFIG_IPV6)
 562		else if (skb->protocol == htons(ETH_P_IPV6)) {
 563			const struct in6_addr *addr6;
 564			struct neighbour *neigh;
 565			bool do_tx_error_icmp;
 566			int addr_type;
 567
 568			neigh = dst_neigh_lookup(skb_dst(skb),
 569						 &ipv6_hdr(skb)->daddr);
 570			if (neigh == NULL)
 571				goto tx_error;
 572
 573			addr6 = (const struct in6_addr *)&neigh->primary_key;
 574			addr_type = ipv6_addr_type(addr6);
 575
 576			if (addr_type == IPV6_ADDR_ANY) {
 577				addr6 = &ipv6_hdr(skb)->daddr;
 578				addr_type = ipv6_addr_type(addr6);
 579			}
 580
 581			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
 582				do_tx_error_icmp = true;
 583			else {
 584				do_tx_error_icmp = false;
 585				dst = addr6->s6_addr32[3];
 586			}
 587			neigh_release(neigh);
 588			if (do_tx_error_icmp)
 589				goto tx_error_icmp;
 590		}
 591#endif
 592		else
 593			goto tx_error;
 594
 595		connected = false;
 
 596	}
 597
 598	tos = tnl_params->tos;
 599	if (tos & 0x1) {
 600		tos &= ~0x1;
 601		if (skb->protocol == htons(ETH_P_IP)) {
 602			tos = inner_iph->tos;
 603			connected = false;
 604		} else if (skb->protocol == htons(ETH_P_IPV6)) {
 605			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
 606			connected = false;
 607		}
 608	}
 609
 610	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
 611			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
 612
 613	rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;
 614
 615	if (!rt) {
 616		rt = ip_route_output_key(tunnel->net, &fl4);
 617
 618		if (IS_ERR(rt)) {
 619			dev->stats.tx_carrier_errors++;
 620			goto tx_error;
 621		}
 622		if (connected)
 623			tunnel_dst_set(tunnel, &rt->dst);
 624	}
 625
 626	if (rt->dst.dev == dev) {
 627		ip_rt_put(rt);
 628		dev->stats.collisions++;
 629		goto tx_error;
 630	}
 631
 632	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
 633		ip_rt_put(rt);
 634		goto tx_error;
 635	}
 636
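	/* If an ICMP error was recently recorded for this tunnel
	 * (err_count/err_time), replay it to the local sender as a link
	 * failure for a short window (IPTUNNEL_ERR_TIMEO) before
	 * transmitting anyway.
	 */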
 637	if (tunnel->err_count > 0) {
 638		if (time_before(jiffies,
 639				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
 640			tunnel->err_count--;
 641
 642			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 643			dst_link_failure(skb);
 644		} else
 645			tunnel->err_count = 0;
 646	}
 647
 648	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
 649	ttl = tnl_params->ttl;
 650	if (ttl == 0) {
 651		if (skb->protocol == htons(ETH_P_IP))
 652			ttl = inner_iph->ttl;
 653#if IS_ENABLED(CONFIG_IPV6)
 654		else if (skb->protocol == htons(ETH_P_IPV6))
 655			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
 656#endif
 657		else
 658			ttl = ip4_dst_hoplimit(&rt->dst);
 659	}
 660
 661	df = tnl_params->frag_off;
 662	if (skb->protocol == htons(ETH_P_IP))
 663		df |= (inner_iph->frag_off&htons(IP_DF));
 664
 665	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
 666			+ rt->dst.header_len;
 667	if (max_headroom > dev->needed_headroom)
 668		dev->needed_headroom = max_headroom;
 669
 670	if (skb_cow_head(skb, dev->needed_headroom)) {
 671		dev->stats.tx_dropped++;
 672		kfree_skb(skb);
 673		return;
 674	}
 675
 676	err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr, protocol,
 677			    tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
 678	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
 679
 680	return;
 681
 682#if IS_ENABLED(CONFIG_IPV6)
 683tx_error_icmp:
 684	dst_link_failure(skb);
 685#endif
 686tx_error:
 687	dev->stats.tx_errors++;
 688	kfree_skb(skb);
 689}
 690EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
 691
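/* Apply new parameters to an existing tunnel: unhash it, update addresses,
 * keys and ttl/tos/frag_off (plus the bound link and MTU when they change),
 * re-hash it, flush its cached routes and notify userspace of the change.
 */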
 692static void ip_tunnel_update(struct ip_tunnel_net *itn,
 693			     struct ip_tunnel *t,
 694			     struct net_device *dev,
 695			     struct ip_tunnel_parm *p,
 696			     bool set_mtu)
 697{
 698	ip_tunnel_del(t);
 699	t->parms.iph.saddr = p->iph.saddr;
 700	t->parms.iph.daddr = p->iph.daddr;
 701	t->parms.i_key = p->i_key;
 702	t->parms.o_key = p->o_key;
 703	if (dev->type != ARPHRD_ETHER) {
 704		memcpy(dev->dev_addr, &p->iph.saddr, 4);
 705		memcpy(dev->broadcast, &p->iph.daddr, 4);
 706	}
 707	ip_tunnel_add(itn, t);
 708
 709	t->parms.iph.ttl = p->iph.ttl;
 710	t->parms.iph.tos = p->iph.tos;
 711	t->parms.iph.frag_off = p->iph.frag_off;
 712
 713	if (t->parms.link != p->link) {
 714		int mtu;
 715
 716		t->parms.link = p->link;
 717		mtu = ip_tunnel_bind_dev(dev);
 718		if (set_mtu)
 719			dev->mtu = mtu;
 720	}
 721	ip_tunnel_dst_reset_all(t);
 722	netdev_state_change(dev);
 723}
 724
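/* Legacy ioctl configuration interface shared by the IPv4 tunnel drivers:
 * SIOCGETTUNNEL, SIOCADDTUNNEL, SIOCCHGTUNNEL and SIOCDELTUNNEL.  Add,
 * change and delete require CAP_NET_ADMIN in the tunnel's user namespace.
 */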
 725int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
 726{
 727	int err = 0;
 728	struct ip_tunnel *t = netdev_priv(dev);
 729	struct net *net = t->net;
 730	struct ip_tunnel_net *itn = net_generic(net, t->ip_tnl_net_id);
 731
 732	BUG_ON(!itn->fb_tunnel_dev);
 733	switch (cmd) {
 734	case SIOCGETTUNNEL:
 735		if (dev == itn->fb_tunnel_dev) {
 736			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
 737			if (t == NULL)
 738				t = netdev_priv(dev);
 739		}
 740		memcpy(p, &t->parms, sizeof(*p));
 741		break;
 742
 743	case SIOCADDTUNNEL:
 744	case SIOCCHGTUNNEL:
 745		err = -EPERM;
 746		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 747			goto done;
 748		if (p->iph.ttl)
 749			p->iph.frag_off |= htons(IP_DF);
 750		if (!(p->i_flags&TUNNEL_KEY))
 751			p->i_key = 0;
 752		if (!(p->o_flags&TUNNEL_KEY))
 753			p->o_key = 0;
 754
 755		t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
 756
 757		if (!t && (cmd == SIOCADDTUNNEL)) {
 758			t = ip_tunnel_create(net, itn, p);
 759			if (IS_ERR(t)) {
 760				err = PTR_ERR(t);
 761				break;
 762			}
 763		}
 764		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
 765			if (t != NULL) {
 766				if (t->dev != dev) {
 767					err = -EEXIST;
 768					break;
 769				}
 770			} else {
 771				unsigned int nflags = 0;
 772
 773				if (ipv4_is_multicast(p->iph.daddr))
 774					nflags = IFF_BROADCAST;
 775				else if (p->iph.daddr)
 776					nflags = IFF_POINTOPOINT;
 777
 778				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
 779					err = -EINVAL;
 780					break;
 781				}
 782
 783				t = netdev_priv(dev);
 784			}
 785		}
 786
 787		if (t) {
 788			err = 0;
 789			ip_tunnel_update(itn, t, dev, p, true);
 790		} else {
 791			err = -ENOENT;
 792		}
 793		break;
 794
 795	case SIOCDELTUNNEL:
 796		err = -EPERM;
 797		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 798			goto done;
 799
 800		if (dev == itn->fb_tunnel_dev) {
 801			err = -ENOENT;
 802			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
 803			if (t == NULL)
 804				goto done;
 805			err = -EPERM;
 806			if (t == netdev_priv(itn->fb_tunnel_dev))
 807				goto done;
 808			dev = t->dev;
 809		}
 810		unregister_netdevice(dev);
 811		err = 0;
 812		break;
 813
 814	default:
 815		err = -EINVAL;
 816	}
 817
 818done:
 819	return err;
 820}
 821EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
 822
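/* Shared ndo_change_mtu handler: the upper bound keeps the encapsulated
 * packet (link-layer header + tunnel header + outer IP header) within the
 * 0xFFF8 IPv4 size limit; the lower bound is the 68-byte IPv4 minimum MTU.
 */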
 823int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
 824{
 825	struct ip_tunnel *tunnel = netdev_priv(dev);
 826	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
 827
 828	if (new_mtu < 68 ||
 829	    new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
 830		return -EINVAL;
 831	dev->mtu = new_mtu;
 832	return 0;
 833}
 834EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
 835
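/* netdev destructor: release the GRO cells, the per-cpu dst cache and the
 * per-cpu stats before freeing the device itself.
 */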
 836static void ip_tunnel_dev_free(struct net_device *dev)
 837{
 838	struct ip_tunnel *tunnel = netdev_priv(dev);
 839
 840	gro_cells_destroy(&tunnel->gro_cells);
 841	free_percpu(tunnel->dst_cache);
 842	free_percpu(dev->tstats);
 843	free_netdev(dev);
 844}
 845
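/* rtnl dellink handler: unhash the tunnel and queue the device for
 * unregistration.  The per-netns fallback device is skipped here; it is
 * torn down only when its network namespace exits.
 */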
 846void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
 847{
 848	struct ip_tunnel *tunnel = netdev_priv(dev);
 849	struct ip_tunnel_net *itn;
 850
 851	itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);
 852
 853	if (itn->fb_tunnel_dev != dev) {
 854		ip_tunnel_del(netdev_priv(dev));
 855		unregister_netdevice_queue(dev, head);
 856	}
 857}
 858EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
 859
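/* Per-netns initialisation shared by the tunnel drivers: set up the hash
 * table and, when @ops is supplied, create the fallback device @devname.
 * The fallback device is pinned to its namespace via NETIF_F_NETNS_LOCAL.
 */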
 860int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
 861				  struct rtnl_link_ops *ops, char *devname)
 862{
 863	struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
 864	struct ip_tunnel_parm parms;
 865	unsigned int i;
 866
 867	for (i = 0; i < IP_TNL_HASH_SIZE; i++)
 868		INIT_HLIST_HEAD(&itn->tunnels[i]);
 869
 870	if (!ops) {
 871		itn->fb_tunnel_dev = NULL;
 872		return 0;
 873	}
 874
 875	memset(&parms, 0, sizeof(parms));
 876	if (devname)
 877		strlcpy(parms.name, devname, IFNAMSIZ);
 878
 879	rtnl_lock();
 880	itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
 881	/* FB netdevice is special: we have one, and only one per netns.
 882	 * Allowing it to be moved to another netns is clearly unsafe.
 883	 */
 884	if (!IS_ERR(itn->fb_tunnel_dev)) {
 885		itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
 886		itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
 887		ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
 888	}
 889	rtnl_unlock();
 890
 891	return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
 892}
 893EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
 894
 895static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
 896			      struct rtnl_link_ops *ops)
 897{
 898	struct net *net = dev_net(itn->fb_tunnel_dev);
 899	struct net_device *dev, *aux;
 900	int h;
 901
 902	for_each_netdev_safe(net, dev, aux)
 903		if (dev->rtnl_link_ops == ops)
 904			unregister_netdevice_queue(dev, head);
 905
 906	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
 907		struct ip_tunnel *t;
 908		struct hlist_node *n;
 909		struct hlist_head *thead = &itn->tunnels[h];
 910
 911		hlist_for_each_entry_safe(t, n, thead, hash_node)
 912			/* If dev is in the same netns, it has already
 913			 * been added to the list by the previous loop.
 914			 */
 915			if (!net_eq(dev_net(t->dev), net))
 916				unregister_netdevice_queue(t->dev, head);
 917	}
 918}
 919
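/* Per-netns exit helper: collect every device owned by @ops in this netns,
 * plus any tunnel hashed here from another netns, and unregister them in
 * one batch under the rtnl lock.
 */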
 920void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
 921{
 922	LIST_HEAD(list);
 923
 924	rtnl_lock();
 925	ip_tunnel_destroy(itn, &list, ops);
 926	unregister_netdevice_many(&list);
 927	rtnl_unlock();
 928}
 929EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);
 930
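/* rtnl newlink handler: reject duplicate parameters, register the device,
 * assign a random MAC (Ethernet-type tunnels) and a default MTU when none
 * were supplied, and add the new tunnel to the per-netns hash table.
 */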
 931int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
 932		      struct ip_tunnel_parm *p)
 933{
 934	struct ip_tunnel *nt;
 935	struct net *net = dev_net(dev);
 936	struct ip_tunnel_net *itn;
 937	int mtu;
 938	int err;
 939
 940	nt = netdev_priv(dev);
 941	itn = net_generic(net, nt->ip_tnl_net_id);
 942
 943	if (ip_tunnel_find(itn, p, dev->type))
 944		return -EEXIST;
 945
 946	nt->net = net;
 947	nt->parms = *p;
 948	err = register_netdevice(dev);
 949	if (err)
 950		goto out;
 951
 952	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
 953		eth_hw_addr_random(dev);
 954
 955	mtu = ip_tunnel_bind_dev(dev);
 956	if (!tb[IFLA_MTU])
 957		dev->mtu = mtu;
 958
 959	ip_tunnel_add(itn, nt);
 960
 961out:
 962	return err;
 963}
 964EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
 965
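/* rtnl changelink handler: the fallback device cannot be changed, the new
 * parameters must not collide with another tunnel, and for non-Ethernet
 * devices the point-to-point/broadcast flags must stay consistent with the
 * new destination.  The update itself is done by ip_tunnel_update().
 */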
 966int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
 967			 struct ip_tunnel_parm *p)
 968{
 969	struct ip_tunnel *t;
 970	struct ip_tunnel *tunnel = netdev_priv(dev);
 971	struct net *net = tunnel->net;
 972	struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);
 973
 974	if (dev == itn->fb_tunnel_dev)
 975		return -EINVAL;
 976
 977	t = ip_tunnel_find(itn, p, dev->type);
 978
 979	if (t) {
 980		if (t->dev != dev)
 981			return -EEXIST;
 982	} else {
 983		t = tunnel;
 984
 985		if (dev->type != ARPHRD_ETHER) {
 986			unsigned int nflags = 0;
 987
 988			if (ipv4_is_multicast(p->iph.daddr))
 989				nflags = IFF_BROADCAST;
 990			else if (p->iph.daddr)
 991				nflags = IFF_POINTOPOINT;
 992
 993			if ((dev->flags ^ nflags) &
 994			    (IFF_POINTOPOINT | IFF_BROADCAST))
 995				return -EINVAL;
 996		}
 997	}
 998
 999	ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU]);
1000	return 0;
1001}
1002EXPORT_SYMBOL_GPL(ip_tunnel_changelink);
1003
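/* Shared ndo_init handler: allocate per-cpu stats and the per-cpu dst
 * cache, initialise the GRO cells and pre-fill the constant fields of the
 * outer IPv4 header (version, ihl).
 */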
1004int ip_tunnel_init(struct net_device *dev)
1005{
1006	struct ip_tunnel *tunnel = netdev_priv(dev);
1007	struct iphdr *iph = &tunnel->parms.iph;
1008	int err;
1009
1010	dev->destructor	= ip_tunnel_dev_free;
1011	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1012	if (!dev->tstats)
1013		return -ENOMEM;
1014
1015	tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
1016	if (!tunnel->dst_cache) {
1017		free_percpu(dev->tstats);
1018		return -ENOMEM;
1019	}
1020
1021	err = gro_cells_init(&tunnel->gro_cells, dev);
1022	if (err) {
1023		free_percpu(tunnel->dst_cache);
1024		free_percpu(dev->tstats);
1025		return err;
1026	}
1027
1028	tunnel->dev = dev;
1029	tunnel->net = dev_net(dev);
1030	strcpy(tunnel->parms.name, dev->name);
1031	iph->version		= 4;
1032	iph->ihl		= 5;
1033
1034	return 0;
1035}
1036EXPORT_SYMBOL_GPL(ip_tunnel_init);
1037
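/* Shared ndo_uninit handler: unhash the tunnel (the fallback device stays
 * until netns teardown) and drop its cached routes.
 */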
1038void ip_tunnel_uninit(struct net_device *dev)
1039{
1040	struct ip_tunnel *tunnel = netdev_priv(dev);
1041	struct net *net = tunnel->net;
1042	struct ip_tunnel_net *itn;
1043
1044	itn = net_generic(net, tunnel->ip_tnl_net_id);
1045	/* fb_tunnel_dev will be unregistered in the netns exit path. */
1046	if (itn->fb_tunnel_dev != dev)
1047		ip_tunnel_del(netdev_priv(dev));
1048
1049	ip_tunnel_dst_reset_all(tunnel);
1050}
1051EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
1052
1053/* Do the least required initialization; the rest of init is done in the tunnel_init call */
1054void ip_tunnel_setup(struct net_device *dev, int net_id)
1055{
1056	struct ip_tunnel *tunnel = netdev_priv(dev);
1057	tunnel->ip_tnl_net_id = net_id;
1058}
1059EXPORT_SYMBOL_GPL(ip_tunnel_setup);
1060
1061MODULE_LICENSE("GPL");