Linux Audio

Check our new training course

Loading...
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	Linux NET3:	GRE over IP protocol decoder.
   4 *
   5 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 
 
 
 
 
 
   6 */
   7
   8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   9
  10#include <linux/capability.h>
  11#include <linux/module.h>
  12#include <linux/types.h>
  13#include <linux/kernel.h>
  14#include <linux/slab.h>
  15#include <linux/uaccess.h>
  16#include <linux/skbuff.h>
  17#include <linux/netdevice.h>
  18#include <linux/in.h>
  19#include <linux/tcp.h>
  20#include <linux/udp.h>
  21#include <linux/if_arp.h>
  22#include <linux/if_vlan.h>
  23#include <linux/init.h>
  24#include <linux/in6.h>
  25#include <linux/inetdevice.h>
  26#include <linux/igmp.h>
  27#include <linux/netfilter_ipv4.h>
  28#include <linux/etherdevice.h>
  29#include <linux/if_ether.h>
  30
  31#include <net/sock.h>
  32#include <net/ip.h>
  33#include <net/icmp.h>
  34#include <net/protocol.h>
  35#include <net/ip_tunnels.h>
  36#include <net/arp.h>
  37#include <net/checksum.h>
  38#include <net/dsfield.h>
  39#include <net/inet_ecn.h>
  40#include <net/xfrm.h>
  41#include <net/net_namespace.h>
  42#include <net/netns/generic.h>
  43#include <net/rtnetlink.h>
  44#include <net/gre.h>
  45#include <net/dst_metadata.h>
  46#include <net/erspan.h>
  47#include <net/inet_dscp.h>
 
 
 
  48
  49/*
  50   Problems & solutions
  51   --------------------
  52
  53   1. The most important issue is detecting local dead loops.
  54   They would cause complete host lockup in transmit, which
  55   would be "resolved" by stack overflow or, if queueing is enabled,
  56   with infinite looping in net_bh.
  57
  58   We cannot track such dead loops during route installation,
  59   it is infeasible task. The most general solutions would be
  60   to keep skb->encapsulation counter (sort of local ttl),
  61   and silently drop packet when it expires. It is a good
  62   solution, but it supposes maintaining new variable in ALL
  63   skb, even if no tunneling is used.
  64
  65   Current solution: xmit_recursion breaks dead loops. This is a percpu
  66   counter, since when we enter the first ndo_xmit(), cpu migration is
  67   forbidden. We force an exit if this counter reaches RECURSION_LIMIT
  68
  69   2. Networking dead loops would not kill routers, but would really
  70   kill network. IP hop limit plays role of "t->recursion" in this case,
  71   if we copy it from packet being encapsulated to upper header.
  72   It is very good solution, but it introduces two problems:
  73
  74   - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
  75     do not work over tunnels.
  76   - traceroute does not work. I planned to relay ICMP from tunnel,
  77     so that this problem would be solved and traceroute output
  78     would even more informative. This idea appeared to be wrong:
  79     only Linux complies to rfc1812 now (yes, guys, Linux is the only
  80     true router now :-)), all routers (at least, in neighbourhood of mine)
  81     return only 8 bytes of payload. It is the end.
  82
  83   Hence, if we want that OSPF worked or traceroute said something reasonable,
  84   we should search for another solution.
  85
  86   One of them is to parse packet trying to detect inner encapsulation
  87   made by our node. It is difficult or even impossible, especially,
  88   taking into account fragmentation. TO be short, ttl is not solution at all.
  89
  90   Current solution: The solution was UNEXPECTEDLY SIMPLE.
  91   We force DF flag on tunnels with preconfigured hop limit,
  92   that is ALL. :-) Well, it does not remove the problem completely,
  93   but exponential growth of network traffic is changed to linear
  94   (branches, that exceed pmtu are pruned) and tunnel mtu
  95   rapidly degrades to value <68, where looping stops.
  96   Yes, it is not good if there exists a router in the loop,
  97   which does not force DF, even when encapsulating packets have DF set.
  98   But it is not our problem! Nobody could accuse us, we made
  99   all that we could make. Even if it is your gated who injected
 100   fatal route to network, even if it were you who configured
 101   fatal static route: you are innocent. :-)
 102
 
 
 
 
 
 
 
 
 
 103   Alexey Kuznetsov.
 104 */
 105
 106static bool log_ecn_error = true;
 107module_param(log_ecn_error, bool, 0644);
 108MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 109
 110static struct rtnl_link_ops ipgre_link_ops __read_mostly;
 111static const struct header_ops ipgre_header_ops;
 112
 113static int ipgre_tunnel_init(struct net_device *dev);
 114static void erspan_build_header(struct sk_buff *skb,
 115				u32 id, u32 index,
 116				bool truncate, bool is_ipv4);
 117
 118static unsigned int ipgre_net_id __read_mostly;
 119static unsigned int gre_tap_net_id __read_mostly;
 120static unsigned int erspan_net_id __read_mostly;
 121
 122static int ipgre_err(struct sk_buff *skb, u32 info,
 123		     const struct tnl_ptk_info *tpi)
 124{
 125
 126	/* All the routers (except for Linux) return only
 127	   8 bytes of packet payload. It means, that precise relaying of
 128	   ICMP in the real Internet is absolutely infeasible.
 129
 130	   Moreover, Cisco "wise men" put GRE key to the third word
 131	   in GRE header. It makes impossible maintaining even soft
 132	   state for keyed GRE tunnels with enabled checksum. Tell
 133	   them "thank you".
 134
 135	   Well, I wonder, rfc1812 was written by Cisco employee,
 136	   what the hell these idiots break standards established
 137	   by themselves???
 138	   */
 139	struct net *net = dev_net(skb->dev);
 140	struct ip_tunnel_net *itn;
 141	const struct iphdr *iph;
 142	const int type = icmp_hdr(skb)->type;
 143	const int code = icmp_hdr(skb)->code;
 144	unsigned int data_len = 0;
 145	struct ip_tunnel *t;
 146
 147	if (tpi->proto == htons(ETH_P_TEB))
 148		itn = net_generic(net, gre_tap_net_id);
 149	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
 150		 tpi->proto == htons(ETH_P_ERSPAN2))
 151		itn = net_generic(net, erspan_net_id);
 152	else
 153		itn = net_generic(net, ipgre_net_id);
 154
 155	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
 156	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 157			     iph->daddr, iph->saddr, tpi->key);
 158
 159	if (!t)
 160		return -ENOENT;
 
 161
 162	switch (type) {
 163	default:
 164	case ICMP_PARAMETERPROB:
 165		return 0;
 166
 167	case ICMP_DEST_UNREACH:
 168		switch (code) {
 169		case ICMP_SR_FAILED:
 170		case ICMP_PORT_UNREACH:
 171			/* Impossible event. */
 172			return 0;
 173		default:
 174			/* All others are translated to HOST_UNREACH.
 175			   rfc2003 contains "deep thoughts" about NET_UNREACH,
 176			   I believe they are just ether pollution. --ANK
 177			 */
 178			break;
 179		}
 180		break;
 181
 182	case ICMP_TIME_EXCEEDED:
 183		if (code != ICMP_EXC_TTL)
 184			return 0;
 185		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
 186		break;
 187
 188	case ICMP_REDIRECT:
 189		break;
 190	}
 
 
 
 
 
 191
 192#if IS_ENABLED(CONFIG_IPV6)
 193	if (tpi->proto == htons(ETH_P_IPV6) &&
 194	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
 195					type, data_len))
 196		return 0;
 197#endif
 198
 199	if (t->parms.iph.daddr == 0 ||
 200	    ipv4_is_multicast(t->parms.iph.daddr))
 201		return 0;
 202
 203	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
 204		return 0;
 
 
 
 
 
 205
 206	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
 207		t->err_count++;
 208	else
 209		t->err_count = 1;
 210	t->err_time = jiffies;
 211
 212	return 0;
 213}
 
 
 
 
 
 
 214
 215static void gre_err(struct sk_buff *skb, u32 info)
 
 216{
 217	/* All the routers (except for Linux) return only
 218	 * 8 bytes of packet payload. It means, that precise relaying of
 219	 * ICMP in the real Internet is absolutely infeasible.
 220	 *
 221	 * Moreover, Cisco "wise men" put GRE key to the third word
 222	 * in GRE header. It makes impossible maintaining even soft
 223	 * state for keyed
 224	 * GRE tunnels with enabled checksum. Tell them "thank you".
 225	 *
 226	 * Well, I wonder, rfc1812 was written by Cisco employee,
 227	 * what the hell these idiots break standards established
 228	 * by themselves???
 229	 */
 230
 231	const struct iphdr *iph = (struct iphdr *)skb->data;
 232	const int type = icmp_hdr(skb)->type;
 233	const int code = icmp_hdr(skb)->code;
 234	struct tnl_ptk_info tpi;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 235
 236	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
 237			     iph->ihl * 4) < 0)
 238		return;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 239
 240	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
 241		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
 242				 skb->dev->ifindex, IPPROTO_GRE);
 243		return;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 244	}
 245	if (type == ICMP_REDIRECT) {
 246		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
 247			      IPPROTO_GRE);
 248		return;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 249	}
 250
 251	ipgre_err(skb, info, &tpi);
 
 
 
 
 
 
 
 252}
 253
 254static bool is_erspan_type1(int gre_hdr_len)
 
 255{
 256	/* Both ERSPAN type I (version 0) and type II (version 1) use
 257	 * protocol 0x88BE, but the type I has only 4-byte GRE header,
 258	 * while type II has 8-byte.
 259	 */
 260	return gre_hdr_len == 4;
 
 
 
 
 
 
 
 
 
 261}
 262
 263static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 264		      int gre_hdr_len)
 265{
 266	struct net *net = dev_net(skb->dev);
 267	struct metadata_dst *tun_dst = NULL;
 268	struct erspan_base_hdr *ershdr;
 269	IP_TUNNEL_DECLARE_FLAGS(flags);
 270	struct ip_tunnel_net *itn;
 271	struct ip_tunnel *tunnel;
 272	const struct iphdr *iph;
 273	struct erspan_md2 *md2;
 274	int ver;
 275	int len;
 276
 277	ip_tunnel_flags_copy(flags, tpi->flags);
 
 
 278
 279	itn = net_generic(net, erspan_net_id);
 280	iph = ip_hdr(skb);
 281	if (is_erspan_type1(gre_hdr_len)) {
 282		ver = 0;
 283		__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
 284		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
 285					  iph->saddr, iph->daddr, 0);
 286	} else {
 287		if (unlikely(!pskb_may_pull(skb,
 288					    gre_hdr_len + sizeof(*ershdr))))
 289			return PACKET_REJECT;
 290
 291		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
 292		ver = ershdr->ver;
 293		iph = ip_hdr(skb);
 294		__set_bit(IP_TUNNEL_KEY_BIT, flags);
 295		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
 296					  iph->saddr, iph->daddr, tpi->key);
 297	}
 298
 299	if (tunnel) {
 300		if (is_erspan_type1(gre_hdr_len))
 301			len = gre_hdr_len;
 302		else
 303			len = gre_hdr_len + erspan_hdr_len(ver);
 304
 305		if (unlikely(!pskb_may_pull(skb, len)))
 306			return PACKET_REJECT;
 
 
 
 
 
 
 
 307
 308		if (__iptunnel_pull_header(skb,
 309					   len,
 310					   htons(ETH_P_TEB),
 311					   false, false) < 0)
 312			goto drop;
 
 
 
 
 
 
 313
 314		if (tunnel->collect_md) {
 315			struct erspan_metadata *pkt_md, *md;
 316			struct ip_tunnel_info *info;
 317			unsigned char *gh;
 318			__be64 tun_id;
 319
 320			__set_bit(IP_TUNNEL_KEY_BIT, tpi->flags);
 321			ip_tunnel_flags_copy(flags, tpi->flags);
 322			tun_id = key32_to_tunnel_id(tpi->key);
 323
 324			tun_dst = ip_tun_rx_dst(skb, flags,
 325						tun_id, sizeof(*md));
 326			if (!tun_dst)
 327				return PACKET_REJECT;
 328
 329			/* skb can be uncloned in __iptunnel_pull_header, so
 330			 * old pkt_md is no longer valid and we need to reset
 331			 * it
 332			 */
 333			gh = skb_network_header(skb) +
 334			     skb_network_header_len(skb);
 335			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
 336							    sizeof(*ershdr));
 337			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
 338			md->version = ver;
 339			md2 = &md->u.md2;
 340			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
 341						       ERSPAN_V2_MDSIZE);
 342
 343			info = &tun_dst->u.tun_info;
 344			__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
 345				  info->key.tun_flags);
 346			info->options_len = sizeof(*md);
 347		}
 348
 349		skb_reset_mac_header(skb);
 350		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 351		return PACKET_RCVD;
 352	}
 353	return PACKET_REJECT;
 354
 355drop:
 356	kfree_skb(skb);
 357	return PACKET_RCVD;
 358}
 359
 360static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 361		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
 362{
 363	struct metadata_dst *tun_dst = NULL;
 364	const struct iphdr *iph;
 365	struct ip_tunnel *tunnel;
 
 366
 367	iph = ip_hdr(skb);
 368	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 369				  iph->saddr, iph->daddr, tpi->key);
 370
 371	if (tunnel) {
 372		const struct iphdr *tnl_params;
 
 
 373
 374		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
 375					   raw_proto, false) < 0)
 376			goto drop;
 377
 378		/* Special case for ipgre_header_parse(), which expects the
 379		 * mac_header to point to the outer IP header.
 380		 */
 381		if (tunnel->dev->header_ops == &ipgre_header_ops)
 382			skb_pop_mac_header(skb);
 383		else
 384			skb_reset_mac_header(skb);
 385
 386		tnl_params = &tunnel->parms.iph;
 387		if (tunnel->collect_md || tnl_params->daddr == 0) {
 388			IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 389			__be64 tun_id;
 390
 391			__set_bit(IP_TUNNEL_CSUM_BIT, flags);
 392			__set_bit(IP_TUNNEL_KEY_BIT, flags);
 393			ip_tunnel_flags_and(flags, tpi->flags, flags);
 394
 395			tun_id = key32_to_tunnel_id(tpi->key);
 396			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
 397			if (!tun_dst)
 398				return PACKET_REJECT;
 399		}
 400
 401		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 402		return PACKET_RCVD;
 403	}
 404	return PACKET_NEXT;
 405
 406drop:
 407	kfree_skb(skb);
 408	return PACKET_RCVD;
 
 
 
 
 409}
 410
 411static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 412		     int hdr_len)
 413{
 414	struct net *net = dev_net(skb->dev);
 415	struct ip_tunnel_net *itn;
 416	int res;
 417
 418	if (tpi->proto == htons(ETH_P_TEB))
 419		itn = net_generic(net, gre_tap_net_id);
 420	else
 421		itn = net_generic(net, ipgre_net_id);
 422
 423	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
 424	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
 425		/* ipgre tunnels in collect metadata mode should receive
 426		 * also ETH_P_TEB traffic.
 427		 */
 428		itn = net_generic(net, ipgre_net_id);
 429		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
 430	}
 431	return res;
 432}
 433
 434static int gre_rcv(struct sk_buff *skb)
 
 435{
 436	struct tnl_ptk_info tpi;
 437	bool csum_err = false;
 438	int hdr_len;
 439
 440#ifdef CONFIG_NET_IPGRE_BROADCAST
 441	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
 442		/* Looped back packet, drop it! */
 443		if (rt_is_output_route(skb_rtable(skb)))
 444			goto drop;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 445	}
 446#endif
 447
 448	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
 449	if (hdr_len < 0)
 450		goto drop;
 451
 452	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
 453		     tpi.proto == htons(ETH_P_ERSPAN2))) {
 454		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 455			return 0;
 456		goto out;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 457	}
 458
 459	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 460		return 0;
 
 
 
 
 
 
 461
 
 
 
 
 
 
 
 
 462out:
 463	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 464drop:
 465	kfree_skb(skb);
 466	return 0;
 467}
 468
 469static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 470		       const struct iphdr *tnl_params,
 471		       __be16 proto)
 472{
 473	struct ip_tunnel *tunnel = netdev_priv(dev);
 474	IP_TUNNEL_DECLARE_FLAGS(flags);
 475
 476	ip_tunnel_flags_copy(flags, tunnel->parms.o_flags);
 477
 478	/* Push GRE header. */
 479	gre_build_header(skb, tunnel->tun_hlen,
 480			 flags, proto, tunnel->parms.o_key,
 481			 test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
 482			 htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
 483
 484	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 485}
 486
 487static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 
 488{
 489	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 
 
 
 
 
 490}
 491
 492static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 493			__be16 proto)
 494{
 495	struct ip_tunnel *tunnel = netdev_priv(dev);
 496	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 497	struct ip_tunnel_info *tun_info;
 498	const struct ip_tunnel_key *key;
 499	int tunnel_hlen;
 500
 501	tun_info = skb_tunnel_info(skb);
 502	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 503		     ip_tunnel_info_af(tun_info) != AF_INET))
 504		goto err_free_skb;
 505
 506	key = &tun_info->key;
 507	tunnel_hlen = gre_calc_hlen(key->tun_flags);
 508
 509	if (skb_cow_head(skb, dev->needed_headroom))
 510		goto err_free_skb;
 511
 512	/* Push Tunnel header. */
 513	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
 514					      tunnel->parms.o_flags)))
 515		goto err_free_skb;
 516
 517	__set_bit(IP_TUNNEL_CSUM_BIT, flags);
 518	__set_bit(IP_TUNNEL_KEY_BIT, flags);
 519	__set_bit(IP_TUNNEL_SEQ_BIT, flags);
 520	ip_tunnel_flags_and(flags, tun_info->key.tun_flags, flags);
 521
 522	gre_build_header(skb, tunnel_hlen, flags, proto,
 523			 tunnel_id_to_key32(tun_info->key.tun_id),
 524			 test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
 525			 htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
 526
 527	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
 
 528
 529	return;
 
 
 530
 531err_free_skb:
 532	kfree_skb(skb);
 533	DEV_STATS_INC(dev, tx_dropped);
 534}
 
 
 535
 536static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 537{
 538	struct ip_tunnel *tunnel = netdev_priv(dev);
 539	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 540	struct ip_tunnel_info *tun_info;
 541	const struct ip_tunnel_key *key;
 542	struct erspan_metadata *md;
 543	bool truncate = false;
 544	__be16 proto;
 545	int tunnel_hlen;
 546	int version;
 547	int nhoff;
 548
 549	tun_info = skb_tunnel_info(skb);
 550	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 551		     ip_tunnel_info_af(tun_info) != AF_INET))
 552		goto err_free_skb;
 553
 554	key = &tun_info->key;
 555	if (!test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_info->key.tun_flags))
 556		goto err_free_skb;
 557	if (tun_info->options_len < sizeof(*md))
 558		goto err_free_skb;
 559	md = ip_tunnel_info_opts(tun_info);
 560
 561	/* ERSPAN has fixed 8 byte GRE header */
 562	version = md->version;
 563	tunnel_hlen = 8 + erspan_hdr_len(version);
 564
 565	if (skb_cow_head(skb, dev->needed_headroom))
 566		goto err_free_skb;
 567
 568	if (gre_handle_offloads(skb, false))
 569		goto err_free_skb;
 570
 571	if (skb->len > dev->mtu + dev->hard_header_len) {
 572		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
 573			goto err_free_skb;
 574		truncate = true;
 575	}
 576
 577	nhoff = skb_network_offset(skb);
 578	if (skb->protocol == htons(ETH_P_IP) &&
 579	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
 580		truncate = true;
 581
 582	if (skb->protocol == htons(ETH_P_IPV6)) {
 583		int thoff;
 584
 585		if (skb_transport_header_was_set(skb))
 586			thoff = skb_transport_offset(skb);
 587		else
 588			thoff = nhoff + sizeof(struct ipv6hdr);
 589		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
 590			truncate = true;
 591	}
 592
 593	if (version == 1) {
 594		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
 595				    ntohl(md->u.index), truncate, true);
 596		proto = htons(ETH_P_ERSPAN);
 597	} else if (version == 2) {
 598		erspan_build_header_v2(skb,
 599				       ntohl(tunnel_id_to_key32(key->tun_id)),
 600				       md->u.md2.dir,
 601				       get_hwid(&md->u.md2),
 602				       truncate, true);
 603		proto = htons(ETH_P_ERSPAN2);
 604	} else {
 605		goto err_free_skb;
 606	}
 607
 608	__set_bit(IP_TUNNEL_SEQ_BIT, flags);
 609	gre_build_header(skb, 8, flags, proto, 0,
 610			 htonl(atomic_fetch_inc(&tunnel->o_seqno)));
 
 
 
 
 
 
 
 
 
 
 611
 612	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 613
 614	return;
 
 
 
 
 
 
 
 
 
 
 
 615
 616err_free_skb:
 617	kfree_skb(skb);
 618	DEV_STATS_INC(dev, tx_dropped);
 619}
 
 620
 621static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 622{
 623	struct ip_tunnel_info *info = skb_tunnel_info(skb);
 624	const struct ip_tunnel_key *key;
 625	struct rtable *rt;
 626	struct flowi4 fl4;
 627
 628	if (ip_tunnel_info_af(info) != AF_INET)
 629		return -EINVAL;
 630
 631	key = &info->key;
 632	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
 633			    tunnel_id_to_key32(key->tun_id),
 634			    key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
 635			    skb->mark, skb_get_hash(skb), key->flow_flags);
 636	rt = ip_route_output_key(dev_net(dev), &fl4);
 637	if (IS_ERR(rt))
 638		return PTR_ERR(rt);
 639
 640	ip_rt_put(rt);
 641	info->key.u.ipv4.src = fl4.saddr;
 
 
 
 
 
 
 
 642	return 0;
 643}
 644
 645static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
 646			      struct net_device *dev)
 647{
 648	struct ip_tunnel *tunnel = netdev_priv(dev);
 649	const struct iphdr *tnl_params;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 650
 651	if (!pskb_inet_may_pull(skb))
 652		goto free_skb;
 
 
 
 
 
 653
 654	if (tunnel->collect_md) {
 655		gre_fb_xmit(skb, dev, skb->protocol);
 656		return NETDEV_TX_OK;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 657	}
 658
 659	if (dev->header_ops) {
 660		int pull_len = tunnel->hlen + sizeof(struct iphdr);
 
 
 
 
 
 
 661
 662		if (skb_cow_head(skb, 0))
 663			goto free_skb;
 
 
 
 
 
 
 664
 665		if (!pskb_may_pull(skb, pull_len))
 666			goto free_skb;
 
 
 
 667
 668		tnl_params = (const struct iphdr *)skb->data;
 
 
 
 
 669
 670		/* ip_tunnel_xmit() needs skb->data pointing to gre header. */
 671		skb_pull(skb, pull_len);
 672		skb_reset_mac_header(skb);
 673
 674		if (skb->ip_summed == CHECKSUM_PARTIAL &&
 675		    skb_checksum_start(skb) < skb->data)
 676			goto free_skb;
 677	} else {
 678		if (skb_cow_head(skb, dev->needed_headroom))
 679			goto free_skb;
 680
 681		tnl_params = &tunnel->parms.iph;
 
 
 
 
 
 682	}
 
 
 
 683
 684	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
 685					      tunnel->parms.o_flags)))
 686		goto free_skb;
 
 
 
 
 
 687
 688	__gre_xmit(skb, dev, tnl_params, skb->protocol);
 689	return NETDEV_TX_OK;
 
 
 
 
 
 690
 691free_skb:
 692	kfree_skb(skb);
 693	DEV_STATS_INC(dev, tx_dropped);
 694	return NETDEV_TX_OK;
 695}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 696
 697static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 698			       struct net_device *dev)
 699{
 700	struct ip_tunnel *tunnel = netdev_priv(dev);
 701	bool truncate = false;
 702	__be16 proto;
 703
 704	if (!pskb_inet_may_pull(skb))
 705		goto free_skb;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 706
 707	if (tunnel->collect_md) {
 708		erspan_fb_xmit(skb, dev);
 709		return NETDEV_TX_OK;
 710	}
 711
 712	if (gre_handle_offloads(skb, false))
 713		goto free_skb;
 714
 715	if (skb_cow_head(skb, dev->needed_headroom))
 716		goto free_skb;
 717
 718	if (skb->len > dev->mtu + dev->hard_header_len) {
 719		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
 720			goto free_skb;
 721		truncate = true;
 722	}
 723
 724	/* Push ERSPAN header */
 725	if (tunnel->erspan_ver == 0) {
 726		proto = htons(ETH_P_ERSPAN);
 727		__clear_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags);
 728	} else if (tunnel->erspan_ver == 1) {
 729		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
 730				    tunnel->index,
 731				    truncate, true);
 732		proto = htons(ETH_P_ERSPAN);
 733	} else if (tunnel->erspan_ver == 2) {
 734		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
 735				       tunnel->dir, tunnel->hwid,
 736				       truncate, true);
 737		proto = htons(ETH_P_ERSPAN2);
 738	} else {
 739		goto free_skb;
 740	}
 741
 742	__clear_bit(IP_TUNNEL_KEY_BIT, tunnel->parms.o_flags);
 743	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
 
 744	return NETDEV_TX_OK;
 745
 746free_skb:
 747	kfree_skb(skb);
 748	DEV_STATS_INC(dev, tx_dropped);
 
 
 
 
 749	return NETDEV_TX_OK;
 750}
 751
 752static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
 753				struct net_device *dev)
 754{
 755	struct ip_tunnel *tunnel = netdev_priv(dev);
 
 
 
 
 
 756
 757	if (!pskb_inet_may_pull(skb))
 758		goto free_skb;
 759
 760	if (tunnel->collect_md) {
 761		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
 762		return NETDEV_TX_OK;
 763	}
 764
 765	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
 766					      tunnel->parms.o_flags)))
 767		goto free_skb;
 768
 769	if (skb_cow_head(skb, dev->needed_headroom))
 770		goto free_skb;
 
 
 
 
 
 
 
 771
 772	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
 773	return NETDEV_TX_OK;
 
 774
 775free_skb:
 776	kfree_skb(skb);
 777	DEV_STATS_INC(dev, tx_dropped);
 778	return NETDEV_TX_OK;
 779}
 780
 781static void ipgre_link_update(struct net_device *dev, bool set_mtu)
 782{
 783	struct ip_tunnel *tunnel = netdev_priv(dev);
 784	int len;
 
 785
 786	len = tunnel->tun_hlen;
 787	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
 788	len = tunnel->tun_hlen - len;
 789	tunnel->hlen = tunnel->hlen + len;
 
 
 
 
 
 
 
 790
 791	if (dev->header_ops)
 792		dev->hard_header_len += len;
 793	else
 794		dev->needed_headroom += len;
 795
 796	if (set_mtu)
 797		WRITE_ONCE(dev->mtu, max_t(int, dev->mtu - len, 68));
 798
 799	if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags) ||
 800	    (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
 801	     tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
 802		dev->features &= ~NETIF_F_GSO_SOFTWARE;
 803		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
 804	} else {
 805		dev->features |= NETIF_F_GSO_SOFTWARE;
 806		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
 807	}
 808}
 809
 810static int ipgre_tunnel_ctl(struct net_device *dev,
 811			    struct ip_tunnel_parm_kern *p,
 812			    int cmd)
 813{
 814	__be16 i_flags, o_flags;
 815	int err;
 816
 817	if (!ip_tunnel_flags_is_be16_compat(p->i_flags) ||
 818	    !ip_tunnel_flags_is_be16_compat(p->o_flags))
 819		return -EOVERFLOW;
 820
 821	i_flags = ip_tunnel_flags_to_be16(p->i_flags);
 822	o_flags = ip_tunnel_flags_to_be16(p->o_flags);
 823
 824	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
 825		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
 826		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
 827		    ((i_flags | o_flags) & (GRE_VERSION | GRE_ROUTING)))
 828			return -EINVAL;
 829	}
 830
 831	gre_flags_to_tnl_flags(p->i_flags, i_flags);
 832	gre_flags_to_tnl_flags(p->o_flags, o_flags);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 833
 834	err = ip_tunnel_ctl(dev, p, cmd);
 835	if (err)
 836		return err;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 837
 838	if (cmd == SIOCCHGTUNNEL) {
 839		struct ip_tunnel *t = netdev_priv(dev);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 840
 841		ip_tunnel_flags_copy(t->parms.i_flags, p->i_flags);
 842		ip_tunnel_flags_copy(t->parms.o_flags, p->o_flags);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 843
 844		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
 845			ipgre_link_update(dev, true);
 846	}
 847
 848	i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
 849	ip_tunnel_flags_from_be16(p->i_flags, i_flags);
 850	o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
 851	ip_tunnel_flags_from_be16(p->o_flags, o_flags);
 852
 
 
 
 
 
 
 
 853	return 0;
 854}
 855
 856/* Nice toy. Unfortunately, useless in real life :-)
 857   It allows to construct virtual multiprotocol broadcast "LAN"
 858   over the Internet, provided multicast routing is tuned.
 859
 860
 861   I have no idea was this bicycle invented before me,
 862   so that I had to set ARPHRD_IPGRE to a random value.
 863   I have an impression, that Cisco could make something similar,
 864   but this feature is apparently missing in IOS<=11.2(8).
 865
 866   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
 867   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
 868
 869   ping -t 255 224.66.66.66
 870
 871   If nobody answers, mbone does not work.
 872
 873   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
 874   ip addr add 10.66.66.<somewhat>/24 dev Universe
 875   ifconfig Universe up
 876   ifconfig Universe add fe80::<Your_real_addr>/10
 877   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
 878   ftp 10.66.66.66
 879   ...
 880   ftp fec0:6666:6666::193.233.7.65
 881   ...
 
 882 */
 
 883static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
 884			unsigned short type,
 885			const void *daddr, const void *saddr, unsigned int len)
 886{
 887	struct ip_tunnel *t = netdev_priv(dev);
 888	struct iphdr *iph;
 889	struct gre_base_hdr *greh;
 890
 891	iph = skb_push(skb, t->hlen + sizeof(*iph));
 892	greh = (struct gre_base_hdr *)(iph+1);
 893	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
 894	greh->protocol = htons(type);
 895
 896	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
 
 
 
 
 
 
 897
 898	/* Set the source hardware address. */
 899	if (saddr)
 900		memcpy(&iph->saddr, saddr, 4);
 901	if (daddr)
 902		memcpy(&iph->daddr, daddr, 4);
 903	if (iph->daddr)
 904		return t->hlen + sizeof(*iph);
 905
 906	return -(t->hlen + sizeof(*iph));
 907}
 908
 909static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
 910{
 911	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
 912	memcpy(haddr, &iph->saddr, 4);
 913	return 4;
 914}
 915
 916static const struct header_ops ipgre_header_ops = {
 917	.create	= ipgre_header,
 918	.parse	= ipgre_header_parse,
 919};
 920
 921#ifdef CONFIG_NET_IPGRE_BROADCAST
 922static int ipgre_open(struct net_device *dev)
 923{
 924	struct ip_tunnel *t = netdev_priv(dev);
 925
 926	if (ipv4_is_multicast(t->parms.iph.daddr)) {
 927		struct flowi4 fl4;
 928		struct rtable *rt;
 929
 930		rt = ip_route_output_gre(t->net, &fl4,
 931					 t->parms.iph.daddr,
 932					 t->parms.iph.saddr,
 933					 t->parms.o_key,
 934					 t->parms.iph.tos & INET_DSCP_MASK,
 935					 t->parms.link);
 936		if (IS_ERR(rt))
 937			return -EADDRNOTAVAIL;
 938		dev = rt->dst.dev;
 939		ip_rt_put(rt);
 940		if (!__in_dev_get_rtnl(dev))
 941			return -EADDRNOTAVAIL;
 942		t->mlink = dev->ifindex;
 943		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
 944	}
 945	return 0;
 946}
 947
 948static int ipgre_close(struct net_device *dev)
 949{
 950	struct ip_tunnel *t = netdev_priv(dev);
 951
 952	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
 953		struct in_device *in_dev;
 954		in_dev = inetdev_by_index(t->net, t->mlink);
 955		if (in_dev)
 956			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
 957	}
 958	return 0;
 959}
 
 960#endif
 961
 962static const struct net_device_ops ipgre_netdev_ops = {
 963	.ndo_init		= ipgre_tunnel_init,
 964	.ndo_uninit		= ip_tunnel_uninit,
 965#ifdef CONFIG_NET_IPGRE_BROADCAST
 966	.ndo_open		= ipgre_open,
 967	.ndo_stop		= ipgre_close,
 968#endif
 969	.ndo_start_xmit		= ipgre_xmit,
 970	.ndo_siocdevprivate	= ip_tunnel_siocdevprivate,
 971	.ndo_change_mtu		= ip_tunnel_change_mtu,
 972	.ndo_get_stats64	= dev_get_tstats64,
 973	.ndo_get_iflink		= ip_tunnel_get_iflink,
 974	.ndo_tunnel_ctl		= ipgre_tunnel_ctl,
 975};
 976
 977#define GRE_FEATURES (NETIF_F_SG |		\
 978		      NETIF_F_FRAGLIST |	\
 979		      NETIF_F_HIGHDMA |		\
 980		      NETIF_F_HW_CSUM)
 
 981
 982static void ipgre_tunnel_setup(struct net_device *dev)
 983{
 984	dev->netdev_ops		= &ipgre_netdev_ops;
 
 
 985	dev->type		= ARPHRD_IPGRE;
 986	ip_tunnel_setup(dev, ipgre_net_id);
 
 
 
 
 
 
 987}
 988
 989static void __gre_tunnel_init(struct net_device *dev)
 990{
 991	struct ip_tunnel *tunnel;
 
 992
 993	tunnel = netdev_priv(dev);
 994	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
 995	tunnel->parms.iph.protocol = IPPROTO_GRE;
 996
 997	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
 998	dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
 999
1000	dev->features		|= GRE_FEATURES;
1001	dev->hw_features	|= GRE_FEATURES;
1002
1003	/* TCP offload with GRE SEQ is not supported, nor can we support 2
1004	 * levels of outer headers requiring an update.
1005	 */
1006	if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags))
1007		return;
1008	if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
1009	    tunnel->encap.type != TUNNEL_ENCAP_NONE)
1010		return;
1011
1012	dev->features |= NETIF_F_GSO_SOFTWARE;
1013	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1014
1015	dev->lltx = true;
1016}
1017
1018static int ipgre_tunnel_init(struct net_device *dev)
1019{
1020	struct ip_tunnel *tunnel = netdev_priv(dev);
1021	struct iphdr *iph = &tunnel->parms.iph;
1022
1023	__gre_tunnel_init(dev);
1024
1025	__dev_addr_set(dev, &iph->saddr, 4);
1026	memcpy(dev->broadcast, &iph->daddr, 4);
1027
1028	dev->flags		= IFF_NOARP;
1029	netif_keep_dst(dev);
1030	dev->addr_len		= 4;
1031
1032	if (iph->daddr && !tunnel->collect_md) {
1033#ifdef CONFIG_NET_IPGRE_BROADCAST
1034		if (ipv4_is_multicast(iph->daddr)) {
1035			if (!iph->saddr)
1036				return -EINVAL;
1037			dev->flags = IFF_BROADCAST;
1038			dev->header_ops = &ipgre_header_ops;
1039			dev->hard_header_len = tunnel->hlen + sizeof(*iph);
1040			dev->needed_headroom = 0;
1041		}
1042#endif
1043	} else if (!tunnel->collect_md) {
1044		dev->header_ops = &ipgre_header_ops;
1045		dev->hard_header_len = tunnel->hlen + sizeof(*iph);
1046		dev->needed_headroom = 0;
1047	}
1048
1049	return ip_tunnel_init(dev);
 
 
 
 
1050}
1051
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1052static const struct gre_protocol ipgre_protocol = {
1053	.handler     = gre_rcv,
1054	.err_handler = gre_err,
1055};
1056
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1057static int __net_init ipgre_init_net(struct net *net)
1058{
1059	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1060}
1061
1062static void __net_exit ipgre_exit_batch_rtnl(struct list_head *list_net,
1063					     struct list_head *dev_to_kill)
1064{
1065	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops,
1066			      dev_to_kill);
 
 
 
 
 
 
1067}
1068
1069static struct pernet_operations ipgre_net_ops = {
1070	.init = ipgre_init_net,
1071	.exit_batch_rtnl = ipgre_exit_batch_rtnl,
1072	.id   = &ipgre_net_id,
1073	.size = sizeof(struct ip_tunnel_net),
1074};
1075
1076static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1077				 struct netlink_ext_ack *extack)
1078{
1079	__be16 flags;
1080
1081	if (!data)
1082		return 0;
1083
1084	flags = 0;
1085	if (data[IFLA_GRE_IFLAGS])
1086		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1087	if (data[IFLA_GRE_OFLAGS])
1088		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1089	if (flags & (GRE_VERSION|GRE_ROUTING))
1090		return -EINVAL;
1091
1092	if (data[IFLA_GRE_COLLECT_METADATA] &&
1093	    data[IFLA_GRE_ENCAP_TYPE] &&
1094	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
1095		return -EINVAL;
1096
1097	return 0;
1098}
1099
1100static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1101			      struct netlink_ext_ack *extack)
1102{
1103	__be32 daddr;
1104
1105	if (tb[IFLA_ADDRESS]) {
1106		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1107			return -EINVAL;
1108		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1109			return -EADDRNOTAVAIL;
1110	}
1111
1112	if (!data)
1113		goto out;
1114
1115	if (data[IFLA_GRE_REMOTE]) {
1116		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1117		if (!daddr)
1118			return -EINVAL;
1119	}
1120
1121out:
1122	return ipgre_tunnel_validate(tb, data, extack);
1123}
1124
1125static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
1126			   struct netlink_ext_ack *extack)
1127{
1128	__be16 flags = 0;
1129	int ret;
1130
1131	if (!data)
1132		return 0;
1133
1134	ret = ipgre_tap_validate(tb, data, extack);
1135	if (ret)
1136		return ret;
1137
1138	if (data[IFLA_GRE_ERSPAN_VER] &&
1139	    nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
1140		return 0;
1141
1142	/* ERSPAN type II/III should only have GRE sequence and key flag */
1143	if (data[IFLA_GRE_OFLAGS])
1144		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1145	if (data[IFLA_GRE_IFLAGS])
1146		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1147	if (!data[IFLA_GRE_COLLECT_METADATA] &&
1148	    flags != (GRE_SEQ | GRE_KEY))
1149		return -EINVAL;
1150
1151	/* ERSPAN Session ID only has 10-bit. Since we reuse
1152	 * 32-bit key field as ID, check it's range.
1153	 */
1154	if (data[IFLA_GRE_IKEY] &&
1155	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1156		return -EINVAL;
1157
1158	if (data[IFLA_GRE_OKEY] &&
1159	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1160		return -EINVAL;
1161
1162	return 0;
1163}
1164
1165static int ipgre_netlink_parms(struct net_device *dev,
1166				struct nlattr *data[],
1167				struct nlattr *tb[],
1168				struct ip_tunnel_parm_kern *parms,
1169				__u32 *fwmark)
1170{
1171	struct ip_tunnel *t = netdev_priv(dev);
1172
1173	memset(parms, 0, sizeof(*parms));
1174
1175	parms->iph.protocol = IPPROTO_GRE;
1176
1177	if (!data)
1178		return 0;
1179
1180	if (data[IFLA_GRE_LINK])
1181		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1182
1183	if (data[IFLA_GRE_IFLAGS])
1184		gre_flags_to_tnl_flags(parms->i_flags,
1185				       nla_get_be16(data[IFLA_GRE_IFLAGS]));
1186
1187	if (data[IFLA_GRE_OFLAGS])
1188		gre_flags_to_tnl_flags(parms->o_flags,
1189				       nla_get_be16(data[IFLA_GRE_OFLAGS]));
1190
1191	if (data[IFLA_GRE_IKEY])
1192		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1193
1194	if (data[IFLA_GRE_OKEY])
1195		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1196
1197	if (data[IFLA_GRE_LOCAL])
1198		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
1199
1200	if (data[IFLA_GRE_REMOTE])
1201		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
1202
1203	if (data[IFLA_GRE_TTL])
1204		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1205
1206	if (data[IFLA_GRE_TOS])
1207		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1208
1209	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
1210		if (t->ignore_df)
1211			return -EINVAL;
1212		parms->iph.frag_off = htons(IP_DF);
1213	}
1214
1215	if (data[IFLA_GRE_COLLECT_METADATA]) {
1216		t->collect_md = true;
1217		if (dev->type == ARPHRD_IPGRE)
1218			dev->type = ARPHRD_NONE;
1219	}
1220
1221	if (data[IFLA_GRE_IGNORE_DF]) {
1222		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
1223		  && (parms->iph.frag_off & htons(IP_DF)))
1224			return -EINVAL;
1225		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
1226	}
1227
1228	if (data[IFLA_GRE_FWMARK])
1229		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1230
1231	return 0;
1232}
1233
1234static int erspan_netlink_parms(struct net_device *dev,
1235				struct nlattr *data[],
1236				struct nlattr *tb[],
1237				struct ip_tunnel_parm_kern *parms,
1238				__u32 *fwmark)
1239{
1240	struct ip_tunnel *t = netdev_priv(dev);
1241	int err;
1242
1243	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
1244	if (err)
1245		return err;
1246	if (!data)
1247		return 0;
1248
1249	if (data[IFLA_GRE_ERSPAN_VER]) {
1250		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1251
1252		if (t->erspan_ver > 2)
1253			return -EINVAL;
1254	}
1255
1256	if (t->erspan_ver == 1) {
1257		if (data[IFLA_GRE_ERSPAN_INDEX]) {
1258			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1259			if (t->index & ~INDEX_MASK)
1260				return -EINVAL;
1261		}
1262	} else if (t->erspan_ver == 2) {
1263		if (data[IFLA_GRE_ERSPAN_DIR]) {
1264			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1265			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
1266				return -EINVAL;
1267		}
1268		if (data[IFLA_GRE_ERSPAN_HWID]) {
1269			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1270			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
1271				return -EINVAL;
1272		}
1273	}
1274
1275	return 0;
1276}
1277
1278/* This function returns true when ENCAP attributes are present in the nl msg */
1279static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1280				      struct ip_tunnel_encap *ipencap)
1281{
1282	bool ret = false;
1283
1284	memset(ipencap, 0, sizeof(*ipencap));
1285
1286	if (!data)
1287		return ret;
1288
1289	if (data[IFLA_GRE_ENCAP_TYPE]) {
1290		ret = true;
1291		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1292	}
1293
1294	if (data[IFLA_GRE_ENCAP_FLAGS]) {
1295		ret = true;
1296		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1297	}
1298
1299	if (data[IFLA_GRE_ENCAP_SPORT]) {
1300		ret = true;
1301		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1302	}
1303
1304	if (data[IFLA_GRE_ENCAP_DPORT]) {
1305		ret = true;
1306		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1307	}
1308
1309	return ret;
1310}
1311
1312static int gre_tap_init(struct net_device *dev)
1313{
1314	__gre_tunnel_init(dev);
1315	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1316	netif_keep_dst(dev);
1317
1318	return ip_tunnel_init(dev);
1319}
1320
1321static const struct net_device_ops gre_tap_netdev_ops = {
1322	.ndo_init		= gre_tap_init,
1323	.ndo_uninit		= ip_tunnel_uninit,
1324	.ndo_start_xmit		= gre_tap_xmit,
1325	.ndo_set_mac_address 	= eth_mac_addr,
1326	.ndo_validate_addr	= eth_validate_addr,
1327	.ndo_change_mtu		= ip_tunnel_change_mtu,
1328	.ndo_get_stats64	= dev_get_tstats64,
1329	.ndo_get_iflink		= ip_tunnel_get_iflink,
1330	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1331};
1332
1333static int erspan_tunnel_init(struct net_device *dev)
1334{
1335	struct ip_tunnel *tunnel = netdev_priv(dev);
1336
1337	if (tunnel->erspan_ver == 0)
1338		tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
1339	else
1340		tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */
1341
1342	tunnel->parms.iph.protocol = IPPROTO_GRE;
1343	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1344		       erspan_hdr_len(tunnel->erspan_ver);
1345
1346	dev->features		|= GRE_FEATURES;
1347	dev->hw_features	|= GRE_FEATURES;
1348	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
1349	netif_keep_dst(dev);
1350
1351	return ip_tunnel_init(dev);
 
1352}
1353
1354static const struct net_device_ops erspan_netdev_ops = {
1355	.ndo_init		= erspan_tunnel_init,
1356	.ndo_uninit		= ip_tunnel_uninit,
1357	.ndo_start_xmit		= erspan_xmit,
1358	.ndo_set_mac_address	= eth_mac_addr,
1359	.ndo_validate_addr	= eth_validate_addr,
1360	.ndo_change_mtu		= ip_tunnel_change_mtu,
1361	.ndo_get_stats64	= dev_get_tstats64,
1362	.ndo_get_iflink		= ip_tunnel_get_iflink,
1363	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1364};
1365
1366static void ipgre_tap_setup(struct net_device *dev)
1367{
1368	ether_setup(dev);
1369	dev->max_mtu = 0;
1370	dev->netdev_ops	= &gre_tap_netdev_ops;
1371	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1372	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
1373	ip_tunnel_setup(dev, gre_tap_net_id);
1374}
1375
1376static int
1377ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
1378{
1379	struct ip_tunnel_encap ipencap;
1380
1381	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1382		struct ip_tunnel *t = netdev_priv(dev);
1383		int err = ip_tunnel_encap_setup(t, &ipencap);
1384
1385		if (err < 0)
1386			return err;
1387	}
1388
1389	return 0;
1390}
 
1391
1392static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1393			 struct nlattr *tb[], struct nlattr *data[],
1394			 struct netlink_ext_ack *extack)
1395{
1396	struct ip_tunnel_parm_kern p;
1397	__u32 fwmark = 0;
1398	int err;
1399
1400	err = ipgre_newlink_encap_setup(dev, data);
1401	if (err)
1402		return err;
1403
1404	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1405	if (err < 0)
1406		return err;
1407	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1408}
1409
1410static int erspan_newlink(struct net *src_net, struct net_device *dev,
1411			  struct nlattr *tb[], struct nlattr *data[],
1412			  struct netlink_ext_ack *extack)
1413{
1414	struct ip_tunnel_parm_kern p;
1415	__u32 fwmark = 0;
1416	int err;
1417
1418	err = ipgre_newlink_encap_setup(dev, data);
1419	if (err)
1420		return err;
1421
1422	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1423	if (err)
1424		return err;
1425	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1426}
1427
1428static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1429			    struct nlattr *data[],
1430			    struct netlink_ext_ack *extack)
1431{
1432	struct ip_tunnel *t = netdev_priv(dev);
1433	struct ip_tunnel_parm_kern p;
1434	__u32 fwmark = t->fwmark;
1435	int err;
1436
1437	err = ipgre_newlink_encap_setup(dev, data);
1438	if (err)
1439		return err;
1440
1441	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1442	if (err < 0)
1443		return err;
1444
1445	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1446	if (err < 0)
1447		return err;
1448
1449	ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
1450	ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);
1451
1452	ipgre_link_update(dev, !tb[IFLA_MTU]);
1453
1454	return 0;
1455}
 
 
 
1456
1457static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
1458			     struct nlattr *data[],
1459			     struct netlink_ext_ack *extack)
1460{
1461	struct ip_tunnel *t = netdev_priv(dev);
1462	struct ip_tunnel_parm_kern p;
1463	__u32 fwmark = t->fwmark;
1464	int err;
1465
1466	err = ipgre_newlink_encap_setup(dev, data);
1467	if (err)
1468		return err;
 
1469
1470	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1471	if (err < 0)
1472		return err;
 
1473
1474	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1475	if (err < 0)
1476		return err;
 
 
 
 
 
 
 
 
1477
1478	ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
1479	ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);
 
 
 
 
 
 
 
 
 
 
1480
1481	return 0;
1482}
1483
1484static size_t ipgre_get_size(const struct net_device *dev)
1485{
1486	return
1487		/* IFLA_GRE_LINK */
1488		nla_total_size(4) +
1489		/* IFLA_GRE_IFLAGS */
1490		nla_total_size(2) +
1491		/* IFLA_GRE_OFLAGS */
1492		nla_total_size(2) +
1493		/* IFLA_GRE_IKEY */
1494		nla_total_size(4) +
1495		/* IFLA_GRE_OKEY */
1496		nla_total_size(4) +
1497		/* IFLA_GRE_LOCAL */
1498		nla_total_size(4) +
1499		/* IFLA_GRE_REMOTE */
1500		nla_total_size(4) +
1501		/* IFLA_GRE_TTL */
1502		nla_total_size(1) +
1503		/* IFLA_GRE_TOS */
1504		nla_total_size(1) +
1505		/* IFLA_GRE_PMTUDISC */
1506		nla_total_size(1) +
1507		/* IFLA_GRE_ENCAP_TYPE */
1508		nla_total_size(2) +
1509		/* IFLA_GRE_ENCAP_FLAGS */
1510		nla_total_size(2) +
1511		/* IFLA_GRE_ENCAP_SPORT */
1512		nla_total_size(2) +
1513		/* IFLA_GRE_ENCAP_DPORT */
1514		nla_total_size(2) +
1515		/* IFLA_GRE_COLLECT_METADATA */
1516		nla_total_size(0) +
1517		/* IFLA_GRE_IGNORE_DF */
1518		nla_total_size(1) +
1519		/* IFLA_GRE_FWMARK */
1520		nla_total_size(4) +
1521		/* IFLA_GRE_ERSPAN_INDEX */
1522		nla_total_size(4) +
1523		/* IFLA_GRE_ERSPAN_VER */
1524		nla_total_size(1) +
1525		/* IFLA_GRE_ERSPAN_DIR */
1526		nla_total_size(1) +
1527		/* IFLA_GRE_ERSPAN_HWID */
1528		nla_total_size(2) +
1529		0;
1530}
1531
1532static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1533{
1534	struct ip_tunnel *t = netdev_priv(dev);
1535	struct ip_tunnel_parm_kern *p = &t->parms;
1536	IP_TUNNEL_DECLARE_FLAGS(o_flags);
1537
1538	ip_tunnel_flags_copy(o_flags, p->o_flags);
1539
1540	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1541	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
1542			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1543	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
1544			 gre_tnl_flags_to_gre_flags(o_flags)) ||
1545	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1546	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1547	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1548	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1549	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1550	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1551	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1552		       !!(p->iph.frag_off & htons(IP_DF))) ||
1553	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
1554		goto nla_put_failure;
1555
1556	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1557			t->encap.type) ||
1558	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
1559			 t->encap.sport) ||
1560	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
1561			 t->encap.dport) ||
1562	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
1563			t->encap.flags))
1564		goto nla_put_failure;
1565
1566	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
1567		goto nla_put_failure;
1568
1569	if (t->collect_md) {
1570		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
1571			goto nla_put_failure;
1572	}
1573
1574	return 0;
1575
1576nla_put_failure:
1577	return -EMSGSIZE;
1578}
1579
1580static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1581{
1582	struct ip_tunnel *t = netdev_priv(dev);
1583
1584	if (t->erspan_ver <= 2) {
1585		if (t->erspan_ver != 0 && !t->collect_md)
1586			__set_bit(IP_TUNNEL_KEY_BIT, t->parms.o_flags);
1587
1588		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1589			goto nla_put_failure;
1590
1591		if (t->erspan_ver == 1) {
1592			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1593				goto nla_put_failure;
1594		} else if (t->erspan_ver == 2) {
1595			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1596				goto nla_put_failure;
1597			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1598				goto nla_put_failure;
1599		}
1600	}
1601
1602	return ipgre_fill_info(skb, dev);
1603
1604nla_put_failure:
1605	return -EMSGSIZE;
1606}
1607
1608static void erspan_setup(struct net_device *dev)
1609{
1610	struct ip_tunnel *t = netdev_priv(dev);
1611
1612	ether_setup(dev);
1613	dev->max_mtu = 0;
1614	dev->netdev_ops = &erspan_netdev_ops;
1615	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1616	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1617	ip_tunnel_setup(dev, erspan_net_id);
1618	t->erspan_ver = 1;
1619}
1620
1621static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1622	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1623	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1624	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1625	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1626	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1627	[IFLA_GRE_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
1628	[IFLA_GRE_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
1629	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1630	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1631	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
1632	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
1633	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
1634	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
1635	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
1636	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
1637	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
1638	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
1639	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
1640	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
1641	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
1642	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
1643};
1644
1645static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1646	.kind		= "gre",
1647	.maxtype	= IFLA_GRE_MAX,
1648	.policy		= ipgre_policy,
1649	.priv_size	= sizeof(struct ip_tunnel),
1650	.setup		= ipgre_tunnel_setup,
1651	.validate	= ipgre_tunnel_validate,
1652	.newlink	= ipgre_newlink,
1653	.changelink	= ipgre_changelink,
1654	.dellink	= ip_tunnel_dellink,
1655	.get_size	= ipgre_get_size,
1656	.fill_info	= ipgre_fill_info,
1657	.get_link_net	= ip_tunnel_get_link_net,
1658};
1659
1660static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1661	.kind		= "gretap",
1662	.maxtype	= IFLA_GRE_MAX,
1663	.policy		= ipgre_policy,
1664	.priv_size	= sizeof(struct ip_tunnel),
1665	.setup		= ipgre_tap_setup,
1666	.validate	= ipgre_tap_validate,
1667	.newlink	= ipgre_newlink,
1668	.changelink	= ipgre_changelink,
1669	.dellink	= ip_tunnel_dellink,
1670	.get_size	= ipgre_get_size,
1671	.fill_info	= ipgre_fill_info,
1672	.get_link_net	= ip_tunnel_get_link_net,
1673};
1674
1675static struct rtnl_link_ops erspan_link_ops __read_mostly = {
1676	.kind		= "erspan",
1677	.maxtype	= IFLA_GRE_MAX,
1678	.policy		= ipgre_policy,
1679	.priv_size	= sizeof(struct ip_tunnel),
1680	.setup		= erspan_setup,
1681	.validate	= erspan_validate,
1682	.newlink	= erspan_newlink,
1683	.changelink	= erspan_changelink,
1684	.dellink	= ip_tunnel_dellink,
1685	.get_size	= ipgre_get_size,
1686	.fill_info	= erspan_fill_info,
1687	.get_link_net	= ip_tunnel_get_link_net,
1688};
1689
1690struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1691					u8 name_assign_type)
1692{
1693	struct nlattr *tb[IFLA_MAX + 1];
1694	struct net_device *dev;
1695	LIST_HEAD(list_kill);
1696	struct ip_tunnel *t;
1697	int err;
1698
1699	memset(&tb, 0, sizeof(tb));
1700
1701	dev = rtnl_create_link(net, name, name_assign_type,
1702			       &ipgre_tap_ops, tb, NULL);
1703	if (IS_ERR(dev))
1704		return dev;
1705
1706	/* Configure flow based GRE device. */
1707	t = netdev_priv(dev);
1708	t->collect_md = true;
1709
1710	err = ipgre_newlink(net, dev, tb, NULL, NULL);
1711	if (err < 0) {
1712		free_netdev(dev);
1713		return ERR_PTR(err);
1714	}
1715
1716	/* openvswitch users expect packet sizes to be unrestricted,
1717	 * so set the largest MTU we can.
1718	 */
1719	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1720	if (err)
1721		goto out;
1722
1723	err = rtnl_configure_link(dev, NULL, 0, NULL);
1724	if (err < 0)
1725		goto out;
1726
1727	return dev;
1728out:
1729	ip_tunnel_dellink(dev, &list_kill);
1730	unregister_netdevice_many(&list_kill);
1731	return ERR_PTR(err);
1732}
1733EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
1734
1735static int __net_init ipgre_tap_init_net(struct net *net)
1736{
1737	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
1738}
1739
1740static void __net_exit ipgre_tap_exit_batch_rtnl(struct list_head *list_net,
1741						 struct list_head *dev_to_kill)
1742{
1743	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops,
1744			      dev_to_kill);
1745}
1746
1747static struct pernet_operations ipgre_tap_net_ops = {
1748	.init = ipgre_tap_init_net,
1749	.exit_batch_rtnl = ipgre_tap_exit_batch_rtnl,
1750	.id   = &gre_tap_net_id,
1751	.size = sizeof(struct ip_tunnel_net),
1752};
1753
1754static int __net_init erspan_init_net(struct net *net)
1755{
1756	return ip_tunnel_init_net(net, erspan_net_id,
1757				  &erspan_link_ops, "erspan0");
1758}
1759
1760static void __net_exit erspan_exit_batch_rtnl(struct list_head *net_list,
1761					      struct list_head *dev_to_kill)
1762{
1763	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops,
1764			      dev_to_kill);
1765}
1766
1767static struct pernet_operations erspan_net_ops = {
1768	.init = erspan_init_net,
1769	.exit_batch_rtnl = erspan_exit_batch_rtnl,
1770	.id   = &erspan_net_id,
1771	.size = sizeof(struct ip_tunnel_net),
1772};
1773
1774static int __init ipgre_init(void)
1775{
1776	int err;
1777
1778	pr_info("GRE over IPv4 tunneling driver\n");
1779
1780	err = register_pernet_device(&ipgre_net_ops);
1781	if (err < 0)
1782		return err;
1783
1784	err = register_pernet_device(&ipgre_tap_net_ops);
1785	if (err < 0)
1786		goto pnet_tap_failed;
1787
1788	err = register_pernet_device(&erspan_net_ops);
1789	if (err < 0)
1790		goto pnet_erspan_failed;
1791
1792	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1793	if (err < 0) {
1794		pr_info("%s: can't add protocol\n", __func__);
1795		goto add_proto_failed;
1796	}
1797
1798	err = rtnl_link_register(&ipgre_link_ops);
1799	if (err < 0)
1800		goto rtnl_link_failed;
1801
1802	err = rtnl_link_register(&ipgre_tap_ops);
1803	if (err < 0)
1804		goto tap_ops_failed;
1805
1806	err = rtnl_link_register(&erspan_link_ops);
1807	if (err < 0)
1808		goto erspan_link_failed;
1809
1810	return 0;
1811
1812erspan_link_failed:
1813	rtnl_link_unregister(&ipgre_tap_ops);
1814tap_ops_failed:
1815	rtnl_link_unregister(&ipgre_link_ops);
1816rtnl_link_failed:
1817	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1818add_proto_failed:
1819	unregister_pernet_device(&erspan_net_ops);
1820pnet_erspan_failed:
1821	unregister_pernet_device(&ipgre_tap_net_ops);
1822pnet_tap_failed:
1823	unregister_pernet_device(&ipgre_net_ops);
1824	return err;
1825}
1826
1827static void __exit ipgre_fini(void)
1828{
1829	rtnl_link_unregister(&ipgre_tap_ops);
1830	rtnl_link_unregister(&ipgre_link_ops);
1831	rtnl_link_unregister(&erspan_link_ops);
1832	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1833	unregister_pernet_device(&ipgre_tap_net_ops);
1834	unregister_pernet_device(&ipgre_net_ops);
1835	unregister_pernet_device(&erspan_net_ops);
1836}
1837
1838module_init(ipgre_init);
1839module_exit(ipgre_fini);
1840MODULE_DESCRIPTION("IPv4 GRE tunnels over IP library");
1841MODULE_LICENSE("GPL");
1842MODULE_ALIAS_RTNL_LINK("gre");
1843MODULE_ALIAS_RTNL_LINK("gretap");
1844MODULE_ALIAS_RTNL_LINK("erspan");
1845MODULE_ALIAS_NETDEV("gre0");
1846MODULE_ALIAS_NETDEV("gretap0");
1847MODULE_ALIAS_NETDEV("erspan0");
v3.5.6
 
   1/*
   2 *	Linux NET3:	GRE over IP protocol decoder.
   3 *
   4 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
   5 *
   6 *	This program is free software; you can redistribute it and/or
   7 *	modify it under the terms of the GNU General Public License
   8 *	as published by the Free Software Foundation; either version
   9 *	2 of the License, or (at your option) any later version.
  10 *
  11 */
  12
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14
  15#include <linux/capability.h>
  16#include <linux/module.h>
  17#include <linux/types.h>
  18#include <linux/kernel.h>
  19#include <linux/slab.h>
  20#include <asm/uaccess.h>
  21#include <linux/skbuff.h>
  22#include <linux/netdevice.h>
  23#include <linux/in.h>
  24#include <linux/tcp.h>
  25#include <linux/udp.h>
  26#include <linux/if_arp.h>
  27#include <linux/mroute.h>
  28#include <linux/init.h>
  29#include <linux/in6.h>
  30#include <linux/inetdevice.h>
  31#include <linux/igmp.h>
  32#include <linux/netfilter_ipv4.h>
  33#include <linux/etherdevice.h>
  34#include <linux/if_ether.h>
  35
  36#include <net/sock.h>
  37#include <net/ip.h>
  38#include <net/icmp.h>
  39#include <net/protocol.h>
  40#include <net/ipip.h>
  41#include <net/arp.h>
  42#include <net/checksum.h>
  43#include <net/dsfield.h>
  44#include <net/inet_ecn.h>
  45#include <net/xfrm.h>
  46#include <net/net_namespace.h>
  47#include <net/netns/generic.h>
  48#include <net/rtnetlink.h>
  49#include <net/gre.h>
  50
  51#if IS_ENABLED(CONFIG_IPV6)
  52#include <net/ipv6.h>
  53#include <net/ip6_fib.h>
  54#include <net/ip6_route.h>
  55#endif
  56
  57/*
  58   Problems & solutions
  59   --------------------
  60
  61   1. The most important issue is detecting local dead loops.
  62   They would cause complete host lockup in transmit, which
  63   would be "resolved" by stack overflow or, if queueing is enabled,
  64   with infinite looping in net_bh.
  65
  66   We cannot track such dead loops during route installation,
  67   it is infeasible task. The most general solutions would be
  68   to keep skb->encapsulation counter (sort of local ttl),
  69   and silently drop packet when it expires. It is a good
  70   solution, but it supposes maintaining new variable in ALL
  71   skb, even if no tunneling is used.
  72
  73   Current solution: xmit_recursion breaks dead loops. This is a percpu
  74   counter, since when we enter the first ndo_xmit(), cpu migration is
  75   forbidden. We force an exit if this counter reaches RECURSION_LIMIT
  76
  77   2. Networking dead loops would not kill routers, but would really
  78   kill network. IP hop limit plays role of "t->recursion" in this case,
  79   if we copy it from packet being encapsulated to upper header.
  80   It is very good solution, but it introduces two problems:
  81
  82   - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
  83     do not work over tunnels.
  84   - traceroute does not work. I planned to relay ICMP from tunnel,
  85     so that this problem would be solved and traceroute output
  86     would even more informative. This idea appeared to be wrong:
  87     only Linux complies to rfc1812 now (yes, guys, Linux is the only
  88     true router now :-)), all routers (at least, in neighbourhood of mine)
  89     return only 8 bytes of payload. It is the end.
  90
  91   Hence, if we want that OSPF worked or traceroute said something reasonable,
  92   we should search for another solution.
  93
  94   One of them is to parse packet trying to detect inner encapsulation
  95   made by our node. It is difficult or even impossible, especially,
  96   taking into account fragmentation. TO be short, ttl is not solution at all.
  97
  98   Current solution: The solution was UNEXPECTEDLY SIMPLE.
  99   We force DF flag on tunnels with preconfigured hop limit,
 100   that is ALL. :-) Well, it does not remove the problem completely,
 101   but exponential growth of network traffic is changed to linear
 102   (branches, that exceed pmtu are pruned) and tunnel mtu
 103   rapidly degrades to value <68, where looping stops.
 104   Yes, it is not good if there exists a router in the loop,
 105   which does not force DF, even when encapsulating packets have DF set.
 106   But it is not our problem! Nobody could accuse us, we made
 107   all that we could make. Even if it is your gated who injected
 108   fatal route to network, even if it were you who configured
 109   fatal static route: you are innocent. :-)
 110
 111
 112
 113   3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
 114   practically identical code. It would be good to glue them
 115   together, but it is not very evident, how to make them modular.
 116   sit is integral part of IPv6, ipip and gre are naturally modular.
 117   We could extract common parts (hash table, ioctl etc)
 118   to a separate module (ip_tunnel.c).
 119
 120   Alexey Kuznetsov.
 121 */
 122
 
 
 
 
 123static struct rtnl_link_ops ipgre_link_ops __read_mostly;
 
 
 124static int ipgre_tunnel_init(struct net_device *dev);
 125static void ipgre_tunnel_setup(struct net_device *dev);
 126static int ipgre_tunnel_bind_dev(struct net_device *dev);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 127
 128/* Fallback tunnel: no source, no destination, no key, no options */
 
 
 
 
 
 
 129
 130#define HASH_SIZE  16
 
 
 131
 132static int ipgre_net_id __read_mostly;
 133struct ipgre_net {
 134	struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];
 135
 136	struct net_device *fb_tunnel_dev;
 137};
 
 
 138
 139/* Tunnel hash table */
 
 
 
 
 
 
 
 
 
 
 
 
 
 140
 141/*
 142   4 hash tables:
 
 
 
 143
 144   3: (remote,local)
 145   2: (remote,*)
 146   1: (*,local)
 147   0: (*,*)
 148
 149   We require exact key match i.e. if a key is present in packet
 150   it will match only tunnel with the same key; if it is not present,
 151   it will match only keyless tunnel.
 152
 153   All keysless packets, if not matched configured keyless tunnels
 154   will match fallback tunnel.
 155 */
 
 
 
 156
 157#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
 
 
 158
 159#define tunnels_r_l	tunnels[3]
 160#define tunnels_r	tunnels[2]
 161#define tunnels_l	tunnels[1]
 162#define tunnels_wc	tunnels[0]
 163/*
 164 * Locking : hash tables are protected by RCU and RTNL
 165 */
 166
 167#define for_each_ip_tunnel_rcu(start) \
 168	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
 
 
 
 169
 170/* often modified stats are per cpu, other are shared (netdev->stats) */
 171struct pcpu_tstats {
 172	u64	rx_packets;
 173	u64	rx_bytes;
 174	u64	tx_packets;
 175	u64	tx_bytes;
 176	struct u64_stats_sync	syncp;
 177};
 178
 179static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
 180						   struct rtnl_link_stats64 *tot)
 181{
 182	int i;
 
 
 
 
 
 
 
 
 
 
 
 
 183
 184	for_each_possible_cpu(i) {
 185		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
 186		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
 187		unsigned int start;
 188
 189		do {
 190			start = u64_stats_fetch_begin_bh(&tstats->syncp);
 191			rx_packets = tstats->rx_packets;
 192			tx_packets = tstats->tx_packets;
 193			rx_bytes = tstats->rx_bytes;
 194			tx_bytes = tstats->tx_bytes;
 195		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
 196
 197		tot->rx_packets += rx_packets;
 198		tot->tx_packets += tx_packets;
 199		tot->rx_bytes   += rx_bytes;
 200		tot->tx_bytes   += tx_bytes;
 201	}
 202
 203	tot->multicast = dev->stats.multicast;
 204	tot->rx_crc_errors = dev->stats.rx_crc_errors;
 205	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
 206	tot->rx_length_errors = dev->stats.rx_length_errors;
 207	tot->rx_errors = dev->stats.rx_errors;
 208	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
 209	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
 210	tot->tx_dropped = dev->stats.tx_dropped;
 211	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
 212	tot->tx_errors = dev->stats.tx_errors;
 213
 214	return tot;
 215}
 216
 217/* Given src, dst and key, find appropriate for input tunnel. */
 218
 219static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
 220					     __be32 remote, __be32 local,
 221					     __be32 key, __be16 gre_proto)
 222{
 223	struct net *net = dev_net(dev);
 224	int link = dev->ifindex;
 225	unsigned int h0 = HASH(remote);
 226	unsigned int h1 = HASH(key);
 227	struct ip_tunnel *t, *cand = NULL;
 228	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
 229	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
 230		       ARPHRD_ETHER : ARPHRD_IPGRE;
 231	int score, cand_score = 4;
 232
 233	for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
 234		if (local != t->parms.iph.saddr ||
 235		    remote != t->parms.iph.daddr ||
 236		    key != t->parms.i_key ||
 237		    !(t->dev->flags & IFF_UP))
 238			continue;
 239
 240		if (t->dev->type != ARPHRD_IPGRE &&
 241		    t->dev->type != dev_type)
 242			continue;
 243
 244		score = 0;
 245		if (t->parms.link != link)
 246			score |= 1;
 247		if (t->dev->type != dev_type)
 248			score |= 2;
 249		if (score == 0)
 250			return t;
 251
 252		if (score < cand_score) {
 253			cand = t;
 254			cand_score = score;
 255		}
 256	}
 257
 258	for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
 259		if (remote != t->parms.iph.daddr ||
 260		    key != t->parms.i_key ||
 261		    !(t->dev->flags & IFF_UP))
 262			continue;
 263
 264		if (t->dev->type != ARPHRD_IPGRE &&
 265		    t->dev->type != dev_type)
 266			continue;
 267
 268		score = 0;
 269		if (t->parms.link != link)
 270			score |= 1;
 271		if (t->dev->type != dev_type)
 272			score |= 2;
 273		if (score == 0)
 274			return t;
 275
 276		if (score < cand_score) {
 277			cand = t;
 278			cand_score = score;
 279		}
 280	}
 281
 282	for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
 283		if ((local != t->parms.iph.saddr &&
 284		     (local != t->parms.iph.daddr ||
 285		      !ipv4_is_multicast(local))) ||
 286		    key != t->parms.i_key ||
 287		    !(t->dev->flags & IFF_UP))
 288			continue;
 289
 290		if (t->dev->type != ARPHRD_IPGRE &&
 291		    t->dev->type != dev_type)
 292			continue;
 293
 294		score = 0;
 295		if (t->parms.link != link)
 296			score |= 1;
 297		if (t->dev->type != dev_type)
 298			score |= 2;
 299		if (score == 0)
 300			return t;
 301
 302		if (score < cand_score) {
 303			cand = t;
 304			cand_score = score;
 305		}
 306	}
 307
 308	for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
 309		if (t->parms.i_key != key ||
 310		    !(t->dev->flags & IFF_UP))
 311			continue;
 312
 313		if (t->dev->type != ARPHRD_IPGRE &&
 314		    t->dev->type != dev_type)
 315			continue;
 316
 317		score = 0;
 318		if (t->parms.link != link)
 319			score |= 1;
 320		if (t->dev->type != dev_type)
 321			score |= 2;
 322		if (score == 0)
 323			return t;
 324
 325		if (score < cand_score) {
 326			cand = t;
 327			cand_score = score;
 328		}
 329	}
 330
 331	if (cand != NULL)
 332		return cand;
 333
 334	dev = ign->fb_tunnel_dev;
 335	if (dev->flags & IFF_UP)
 336		return netdev_priv(dev);
 337
 338	return NULL;
 339}
 340
 341static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
 342		struct ip_tunnel_parm *parms)
 343{
 344	__be32 remote = parms->iph.daddr;
 345	__be32 local = parms->iph.saddr;
 346	__be32 key = parms->i_key;
 347	unsigned int h = HASH(key);
 348	int prio = 0;
 349
 350	if (local)
 351		prio |= 1;
 352	if (remote && !ipv4_is_multicast(remote)) {
 353		prio |= 2;
 354		h ^= HASH(remote);
 355	}
 356
 357	return &ign->tunnels[prio][h];
 358}
 359
 360static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
 361		struct ip_tunnel *t)
 362{
 363	return __ipgre_bucket(ign, &t->parms);
 364}
 
 
 
 
 
 
 
 
 365
 366static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
 367{
 368	struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);
 369
 370	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
 371	rcu_assign_pointer(*tp, t);
 372}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 373
 374static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
 375{
 376	struct ip_tunnel __rcu **tp;
 377	struct ip_tunnel *iter;
 
 378
 379	for (tp = ipgre_bucket(ign, t);
 380	     (iter = rtnl_dereference(*tp)) != NULL;
 381	     tp = &iter->next) {
 382		if (t == iter) {
 383			rcu_assign_pointer(*tp, t->next);
 384			break;
 385		}
 386	}
 387}
 388
 389static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
 390					   struct ip_tunnel_parm *parms,
 391					   int type)
 392{
 393	__be32 remote = parms->iph.daddr;
 394	__be32 local = parms->iph.saddr;
 395	__be32 key = parms->i_key;
 396	int link = parms->link;
 397	struct ip_tunnel *t;
 398	struct ip_tunnel __rcu **tp;
 399	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
 400
 401	for (tp = __ipgre_bucket(ign, parms);
 402	     (t = rtnl_dereference(*tp)) != NULL;
 403	     tp = &t->next)
 404		if (local == t->parms.iph.saddr &&
 405		    remote == t->parms.iph.daddr &&
 406		    key == t->parms.i_key &&
 407		    link == t->parms.link &&
 408		    type == t->dev->type)
 409			break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 410
 411	return t;
 
 
 412}
 413
 414static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
 415		struct ip_tunnel_parm *parms, int create)
 416{
 417	struct ip_tunnel *t, *nt;
 418	struct net_device *dev;
 419	char name[IFNAMSIZ];
 420	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
 421
 422	t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
 423	if (t || !create)
 424		return t;
 425
 426	if (parms->name[0])
 427		strlcpy(name, parms->name, IFNAMSIZ);
 428	else
 429		strcpy(name, "gre%d");
 430
 431	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
 432	if (!dev)
 433		return NULL;
 434
 435	dev_net_set(dev, net);
 
 
 
 
 
 
 436
 437	nt = netdev_priv(dev);
 438	nt->parms = *parms;
 439	dev->rtnl_link_ops = &ipgre_link_ops;
 
 440
 441	dev->mtu = ipgre_tunnel_bind_dev(dev);
 
 
 442
 443	if (register_netdevice(dev) < 0)
 444		goto failed_free;
 
 
 
 445
 446	/* Can use a lockless transmit, unless we generate output sequences */
 447	if (!(nt->parms.o_flags & GRE_SEQ))
 448		dev->features |= NETIF_F_LLTX;
 
 449
 450	dev_hold(dev);
 451	ipgre_tunnel_link(ign, nt);
 452	return nt;
 453
 454failed_free:
 455	free_netdev(dev);
 456	return NULL;
 457}
 458
 459static void ipgre_tunnel_uninit(struct net_device *dev)
 
 460{
 461	struct net *net = dev_net(dev);
 462	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
 
 463
 464	ipgre_tunnel_unlink(ign, netdev_priv(dev));
 465	dev_put(dev);
 
 
 
 
 
 
 
 
 
 
 
 
 466}
 467
 468
 469static void ipgre_err(struct sk_buff *skb, u32 info)
 470{
 
 
 
 471
 472/* All the routers (except for Linux) return only
 473   8 bytes of packet payload. It means, that precise relaying of
 474   ICMP in the real Internet is absolutely infeasible.
 475
 476   Moreover, Cisco "wise men" put GRE key to the third word
 477   in GRE header. It makes impossible maintaining even soft state for keyed
 478   GRE tunnels with enabled checksum. Tell them "thank you".
 479
 480   Well, I wonder, rfc1812 was written by Cisco employee,
 481   what the hell these idiots break standards established
 482   by themselves???
 483 */
 484
 485	const struct iphdr *iph = (const struct iphdr *)skb->data;
 486	__be16	     *p = (__be16 *)(skb->data+(iph->ihl<<2));
 487	int grehlen = (iph->ihl<<2) + 4;
 488	const int type = icmp_hdr(skb)->type;
 489	const int code = icmp_hdr(skb)->code;
 490	struct ip_tunnel *t;
 491	__be16 flags;
 492
 493	flags = p[0];
 494	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
 495		if (flags&(GRE_VERSION|GRE_ROUTING))
 496			return;
 497		if (flags&GRE_KEY) {
 498			grehlen += 4;
 499			if (flags&GRE_CSUM)
 500				grehlen += 4;
 501		}
 502	}
 
 503
 504	/* If only 8 bytes returned, keyed message will be dropped here */
 505	if (skb_headlen(skb) < grehlen)
 506		return;
 507
 508	switch (type) {
 509	default:
 510	case ICMP_PARAMETERPROB:
 511		return;
 512
 513	case ICMP_DEST_UNREACH:
 514		switch (code) {
 515		case ICMP_SR_FAILED:
 516		case ICMP_PORT_UNREACH:
 517			/* Impossible event. */
 518			return;
 519		case ICMP_FRAG_NEEDED:
 520			/* Soft state for pmtu is maintained by IP core. */
 521			return;
 522		default:
 523			/* All others are translated to HOST_UNREACH.
 524			   rfc2003 contains "deep thoughts" about NET_UNREACH,
 525			   I believe they are just ether pollution. --ANK
 526			 */
 527			break;
 528		}
 529		break;
 530	case ICMP_TIME_EXCEEDED:
 531		if (code != ICMP_EXC_TTL)
 532			return;
 533		break;
 534	}
 535
 536	rcu_read_lock();
 537	t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
 538				flags & GRE_KEY ?
 539				*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
 540				p[1]);
 541	if (t == NULL || t->parms.iph.daddr == 0 ||
 542	    ipv4_is_multicast(t->parms.iph.daddr))
 543		goto out;
 544
 545	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
 546		goto out;
 547
 548	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
 549		t->err_count++;
 550	else
 551		t->err_count = 1;
 552	t->err_time = jiffies;
 553out:
 554	rcu_read_unlock();
 
 
 
 555}
 556
 557static inline void ipgre_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
 
 
 558{
 559	if (INET_ECN_is_ce(iph->tos)) {
 560		if (skb->protocol == htons(ETH_P_IP)) {
 561			IP_ECN_set_ce(ip_hdr(skb));
 562		} else if (skb->protocol == htons(ETH_P_IPV6)) {
 563			IP6_ECN_set_ce(ipv6_hdr(skb));
 564		}
 565	}
 
 
 
 
 
 566}
 567
 568static inline u8
 569ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
 570{
 571	u8 inner = 0;
 572	if (skb->protocol == htons(ETH_P_IP))
 573		inner = old_iph->tos;
 574	else if (skb->protocol == htons(ETH_P_IPV6))
 575		inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
 576	return INET_ECN_encapsulate(tos, inner);
 577}
 578
 579static int ipgre_rcv(struct sk_buff *skb)
 
 580{
 581	const struct iphdr *iph;
 582	u8     *h;
 583	__be16    flags;
 584	__sum16   csum = 0;
 585	__be32 key = 0;
 586	u32    seqno = 0;
 587	struct ip_tunnel *tunnel;
 588	int    offset = 4;
 589	__be16 gre_proto;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 590
 591	if (!pskb_may_pull(skb, 16))
 592		goto drop_nolock;
 593
 594	iph = ip_hdr(skb);
 595	h = skb->data;
 596	flags = *(__be16 *)h;
 597
 598	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
 599		/* - Version must be 0.
 600		   - We do not support routing headers.
 601		 */
 602		if (flags&(GRE_VERSION|GRE_ROUTING))
 603			goto drop_nolock;
 604
 605		if (flags&GRE_CSUM) {
 606			switch (skb->ip_summed) {
 607			case CHECKSUM_COMPLETE:
 608				csum = csum_fold(skb->csum);
 609				if (!csum)
 610					break;
 611				/* fall through */
 612			case CHECKSUM_NONE:
 613				skb->csum = 0;
 614				csum = __skb_checksum_complete(skb);
 615				skb->ip_summed = CHECKSUM_COMPLETE;
 616			}
 617			offset += 4;
 618		}
 619		if (flags&GRE_KEY) {
 620			key = *(__be32 *)(h + offset);
 621			offset += 4;
 622		}
 623		if (flags&GRE_SEQ) {
 624			seqno = ntohl(*(__be32 *)(h + offset));
 625			offset += 4;
 626		}
 627	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 628
 629	gre_proto = *(__be16 *)(h + 2);
 
 630
 631	rcu_read_lock();
 632	if ((tunnel = ipgre_tunnel_lookup(skb->dev,
 633					  iph->saddr, iph->daddr, key,
 634					  gre_proto))) {
 635		struct pcpu_tstats *tstats;
 636
 637		secpath_reset(skb);
 638
 639		skb->protocol = gre_proto;
 640		/* WCCP version 1 and 2 protocol decoding.
 641		 * - Change protocol to IP
 642		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
 643		 */
 644		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
 645			skb->protocol = htons(ETH_P_IP);
 646			if ((*(h + offset) & 0xF0) != 0x40)
 647				offset += 4;
 648		}
 
 
 
 
 649
 650		skb->mac_header = skb->network_header;
 651		__pskb_pull(skb, offset);
 652		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
 653		skb->pkt_type = PACKET_HOST;
 654#ifdef CONFIG_NET_IPGRE_BROADCAST
 655		if (ipv4_is_multicast(iph->daddr)) {
 656			/* Looped back packet, drop it! */
 657			if (rt_is_output_route(skb_rtable(skb)))
 658				goto drop;
 659			tunnel->dev->stats.multicast++;
 660			skb->pkt_type = PACKET_BROADCAST;
 661		}
 662#endif
 663
 664		if (((flags&GRE_CSUM) && csum) ||
 665		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
 666			tunnel->dev->stats.rx_crc_errors++;
 667			tunnel->dev->stats.rx_errors++;
 668			goto drop;
 669		}
 670		if (tunnel->parms.i_flags&GRE_SEQ) {
 671			if (!(flags&GRE_SEQ) ||
 672			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
 673				tunnel->dev->stats.rx_fifo_errors++;
 674				tunnel->dev->stats.rx_errors++;
 675				goto drop;
 676			}
 677			tunnel->i_seqno = seqno + 1;
 678		}
 679
 680		/* Warning: All skb pointers will be invalidated! */
 681		if (tunnel->dev->type == ARPHRD_ETHER) {
 682			if (!pskb_may_pull(skb, ETH_HLEN)) {
 683				tunnel->dev->stats.rx_length_errors++;
 684				tunnel->dev->stats.rx_errors++;
 685				goto drop;
 686			}
 687
 688			iph = ip_hdr(skb);
 689			skb->protocol = eth_type_trans(skb, tunnel->dev);
 690			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 691		}
 692
 693		tstats = this_cpu_ptr(tunnel->dev->tstats);
 694		u64_stats_update_begin(&tstats->syncp);
 695		tstats->rx_packets++;
 696		tstats->rx_bytes += skb->len;
 697		u64_stats_update_end(&tstats->syncp);
 698
 699		__skb_tunnel_rx(skb, tunnel->dev);
 
 
 
 
 
 700
 701		skb_reset_network_header(skb);
 702		ipgre_ecn_decapsulate(iph, skb);
 703
 704		netif_rx(skb);
 
 
 
 
 
 
 
 705
 706		rcu_read_unlock();
 707		return 0;
 708	}
 709	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 710
 711drop:
 712	rcu_read_unlock();
 713drop_nolock:
 714	kfree_skb(skb);
 715	return 0;
 716}
 717
 718static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 
 719{
 720	struct ip_tunnel *tunnel = netdev_priv(dev);
 721	struct pcpu_tstats *tstats;
 722	const struct iphdr  *old_iph = ip_hdr(skb);
 723	const struct iphdr  *tiph;
 724	struct flowi4 fl4;
 725	u8     tos;
 726	__be16 df;
 727	struct rtable *rt;     			/* Route to the other host */
 728	struct net_device *tdev;		/* Device to other host */
 729	struct iphdr  *iph;			/* Our new IP header */
 730	unsigned int max_headroom;		/* The extra header space needed */
 731	int    gre_hlen;
 732	__be32 dst;
 733	int    mtu;
 734
 735	if (dev->type == ARPHRD_ETHER)
 736		IPCB(skb)->flags = 0;
 737
 738	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
 739		gre_hlen = 0;
 740		tiph = (const struct iphdr *)skb->data;
 741	} else {
 742		gre_hlen = tunnel->hlen;
 743		tiph = &tunnel->parms.iph;
 744	}
 745
 746	if ((dst = tiph->daddr) == 0) {
 747		/* NBMA tunnel */
 748
 749		if (skb_dst(skb) == NULL) {
 750			dev->stats.tx_fifo_errors++;
 751			goto tx_error;
 752		}
 753
 754		if (skb->protocol == htons(ETH_P_IP)) {
 755			rt = skb_rtable(skb);
 756			dst = rt->rt_gateway;
 757		}
 758#if IS_ENABLED(CONFIG_IPV6)
 759		else if (skb->protocol == htons(ETH_P_IPV6)) {
 760			const struct in6_addr *addr6;
 761			struct neighbour *neigh;
 762			bool do_tx_error_icmp;
 763			int addr_type;
 764
 765			neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
 766			if (neigh == NULL)
 767				goto tx_error;
 768
 769			addr6 = (const struct in6_addr *)&neigh->primary_key;
 770			addr_type = ipv6_addr_type(addr6);
 771
 772			if (addr_type == IPV6_ADDR_ANY) {
 773				addr6 = &ipv6_hdr(skb)->daddr;
 774				addr_type = ipv6_addr_type(addr6);
 775			}
 776
 777			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
 778				do_tx_error_icmp = true;
 779			else {
 780				do_tx_error_icmp = false;
 781				dst = addr6->s6_addr32[3];
 782			}
 783			neigh_release(neigh);
 784			if (do_tx_error_icmp)
 785				goto tx_error_icmp;
 786		}
 787#endif
 788		else
 789			goto tx_error;
 790	}
 791
 792	tos = tiph->tos;
 793	if (tos == 1) {
 794		tos = 0;
 795		if (skb->protocol == htons(ETH_P_IP))
 796			tos = old_iph->tos;
 797		else if (skb->protocol == htons(ETH_P_IPV6))
 798			tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
 799	}
 800
 801	rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
 802				 tunnel->parms.o_key, RT_TOS(tos),
 803				 tunnel->parms.link);
 804	if (IS_ERR(rt)) {
 805		dev->stats.tx_carrier_errors++;
 806		goto tx_error;
 807	}
 808	tdev = rt->dst.dev;
 809
 810	if (tdev == dev) {
 811		ip_rt_put(rt);
 812		dev->stats.collisions++;
 813		goto tx_error;
 814	}
 815
 816	df = tiph->frag_off;
 817	if (df)
 818		mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
 819	else
 820		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
 821
 822	if (skb_dst(skb))
 823		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
 
 824
 825	if (skb->protocol == htons(ETH_P_IP)) {
 826		df |= (old_iph->frag_off&htons(IP_DF));
 
 
 
 
 827
 828		if ((old_iph->frag_off&htons(IP_DF)) &&
 829		    mtu < ntohs(old_iph->tot_len)) {
 830			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 831			ip_rt_put(rt);
 832			goto tx_error;
 833		}
 834	}
 835#if IS_ENABLED(CONFIG_IPV6)
 836	else if (skb->protocol == htons(ETH_P_IPV6)) {
 837		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
 838
 839		if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
 840			if ((tunnel->parms.iph.daddr &&
 841			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
 842			    rt6->rt6i_dst.plen == 128) {
 843				rt6->rt6i_flags |= RTF_MODIFIED;
 844				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
 845			}
 846		}
 847
 848		if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
 849			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 850			ip_rt_put(rt);
 851			goto tx_error;
 852		}
 853	}
 854#endif
 855
 856	if (tunnel->err_count > 0) {
 857		if (time_before(jiffies,
 858				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
 859			tunnel->err_count--;
 860
 861			dst_link_failure(skb);
 862		} else
 863			tunnel->err_count = 0;
 864	}
 865
 866	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;
 867
 868	if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
 869	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
 870		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
 871		if (max_headroom > dev->needed_headroom)
 872			dev->needed_headroom = max_headroom;
 873		if (!new_skb) {
 874			ip_rt_put(rt);
 875			dev->stats.tx_dropped++;
 876			dev_kfree_skb(skb);
 877			return NETDEV_TX_OK;
 878		}
 879		if (skb->sk)
 880			skb_set_owner_w(new_skb, skb->sk);
 881		dev_kfree_skb(skb);
 882		skb = new_skb;
 883		old_iph = ip_hdr(skb);
 884	}
 885
 886	skb_reset_transport_header(skb);
 887	skb_push(skb, gre_hlen);
 888	skb_reset_network_header(skb);
 889	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 890	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
 891			      IPSKB_REROUTED);
 892	skb_dst_drop(skb);
 893	skb_dst_set(skb, &rt->dst);
 894
 895	/*
 896	 *	Push down and install the IPIP header.
 897	 */
 
 
 
 898
 899	iph 			=	ip_hdr(skb);
 900	iph->version		=	4;
 901	iph->ihl		=	sizeof(struct iphdr) >> 2;
 902	iph->frag_off		=	df;
 903	iph->protocol		=	IPPROTO_GRE;
 904	iph->tos		=	ipgre_ecn_encapsulate(tos, old_iph, skb);
 905	iph->daddr		=	fl4.daddr;
 906	iph->saddr		=	fl4.saddr;
 907
 908	if ((iph->ttl = tiph->ttl) == 0) {
 909		if (skb->protocol == htons(ETH_P_IP))
 910			iph->ttl = old_iph->ttl;
 911#if IS_ENABLED(CONFIG_IPV6)
 912		else if (skb->protocol == htons(ETH_P_IPV6))
 913			iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
 914#endif
 915		else
 916			iph->ttl = ip4_dst_hoplimit(&rt->dst);
 917	}
 918
 919	((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
 920	((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
 921				   htons(ETH_P_TEB) : skb->protocol;
 922
 923	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
 924		__be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);
 925
 926		if (tunnel->parms.o_flags&GRE_SEQ) {
 927			++tunnel->o_seqno;
 928			*ptr = htonl(tunnel->o_seqno);
 929			ptr--;
 930		}
 931		if (tunnel->parms.o_flags&GRE_KEY) {
 932			*ptr = tunnel->parms.o_key;
 933			ptr--;
 934		}
 935		if (tunnel->parms.o_flags&GRE_CSUM) {
 936			*ptr = 0;
 937			*(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
 938		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 939	}
 940
 941	nf_reset(skb);
 942	tstats = this_cpu_ptr(dev->tstats);
 943	__IPTUNNEL_XMIT(tstats, &dev->stats);
 944	return NETDEV_TX_OK;
 945
 946#if IS_ENABLED(CONFIG_IPV6)
 947tx_error_icmp:
 948	dst_link_failure(skb);
 949#endif
 950tx_error:
 951	dev->stats.tx_errors++;
 952	dev_kfree_skb(skb);
 953	return NETDEV_TX_OK;
 954}
 955
 956static int ipgre_tunnel_bind_dev(struct net_device *dev)
 
 957{
 958	struct net_device *tdev = NULL;
 959	struct ip_tunnel *tunnel;
 960	const struct iphdr *iph;
 961	int hlen = LL_MAX_HEADER;
 962	int mtu = ETH_DATA_LEN;
 963	int addend = sizeof(struct iphdr) + 4;
 964
 965	tunnel = netdev_priv(dev);
 966	iph = &tunnel->parms.iph;
 967
 968	/* Guess output device to choose reasonable mtu and needed_headroom */
 
 
 
 969
 970	if (iph->daddr) {
 971		struct flowi4 fl4;
 972		struct rtable *rt;
 973
 974		rt = ip_route_output_gre(dev_net(dev), &fl4,
 975					 iph->daddr, iph->saddr,
 976					 tunnel->parms.o_key,
 977					 RT_TOS(iph->tos),
 978					 tunnel->parms.link);
 979		if (!IS_ERR(rt)) {
 980			tdev = rt->dst.dev;
 981			ip_rt_put(rt);
 982		}
 983
 984		if (dev->type != ARPHRD_ETHER)
 985			dev->flags |= IFF_POINTOPOINT;
 986	}
 987
 988	if (!tdev && tunnel->parms.link)
 989		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
 
 
 
 990
 991	if (tdev) {
 992		hlen = tdev->hard_header_len + tdev->needed_headroom;
 993		mtu = tdev->mtu;
 994	}
 995	dev->iflink = tunnel->parms.link;
 996
 997	/* Precalculate GRE options length */
 998	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
 999		if (tunnel->parms.o_flags&GRE_CSUM)
1000			addend += 4;
1001		if (tunnel->parms.o_flags&GRE_KEY)
1002			addend += 4;
1003		if (tunnel->parms.o_flags&GRE_SEQ)
1004			addend += 4;
1005	}
1006	dev->needed_headroom = addend + hlen;
1007	mtu -= dev->hard_header_len + addend;
1008
1009	if (mtu < 68)
1010		mtu = 68;
 
 
1011
1012	tunnel->hlen = addend;
 
1013
1014	return mtu;
 
 
 
 
 
 
 
 
1015}
1016
1017static int
1018ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
 
1019{
1020	int err = 0;
1021	struct ip_tunnel_parm p;
1022	struct ip_tunnel *t;
1023	struct net *net = dev_net(dev);
1024	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
 
 
 
 
 
 
 
 
 
 
 
1025
1026	switch (cmd) {
1027	case SIOCGETTUNNEL:
1028		t = NULL;
1029		if (dev == ign->fb_tunnel_dev) {
1030			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1031				err = -EFAULT;
1032				break;
1033			}
1034			t = ipgre_tunnel_locate(net, &p, 0);
1035		}
1036		if (t == NULL)
1037			t = netdev_priv(dev);
1038		memcpy(&p, &t->parms, sizeof(p));
1039		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1040			err = -EFAULT;
1041		break;
1042
1043	case SIOCADDTUNNEL:
1044	case SIOCCHGTUNNEL:
1045		err = -EPERM;
1046		if (!capable(CAP_NET_ADMIN))
1047			goto done;
1048
1049		err = -EFAULT;
1050		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1051			goto done;
1052
1053		err = -EINVAL;
1054		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
1055		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
1056		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
1057			goto done;
1058		if (p.iph.ttl)
1059			p.iph.frag_off |= htons(IP_DF);
1060
1061		if (!(p.i_flags&GRE_KEY))
1062			p.i_key = 0;
1063		if (!(p.o_flags&GRE_KEY))
1064			p.o_key = 0;
1065
1066		t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
1067
1068		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
1069			if (t != NULL) {
1070				if (t->dev != dev) {
1071					err = -EEXIST;
1072					break;
1073				}
1074			} else {
1075				unsigned int nflags = 0;
1076
1077				t = netdev_priv(dev);
1078
1079				if (ipv4_is_multicast(p.iph.daddr))
1080					nflags = IFF_BROADCAST;
1081				else if (p.iph.daddr)
1082					nflags = IFF_POINTOPOINT;
1083
1084				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
1085					err = -EINVAL;
1086					break;
1087				}
1088				ipgre_tunnel_unlink(ign, t);
1089				synchronize_net();
1090				t->parms.iph.saddr = p.iph.saddr;
1091				t->parms.iph.daddr = p.iph.daddr;
1092				t->parms.i_key = p.i_key;
1093				t->parms.o_key = p.o_key;
1094				memcpy(dev->dev_addr, &p.iph.saddr, 4);
1095				memcpy(dev->broadcast, &p.iph.daddr, 4);
1096				ipgre_tunnel_link(ign, t);
1097				netdev_state_change(dev);
1098			}
1099		}
1100
1101		if (t) {
1102			err = 0;
1103			if (cmd == SIOCCHGTUNNEL) {
1104				t->parms.iph.ttl = p.iph.ttl;
1105				t->parms.iph.tos = p.iph.tos;
1106				t->parms.iph.frag_off = p.iph.frag_off;
1107				if (t->parms.link != p.link) {
1108					t->parms.link = p.link;
1109					dev->mtu = ipgre_tunnel_bind_dev(dev);
1110					netdev_state_change(dev);
1111				}
1112			}
1113			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
1114				err = -EFAULT;
1115		} else
1116			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1117		break;
1118
1119	case SIOCDELTUNNEL:
1120		err = -EPERM;
1121		if (!capable(CAP_NET_ADMIN))
1122			goto done;
1123
1124		if (dev == ign->fb_tunnel_dev) {
1125			err = -EFAULT;
1126			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1127				goto done;
1128			err = -ENOENT;
1129			if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
1130				goto done;
1131			err = -EPERM;
1132			if (t == netdev_priv(ign->fb_tunnel_dev))
1133				goto done;
1134			dev = t->dev;
1135		}
1136		unregister_netdevice(dev);
1137		err = 0;
1138		break;
1139
1140	default:
1141		err = -EINVAL;
1142	}
1143
1144done:
1145	return err;
1146}
 
1147
1148static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1149{
1150	struct ip_tunnel *tunnel = netdev_priv(dev);
1151	if (new_mtu < 68 ||
1152	    new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
1153		return -EINVAL;
1154	dev->mtu = new_mtu;
1155	return 0;
1156}
1157
1158/* Nice toy. Unfortunately, useless in real life :-)
1159   It allows to construct virtual multiprotocol broadcast "LAN"
1160   over the Internet, provided multicast routing is tuned.
1161
1162
1163   I have no idea was this bicycle invented before me,
1164   so that I had to set ARPHRD_IPGRE to a random value.
1165   I have an impression, that Cisco could make something similar,
1166   but this feature is apparently missing in IOS<=11.2(8).
1167
1168   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
1169   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
1170
1171   ping -t 255 224.66.66.66
1172
1173   If nobody answers, mbone does not work.
1174
1175   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
1176   ip addr add 10.66.66.<somewhat>/24 dev Universe
1177   ifconfig Universe up
1178   ifconfig Universe add fe80::<Your_real_addr>/10
1179   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
1180   ftp 10.66.66.66
1181   ...
1182   ftp fec0:6666:6666::193.233.7.65
1183   ...
1184
1185 */
1186
1187static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1188			unsigned short type,
1189			const void *daddr, const void *saddr, unsigned int len)
1190{
1191	struct ip_tunnel *t = netdev_priv(dev);
1192	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
1193	__be16 *p = (__be16 *)(iph+1);
 
 
 
 
 
1194
1195	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
1196	p[0]		= t->parms.o_flags;
1197	p[1]		= htons(type);
1198
1199	/*
1200	 *	Set the source hardware address.
1201	 */
1202
 
1203	if (saddr)
1204		memcpy(&iph->saddr, saddr, 4);
1205	if (daddr)
1206		memcpy(&iph->daddr, daddr, 4);
1207	if (iph->daddr)
1208		return t->hlen;
1209
1210	return -t->hlen;
1211}
1212
1213static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
1214{
1215	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
1216	memcpy(haddr, &iph->saddr, 4);
1217	return 4;
1218}
1219
1220static const struct header_ops ipgre_header_ops = {
1221	.create	= ipgre_header,
1222	.parse	= ipgre_header_parse,
1223};
1224
1225#ifdef CONFIG_NET_IPGRE_BROADCAST
1226static int ipgre_open(struct net_device *dev)
1227{
1228	struct ip_tunnel *t = netdev_priv(dev);
1229
1230	if (ipv4_is_multicast(t->parms.iph.daddr)) {
1231		struct flowi4 fl4;
1232		struct rtable *rt;
1233
1234		rt = ip_route_output_gre(dev_net(dev), &fl4,
1235					 t->parms.iph.daddr,
1236					 t->parms.iph.saddr,
1237					 t->parms.o_key,
1238					 RT_TOS(t->parms.iph.tos),
1239					 t->parms.link);
1240		if (IS_ERR(rt))
1241			return -EADDRNOTAVAIL;
1242		dev = rt->dst.dev;
1243		ip_rt_put(rt);
1244		if (__in_dev_get_rtnl(dev) == NULL)
1245			return -EADDRNOTAVAIL;
1246		t->mlink = dev->ifindex;
1247		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
1248	}
1249	return 0;
1250}
1251
1252static int ipgre_close(struct net_device *dev)
1253{
1254	struct ip_tunnel *t = netdev_priv(dev);
1255
1256	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
1257		struct in_device *in_dev;
1258		in_dev = inetdev_by_index(dev_net(dev), t->mlink);
1259		if (in_dev)
1260			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
1261	}
1262	return 0;
1263}
1264
1265#endif
1266
1267static const struct net_device_ops ipgre_netdev_ops = {
1268	.ndo_init		= ipgre_tunnel_init,
1269	.ndo_uninit		= ipgre_tunnel_uninit,
1270#ifdef CONFIG_NET_IPGRE_BROADCAST
1271	.ndo_open		= ipgre_open,
1272	.ndo_stop		= ipgre_close,
1273#endif
1274	.ndo_start_xmit		= ipgre_tunnel_xmit,
1275	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
1276	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
1277	.ndo_get_stats64	= ipgre_get_stats64,
 
 
1278};
1279
1280static void ipgre_dev_free(struct net_device *dev)
1281{
1282	free_percpu(dev->tstats);
1283	free_netdev(dev);
1284}
1285
1286static void ipgre_tunnel_setup(struct net_device *dev)
1287{
1288	dev->netdev_ops		= &ipgre_netdev_ops;
1289	dev->destructor 	= ipgre_dev_free;
1290
1291	dev->type		= ARPHRD_IPGRE;
1292	dev->needed_headroom 	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
1293	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
1294	dev->flags		= IFF_NOARP;
1295	dev->iflink		= 0;
1296	dev->addr_len		= 4;
1297	dev->features		|= NETIF_F_NETNS_LOCAL;
1298	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
1299}
1300
1301static int ipgre_tunnel_init(struct net_device *dev)
1302{
1303	struct ip_tunnel *tunnel;
1304	struct iphdr *iph;
1305
1306	tunnel = netdev_priv(dev);
1307	iph = &tunnel->parms.iph;
 
1308
1309	tunnel->dev = dev;
1310	strcpy(tunnel->parms.name, dev->name);
1311
1312	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1313	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
1314
1315	if (iph->daddr) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1316#ifdef CONFIG_NET_IPGRE_BROADCAST
1317		if (ipv4_is_multicast(iph->daddr)) {
1318			if (!iph->saddr)
1319				return -EINVAL;
1320			dev->flags = IFF_BROADCAST;
1321			dev->header_ops = &ipgre_header_ops;
 
 
1322		}
1323#endif
1324	} else
1325		dev->header_ops = &ipgre_header_ops;
 
 
 
1326
1327	dev->tstats = alloc_percpu(struct pcpu_tstats);
1328	if (!dev->tstats)
1329		return -ENOMEM;
1330
1331	return 0;
1332}
1333
1334static void ipgre_fb_tunnel_init(struct net_device *dev)
1335{
1336	struct ip_tunnel *tunnel = netdev_priv(dev);
1337	struct iphdr *iph = &tunnel->parms.iph;
1338
1339	tunnel->dev = dev;
1340	strcpy(tunnel->parms.name, dev->name);
1341
1342	iph->version		= 4;
1343	iph->protocol		= IPPROTO_GRE;
1344	iph->ihl		= 5;
1345	tunnel->hlen		= sizeof(struct iphdr) + 4;
1346
1347	dev_hold(dev);
1348}
1349
1350
1351static const struct gre_protocol ipgre_protocol = {
1352	.handler     = ipgre_rcv,
1353	.err_handler = ipgre_err,
1354};
1355
1356static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
1357{
1358	int prio;
1359
1360	for (prio = 0; prio < 4; prio++) {
1361		int h;
1362		for (h = 0; h < HASH_SIZE; h++) {
1363			struct ip_tunnel *t;
1364
1365			t = rtnl_dereference(ign->tunnels[prio][h]);
1366
1367			while (t != NULL) {
1368				unregister_netdevice_queue(t->dev, head);
1369				t = rtnl_dereference(t->next);
1370			}
1371		}
1372	}
1373}
1374
1375static int __net_init ipgre_init_net(struct net *net)
1376{
1377	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1378	int err;
1379
1380	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
1381					   ipgre_tunnel_setup);
1382	if (!ign->fb_tunnel_dev) {
1383		err = -ENOMEM;
1384		goto err_alloc_dev;
1385	}
1386	dev_net_set(ign->fb_tunnel_dev, net);
1387
1388	ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
1389	ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;
1390
1391	if ((err = register_netdev(ign->fb_tunnel_dev)))
1392		goto err_reg_dev;
1393
1394	rcu_assign_pointer(ign->tunnels_wc[0],
1395			   netdev_priv(ign->fb_tunnel_dev));
1396	return 0;
1397
1398err_reg_dev:
1399	ipgre_dev_free(ign->fb_tunnel_dev);
1400err_alloc_dev:
1401	return err;
1402}
1403
1404static void __net_exit ipgre_exit_net(struct net *net)
 
1405{
1406	struct ipgre_net *ign;
1407	LIST_HEAD(list);
1408
1409	ign = net_generic(net, ipgre_net_id);
1410	rtnl_lock();
1411	ipgre_destroy_tunnels(ign, &list);
1412	unregister_netdevice_many(&list);
1413	rtnl_unlock();
1414}
1415
1416static struct pernet_operations ipgre_net_ops = {
1417	.init = ipgre_init_net,
1418	.exit = ipgre_exit_net,
1419	.id   = &ipgre_net_id,
1420	.size = sizeof(struct ipgre_net),
1421};
1422
1423static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
 
1424{
1425	__be16 flags;
1426
1427	if (!data)
1428		return 0;
1429
1430	flags = 0;
1431	if (data[IFLA_GRE_IFLAGS])
1432		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1433	if (data[IFLA_GRE_OFLAGS])
1434		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1435	if (flags & (GRE_VERSION|GRE_ROUTING))
1436		return -EINVAL;
1437
 
 
 
 
 
1438	return 0;
1439}
1440
1441static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
 
1442{
1443	__be32 daddr;
1444
1445	if (tb[IFLA_ADDRESS]) {
1446		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1447			return -EINVAL;
1448		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1449			return -EADDRNOTAVAIL;
1450	}
1451
1452	if (!data)
1453		goto out;
1454
1455	if (data[IFLA_GRE_REMOTE]) {
1456		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1457		if (!daddr)
1458			return -EINVAL;
1459	}
1460
1461out:
1462	return ipgre_tunnel_validate(tb, data);
1463}
1464
1465static void ipgre_netlink_parms(struct nlattr *data[],
1466				struct ip_tunnel_parm *parms)
1467{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1468	memset(parms, 0, sizeof(*parms));
1469
1470	parms->iph.protocol = IPPROTO_GRE;
1471
1472	if (!data)
1473		return;
1474
1475	if (data[IFLA_GRE_LINK])
1476		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1477
1478	if (data[IFLA_GRE_IFLAGS])
1479		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);
 
1480
1481	if (data[IFLA_GRE_OFLAGS])
1482		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);
 
1483
1484	if (data[IFLA_GRE_IKEY])
1485		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1486
1487	if (data[IFLA_GRE_OKEY])
1488		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1489
1490	if (data[IFLA_GRE_LOCAL])
1491		parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);
1492
1493	if (data[IFLA_GRE_REMOTE])
1494		parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);
1495
1496	if (data[IFLA_GRE_TTL])
1497		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1498
1499	if (data[IFLA_GRE_TOS])
1500		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1501
1502	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
 
 
1503		parms->iph.frag_off = htons(IP_DF);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1504}
1505
1506static int ipgre_tap_init(struct net_device *dev)
 
 
 
 
1507{
1508	struct ip_tunnel *tunnel;
 
1509
1510	tunnel = netdev_priv(dev);
 
 
 
 
1511
1512	tunnel->dev = dev;
1513	strcpy(tunnel->parms.name, dev->name);
1514
1515	ipgre_tunnel_bind_dev(dev);
 
 
1516
1517	dev->tstats = alloc_percpu(struct pcpu_tstats);
1518	if (!dev->tstats)
1519		return -ENOMEM;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1520
1521	return 0;
1522}
1523
1524static const struct net_device_ops ipgre_tap_netdev_ops = {
1525	.ndo_init		= ipgre_tap_init,
1526	.ndo_uninit		= ipgre_tunnel_uninit,
1527	.ndo_start_xmit		= ipgre_tunnel_xmit,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1528	.ndo_set_mac_address 	= eth_mac_addr,
1529	.ndo_validate_addr	= eth_validate_addr,
1530	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
1531	.ndo_get_stats64	= ipgre_get_stats64,
 
 
1532};
1533
1534static void ipgre_tap_setup(struct net_device *dev)
1535{
 
1536
1537	ether_setup(dev);
 
 
 
 
 
 
 
1538
1539	dev->netdev_ops		= &ipgre_tap_netdev_ops;
1540	dev->destructor 	= ipgre_dev_free;
 
 
1541
1542	dev->iflink		= 0;
1543	dev->features		|= NETIF_F_NETNS_LOCAL;
1544}
1545
1546static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
1547			 struct nlattr *data[])
 
 
 
 
 
 
 
 
 
 
 
1548{
1549	struct ip_tunnel *nt;
1550	struct net *net = dev_net(dev);
1551	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1552	int mtu;
1553	int err;
 
 
1554
1555	nt = netdev_priv(dev);
1556	ipgre_netlink_parms(data, &nt->parms);
 
 
1557
1558	if (ipgre_tunnel_find(net, &nt->parms, dev->type))
1559		return -EEXIST;
 
1560
1561	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
1562		eth_hw_addr_random(dev);
 
1563
1564	mtu = ipgre_tunnel_bind_dev(dev);
1565	if (!tb[IFLA_MTU])
1566		dev->mtu = mtu;
1567
1568	/* Can use a lockless transmit, unless we generate output sequences */
1569	if (!(nt->parms.o_flags & GRE_SEQ))
1570		dev->features |= NETIF_F_LLTX;
 
 
 
 
1571
1572	err = register_netdevice(dev);
1573	if (err)
1574		goto out;
 
 
 
 
 
 
1575
1576	dev_hold(dev);
1577	ipgre_tunnel_link(ign, nt);
 
 
 
 
 
1578
1579out:
1580	return err;
 
 
 
 
 
 
1581}
1582
1583static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1584			    struct nlattr *data[])
 
1585{
1586	struct ip_tunnel *t, *nt;
1587	struct net *net = dev_net(dev);
1588	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1589	struct ip_tunnel_parm p;
1590	int mtu;
 
 
 
 
 
 
 
1591
1592	if (dev == ign->fb_tunnel_dev)
1593		return -EINVAL;
 
1594
1595	nt = netdev_priv(dev);
1596	ipgre_netlink_parms(data, &p);
1597
1598	t = ipgre_tunnel_locate(net, &p, 0);
1599
1600	if (t) {
1601		if (t->dev != dev)
1602			return -EEXIST;
1603	} else {
1604		t = nt;
1605
1606		if (dev->type != ARPHRD_ETHER) {
1607			unsigned int nflags = 0;
 
 
 
 
 
 
1608
1609			if (ipv4_is_multicast(p.iph.daddr))
1610				nflags = IFF_BROADCAST;
1611			else if (p.iph.daddr)
1612				nflags = IFF_POINTOPOINT;
1613
1614			if ((dev->flags ^ nflags) &
1615			    (IFF_POINTOPOINT | IFF_BROADCAST))
1616				return -EINVAL;
1617		}
1618
1619		ipgre_tunnel_unlink(ign, t);
1620		t->parms.iph.saddr = p.iph.saddr;
1621		t->parms.iph.daddr = p.iph.daddr;
1622		t->parms.i_key = p.i_key;
1623		if (dev->type != ARPHRD_ETHER) {
1624			memcpy(dev->dev_addr, &p.iph.saddr, 4);
1625			memcpy(dev->broadcast, &p.iph.daddr, 4);
1626		}
1627		ipgre_tunnel_link(ign, t);
1628		netdev_state_change(dev);
1629	}
1630
1631	t->parms.o_key = p.o_key;
1632	t->parms.iph.ttl = p.iph.ttl;
1633	t->parms.iph.tos = p.iph.tos;
1634	t->parms.iph.frag_off = p.iph.frag_off;
1635
1636	if (t->parms.link != p.link) {
1637		t->parms.link = p.link;
1638		mtu = ipgre_tunnel_bind_dev(dev);
1639		if (!tb[IFLA_MTU])
1640			dev->mtu = mtu;
1641		netdev_state_change(dev);
1642	}
1643
1644	return 0;
1645}
1646
1647static size_t ipgre_get_size(const struct net_device *dev)
1648{
1649	return
1650		/* IFLA_GRE_LINK */
1651		nla_total_size(4) +
1652		/* IFLA_GRE_IFLAGS */
1653		nla_total_size(2) +
1654		/* IFLA_GRE_OFLAGS */
1655		nla_total_size(2) +
1656		/* IFLA_GRE_IKEY */
1657		nla_total_size(4) +
1658		/* IFLA_GRE_OKEY */
1659		nla_total_size(4) +
1660		/* IFLA_GRE_LOCAL */
1661		nla_total_size(4) +
1662		/* IFLA_GRE_REMOTE */
1663		nla_total_size(4) +
1664		/* IFLA_GRE_TTL */
1665		nla_total_size(1) +
1666		/* IFLA_GRE_TOS */
1667		nla_total_size(1) +
1668		/* IFLA_GRE_PMTUDISC */
1669		nla_total_size(1) +
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1670		0;
1671}
1672
1673static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1674{
1675	struct ip_tunnel *t = netdev_priv(dev);
1676	struct ip_tunnel_parm *p = &t->parms;
 
 
 
1677
1678	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1679	    nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
1680	    nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
 
 
1681	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1682	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1683	    nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1684	    nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1685	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1686	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1687	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1688		       !!(p->iph.frag_off & htons(IP_DF))))
 
1689		goto nla_put_failure;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1690	return 0;
1691
1692nla_put_failure:
1693	return -EMSGSIZE;
1694}
1695
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1696static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1697	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1698	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1699	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1700	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1701	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1702	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
1703	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
1704	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1705	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1706	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
 
 
 
 
 
 
 
 
 
 
 
1707};
1708
1709static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1710	.kind		= "gre",
1711	.maxtype	= IFLA_GRE_MAX,
1712	.policy		= ipgre_policy,
1713	.priv_size	= sizeof(struct ip_tunnel),
1714	.setup		= ipgre_tunnel_setup,
1715	.validate	= ipgre_tunnel_validate,
1716	.newlink	= ipgre_newlink,
1717	.changelink	= ipgre_changelink,
 
1718	.get_size	= ipgre_get_size,
1719	.fill_info	= ipgre_fill_info,
 
1720};
1721
1722static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1723	.kind		= "gretap",
1724	.maxtype	= IFLA_GRE_MAX,
1725	.policy		= ipgre_policy,
1726	.priv_size	= sizeof(struct ip_tunnel),
1727	.setup		= ipgre_tap_setup,
1728	.validate	= ipgre_tap_validate,
1729	.newlink	= ipgre_newlink,
1730	.changelink	= ipgre_changelink,
 
1731	.get_size	= ipgre_get_size,
1732	.fill_info	= ipgre_fill_info,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1733};
1734
1735/*
1736 *	And now the modules code and kernel interface.
1737 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1738
1739static int __init ipgre_init(void)
1740{
1741	int err;
1742
1743	pr_info("GRE over IPv4 tunneling driver\n");
1744
1745	err = register_pernet_device(&ipgre_net_ops);
1746	if (err < 0)
1747		return err;
1748
 
 
 
 
 
 
 
 
1749	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1750	if (err < 0) {
1751		pr_info("%s: can't add protocol\n", __func__);
1752		goto add_proto_failed;
1753	}
1754
1755	err = rtnl_link_register(&ipgre_link_ops);
1756	if (err < 0)
1757		goto rtnl_link_failed;
1758
1759	err = rtnl_link_register(&ipgre_tap_ops);
1760	if (err < 0)
1761		goto tap_ops_failed;
1762
1763out:
1764	return err;
 
 
 
1765
 
 
1766tap_ops_failed:
1767	rtnl_link_unregister(&ipgre_link_ops);
1768rtnl_link_failed:
1769	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1770add_proto_failed:
 
 
 
 
1771	unregister_pernet_device(&ipgre_net_ops);
1772	goto out;
1773}
1774
1775static void __exit ipgre_fini(void)
1776{
1777	rtnl_link_unregister(&ipgre_tap_ops);
1778	rtnl_link_unregister(&ipgre_link_ops);
1779	if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
1780		pr_info("%s: can't remove protocol\n", __func__);
 
1781	unregister_pernet_device(&ipgre_net_ops);
 
1782}
1783
1784module_init(ipgre_init);
1785module_exit(ipgre_fini);
 
1786MODULE_LICENSE("GPL");
1787MODULE_ALIAS_RTNL_LINK("gre");
1788MODULE_ALIAS_RTNL_LINK("gretap");
 
1789MODULE_ALIAS_NETDEV("gre0");