v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	Linux NET3:	GRE over IP protocol decoder.
   4 *
   5 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
   6 */
   7
   8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   9
  10#include <linux/capability.h>
  11#include <linux/module.h>
  12#include <linux/types.h>
  13#include <linux/kernel.h>
  14#include <linux/slab.h>
  15#include <linux/uaccess.h>
  16#include <linux/skbuff.h>
  17#include <linux/netdevice.h>
  18#include <linux/in.h>
  19#include <linux/tcp.h>
  20#include <linux/udp.h>
  21#include <linux/if_arp.h>
  22#include <linux/if_vlan.h>
  23#include <linux/init.h>
  24#include <linux/in6.h>
  25#include <linux/inetdevice.h>
  26#include <linux/igmp.h>
  27#include <linux/netfilter_ipv4.h>
  28#include <linux/etherdevice.h>
  29#include <linux/if_ether.h>
  30
  31#include <net/sock.h>
  32#include <net/ip.h>
  33#include <net/icmp.h>
  34#include <net/protocol.h>
  35#include <net/ip_tunnels.h>
  36#include <net/arp.h>
  37#include <net/checksum.h>
  38#include <net/dsfield.h>
  39#include <net/inet_ecn.h>
  40#include <net/xfrm.h>
  41#include <net/net_namespace.h>
  42#include <net/netns/generic.h>
  43#include <net/rtnetlink.h>
  44#include <net/gre.h>
  45#include <net/dst_metadata.h>
  46#include <net/erspan.h>
  47
  48/*
  49   Problems & solutions
  50   --------------------
  51
  52   1. The most important issue is detecting local dead loops.
  53   They would cause complete host lockup in transmit, which
  54   would be "resolved" by stack overflow or, if queueing is enabled,
  55   with infinite looping in net_bh.
  56
  57   We cannot track such dead loops during route installation,
  58   it is infeasible task. The most general solutions would be
  59   to keep skb->encapsulation counter (sort of local ttl),
  60   and silently drop packet when it expires. It is a good
  61   solution, but it supposes maintaining new variable in ALL
  62   skb, even if no tunneling is used.
  63
  64   Current solution: xmit_recursion breaks dead loops. This is a percpu
  65   counter, since when we enter the first ndo_xmit(), cpu migration is
  66   forbidden. We force an exit if this counter reaches RECURSION_LIMIT
  67
  68   2. Networking dead loops would not kill routers, but would really
  69   kill network. IP hop limit plays role of "t->recursion" in this case,
  70   if we copy it from packet being encapsulated to upper header.
  71   It is very good solution, but it introduces two problems:
  72
  73   - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
  74     do not work over tunnels.
  75   - traceroute does not work. I planned to relay ICMP from tunnel,
  76     so that this problem would be solved and traceroute output
  77     would be even more informative. This idea appeared to be wrong:
  78     only Linux complies to rfc1812 now (yes, guys, Linux is the only
  79     true router now :-)), all routers (at least, in neighbourhood of mine)
  80     return only 8 bytes of payload. It is the end.
  81
  82   Hence, if we want that OSPF worked or traceroute said something reasonable,
  83   we should search for another solution.
  84
  85   One of them is to parse packet trying to detect inner encapsulation
  86   made by our node. It is difficult or even impossible, especially,
  87   taking into account fragmentation. To be short, ttl is not a solution at all.
  88
  89   Current solution: The solution was UNEXPECTEDLY SIMPLE.
  90   We force DF flag on tunnels with preconfigured hop limit,
  91   that is ALL. :-) Well, it does not remove the problem completely,
  92   but exponential growth of network traffic is changed to linear
  93   (branches, that exceed pmtu are pruned) and tunnel mtu
  94   rapidly degrades to value <68, where looping stops.
  95   Yes, it is not good if there exists a router in the loop,
  96   which does not force DF, even when encapsulating packets have DF set.
  97   But it is not our problem! Nobody could accuse us, we made
  98   all that we could make. Even if it is your gated who injected
  99   fatal route to network, even if it were you who configured
 100   fatal static route: you are innocent. :-)
 101
 102   Alexey Kuznetsov.
 103 */
 104
 105static bool log_ecn_error = true;
 106module_param(log_ecn_error, bool, 0644);
 107MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
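/* Usage sketch (illustrative, assuming the module is built as "ip_gre"):
 * the 0644 mode above typically exposes the flag via sysfs, so it can be
 * inspected or flipped at runtime,
 *
 *   cat /sys/module/ip_gre/parameters/log_ecn_error
 *   echo 0 > /sys/module/ip_gre/parameters/log_ecn_error
 *
 * or set at load time with "modprobe ip_gre log_ecn_error=0".
 */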
 108
 109static struct rtnl_link_ops ipgre_link_ops __read_mostly;
 110static const struct header_ops ipgre_header_ops;
 111
 112static int ipgre_tunnel_init(struct net_device *dev);
 113static void erspan_build_header(struct sk_buff *skb,
 114				u32 id, u32 index,
 115				bool truncate, bool is_ipv4);
 116
 117static unsigned int ipgre_net_id __read_mostly;
 118static unsigned int gre_tap_net_id __read_mostly;
 119static unsigned int erspan_net_id __read_mostly;
 120
 121static int ipgre_err(struct sk_buff *skb, u32 info,
 122		     const struct tnl_ptk_info *tpi)
 123{
 124
 125	/* All the routers (except for Linux) return only
 126	   8 bytes of packet payload. It means, that precise relaying of
 127	   ICMP in the real Internet is absolutely infeasible.
 128
 129	   Moreover, Cisco "wise men" put GRE key to the third word
 130	   in GRE header. It makes impossible maintaining even soft
 131	   state for keyed GRE tunnels with enabled checksum. Tell
 132	   them "thank you".
 133
 134	   Well, I wonder, rfc1812 was written by Cisco employee,
 135	   what the hell these idiots break standards established
 136	   by themselves???
 137	   */
 138	struct net *net = dev_net(skb->dev);
 139	struct ip_tunnel_net *itn;
 140	const struct iphdr *iph;
 141	const int type = icmp_hdr(skb)->type;
 142	const int code = icmp_hdr(skb)->code;
 143	unsigned int data_len = 0;
 144	struct ip_tunnel *t;
 145
 146	if (tpi->proto == htons(ETH_P_TEB))
 147		itn = net_generic(net, gre_tap_net_id);
 148	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
 149		 tpi->proto == htons(ETH_P_ERSPAN2))
 150		itn = net_generic(net, erspan_net_id);
 151	else
 152		itn = net_generic(net, ipgre_net_id);
 153
 154	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
 155	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 156			     iph->daddr, iph->saddr, tpi->key);
 157
 158	if (!t)
 159		return -ENOENT;
 160
 161	switch (type) {
 162	default:
 163	case ICMP_PARAMETERPROB:
 164		return 0;
 165
 166	case ICMP_DEST_UNREACH:
 167		switch (code) {
 168		case ICMP_SR_FAILED:
 169		case ICMP_PORT_UNREACH:
 170			/* Impossible event. */
 171			return 0;
 172		default:
 173			/* All others are translated to HOST_UNREACH.
 174			   rfc2003 contains "deep thoughts" about NET_UNREACH,
 175			   I believe they are just ether pollution. --ANK
 176			 */
 177			break;
 178		}
 179		break;
 180
 181	case ICMP_TIME_EXCEEDED:
 182		if (code != ICMP_EXC_TTL)
 183			return 0;
 184		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
 185		break;
 186
 187	case ICMP_REDIRECT:
 188		break;
 189	}
 190
 191#if IS_ENABLED(CONFIG_IPV6)
 192	if (tpi->proto == htons(ETH_P_IPV6) &&
 193	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
 194					type, data_len))
 195		return 0;
 196#endif
 197
 198	if (t->parms.iph.daddr == 0 ||
 199	    ipv4_is_multicast(t->parms.iph.daddr))
 200		return 0;
 201
 202	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
 203		return 0;
 204
 205	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
 206		t->err_count++;
 207	else
 208		t->err_count = 1;
 209	t->err_time = jiffies;
 210
 211	return 0;
 212}
 213
 214static void gre_err(struct sk_buff *skb, u32 info)
 215{
 216	/* All the routers (except for Linux) return only
 217	 * 8 bytes of packet payload. It means, that precise relaying of
 218	 * ICMP in the real Internet is absolutely infeasible.
 219	 *
 220	 * Moreover, Cisco "wise men" put GRE key to the third word
 221	 * in GRE header. It makes impossible maintaining even soft
 222	 * state for keyed
 223	 * GRE tunnels with enabled checksum. Tell them "thank you".
 224	 *
 225	 * Well, I wonder, rfc1812 was written by Cisco employee,
 226	 * what the hell these idiots break standards established
 227	 * by themselves???
 228	 */
 229
 230	const struct iphdr *iph = (struct iphdr *)skb->data;
 231	const int type = icmp_hdr(skb)->type;
 232	const int code = icmp_hdr(skb)->code;
 233	struct tnl_ptk_info tpi;
 234
 235	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
 236			     iph->ihl * 4) < 0)
 237		return;
 238
 239	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
 240		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
 241				 skb->dev->ifindex, IPPROTO_GRE);
 242		return;
 243	}
 244	if (type == ICMP_REDIRECT) {
 245		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
 246			      IPPROTO_GRE);
 247		return;
 248	}
 249
 250	ipgre_err(skb, info, &tpi);
 251}
 252
 253static bool is_erspan_type1(int gre_hdr_len)
 254{
 255	/* Both ERSPAN type I (version 0) and type II (version 1) use
 256	 * protocol 0x88BE, but the type I has only 4-byte GRE header,
 257	 * while type II has 8-byte.
 258	 */
 259	return gre_hdr_len == 4;
 260}
 261
 262static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 263		      int gre_hdr_len)
 264{
 265	struct net *net = dev_net(skb->dev);
 266	struct metadata_dst *tun_dst = NULL;
 267	struct erspan_base_hdr *ershdr;
 268	struct ip_tunnel_net *itn;
 269	struct ip_tunnel *tunnel;
 270	const struct iphdr *iph;
 271	struct erspan_md2 *md2;
 272	int ver;
 273	int len;
 274
 275	itn = net_generic(net, erspan_net_id);
 276	iph = ip_hdr(skb);
 277	if (is_erspan_type1(gre_hdr_len)) {
 278		ver = 0;
 279		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
 280					  tpi->flags | TUNNEL_NO_KEY,
 281					  iph->saddr, iph->daddr, 0);
 282	} else {
 283		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
 284		ver = ershdr->ver;
 285		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
 286					  tpi->flags | TUNNEL_KEY,
 287					  iph->saddr, iph->daddr, tpi->key);
 288	}
 289
 290	if (tunnel) {
 291		if (is_erspan_type1(gre_hdr_len))
 292			len = gre_hdr_len;
 293		else
 294			len = gre_hdr_len + erspan_hdr_len(ver);
 295
 296		if (unlikely(!pskb_may_pull(skb, len)))
 297			return PACKET_REJECT;
 298
 299		if (__iptunnel_pull_header(skb,
 300					   len,
 301					   htons(ETH_P_TEB),
 302					   false, false) < 0)
 303			goto drop;
 304
 305		if (tunnel->collect_md) {
 306			struct erspan_metadata *pkt_md, *md;
 307			struct ip_tunnel_info *info;
 308			unsigned char *gh;
 309			__be64 tun_id;
 310			__be16 flags;
 311
 312			tpi->flags |= TUNNEL_KEY;
 313			flags = tpi->flags;
 314			tun_id = key32_to_tunnel_id(tpi->key);
 315
 316			tun_dst = ip_tun_rx_dst(skb, flags,
 317						tun_id, sizeof(*md));
 318			if (!tun_dst)
 319				return PACKET_REJECT;
 320
 321			/* skb can be uncloned in __iptunnel_pull_header, so
 322			 * old pkt_md is no longer valid and we need to reset
 323			 * it
 324			 */
 325			gh = skb_network_header(skb) +
 326			     skb_network_header_len(skb);
 327			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
 328							    sizeof(*ershdr));
 329			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
 330			md->version = ver;
 331			md2 = &md->u.md2;
 332			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
 333						       ERSPAN_V2_MDSIZE);
 334
 335			info = &tun_dst->u.tun_info;
 336			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
 337			info->options_len = sizeof(*md);
 338		}
 339
 340		skb_reset_mac_header(skb);
 341		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 342		return PACKET_RCVD;
 343	}
 344	return PACKET_REJECT;
 345
 346drop:
 347	kfree_skb(skb);
 348	return PACKET_RCVD;
 349}
 350
 351static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 352		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
 353{
 354	struct metadata_dst *tun_dst = NULL;
 355	const struct iphdr *iph;
 356	struct ip_tunnel *tunnel;
 357
 358	iph = ip_hdr(skb);
 359	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 360				  iph->saddr, iph->daddr, tpi->key);
 361
 362	if (tunnel) {
 363		const struct iphdr *tnl_params;
 364
 365		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
 366					   raw_proto, false) < 0)
 367			goto drop;
 368
 369		/* Special case for ipgre_header_parse(), which expects the
 370		 * mac_header to point to the outer IP header.
 371		 */
 372		if (tunnel->dev->header_ops == &ipgre_header_ops)
 373			skb_pop_mac_header(skb);
 374		else
 375			skb_reset_mac_header(skb);
 376
 377		tnl_params = &tunnel->parms.iph;
 378		if (tunnel->collect_md || tnl_params->daddr == 0) {
 379			__be16 flags;
 380			__be64 tun_id;
 381
 382			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
 383			tun_id = key32_to_tunnel_id(tpi->key);
 384			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
 385			if (!tun_dst)
 386				return PACKET_REJECT;
 387		}
 388
 389		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 390		return PACKET_RCVD;
 391	}
 392	return PACKET_NEXT;
 393
 394drop:
 395	kfree_skb(skb);
 396	return PACKET_RCVD;
 397}
 398
 399static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 400		     int hdr_len)
 401{
 402	struct net *net = dev_net(skb->dev);
 403	struct ip_tunnel_net *itn;
 404	int res;
 405
 406	if (tpi->proto == htons(ETH_P_TEB))
 407		itn = net_generic(net, gre_tap_net_id);
 408	else
 409		itn = net_generic(net, ipgre_net_id);
 410
 411	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
 412	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
 413		/* ipgre tunnels in collect metadata mode should receive
 414		 * also ETH_P_TEB traffic.
 415		 */
 416		itn = net_generic(net, ipgre_net_id);
 417		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
 418	}
 419	return res;
 420}
 421
 422static int gre_rcv(struct sk_buff *skb)
 423{
 424	struct tnl_ptk_info tpi;
 425	bool csum_err = false;
 426	int hdr_len;
 427
 428#ifdef CONFIG_NET_IPGRE_BROADCAST
 429	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
 430		/* Looped back packet, drop it! */
 431		if (rt_is_output_route(skb_rtable(skb)))
 432			goto drop;
 433	}
 434#endif
 435
 436	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
 437	if (hdr_len < 0)
 438		goto drop;
 439
 440	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
 441		     tpi.proto == htons(ETH_P_ERSPAN2))) {
 442		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 443			return 0;
 444		goto out;
 445	}
 446
 447	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 448		return 0;
 449
 450out:
 451	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 452drop:
 453	kfree_skb(skb);
 454	return 0;
 455}
 456
 457static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 458		       const struct iphdr *tnl_params,
 459		       __be16 proto)
 460{
 461	struct ip_tunnel *tunnel = netdev_priv(dev);
 462	__be16 flags = tunnel->parms.o_flags;
 463
 464	/* Push GRE header. */
 465	gre_build_header(skb, tunnel->tun_hlen,
 466			 flags, proto, tunnel->parms.o_key,
 467			 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
 468
 469	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 470}
 471
 472static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 473{
 474	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 475}
 476
 477static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 478			__be16 proto)
 479{
 480	struct ip_tunnel *tunnel = netdev_priv(dev);
 481	struct ip_tunnel_info *tun_info;
 482	const struct ip_tunnel_key *key;
 483	int tunnel_hlen;
 484	__be16 flags;
 485
 486	tun_info = skb_tunnel_info(skb);
 487	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 488		     ip_tunnel_info_af(tun_info) != AF_INET))
 489		goto err_free_skb;
 490
 491	key = &tun_info->key;
 492	tunnel_hlen = gre_calc_hlen(key->tun_flags);
 493
 494	if (skb_cow_head(skb, dev->needed_headroom))
 495		goto err_free_skb;
 496
 497	/* Push Tunnel header. */
 498	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
 499		goto err_free_skb;
 500
 501	flags = tun_info->key.tun_flags &
 502		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
 503	gre_build_header(skb, tunnel_hlen, flags, proto,
 504			 tunnel_id_to_key32(tun_info->key.tun_id),
 505			 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
 506
 507	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
 508
 509	return;
 510
 511err_free_skb:
 512	kfree_skb(skb);
 513	DEV_STATS_INC(dev, tx_dropped);
 514}
 515
 516static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 517{
 518	struct ip_tunnel *tunnel = netdev_priv(dev);
 519	struct ip_tunnel_info *tun_info;
 520	const struct ip_tunnel_key *key;
 521	struct erspan_metadata *md;
 522	bool truncate = false;
 523	__be16 proto;
 524	int tunnel_hlen;
 525	int version;
 526	int nhoff;
 527
 528	tun_info = skb_tunnel_info(skb);
 529	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 530		     ip_tunnel_info_af(tun_info) != AF_INET))
 531		goto err_free_skb;
 532
 533	key = &tun_info->key;
 534	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
 535		goto err_free_skb;
 536	if (tun_info->options_len < sizeof(*md))
 537		goto err_free_skb;
 538	md = ip_tunnel_info_opts(tun_info);
 539
 540	/* ERSPAN has fixed 8 byte GRE header */
 541	version = md->version;
 542	tunnel_hlen = 8 + erspan_hdr_len(version);
 543
 544	if (skb_cow_head(skb, dev->needed_headroom))
 545		goto err_free_skb;
 546
 547	if (gre_handle_offloads(skb, false))
 548		goto err_free_skb;
 549
 550	if (skb->len > dev->mtu + dev->hard_header_len) {
 551		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
 552			goto err_free_skb;
 553		truncate = true;
 554	}
 555
 556	nhoff = skb_network_offset(skb);
 557	if (skb->protocol == htons(ETH_P_IP) &&
 558	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
 559		truncate = true;
 560
 561	if (skb->protocol == htons(ETH_P_IPV6)) {
 562		int thoff;
 563
 564		if (skb_transport_header_was_set(skb))
 565			thoff = skb_transport_offset(skb);
 566		else
 567			thoff = nhoff + sizeof(struct ipv6hdr);
 568		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
 569			truncate = true;
 570	}
 571
 572	if (version == 1) {
 573		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
 574				    ntohl(md->u.index), truncate, true);
 575		proto = htons(ETH_P_ERSPAN);
 576	} else if (version == 2) {
 577		erspan_build_header_v2(skb,
 578				       ntohl(tunnel_id_to_key32(key->tun_id)),
 579				       md->u.md2.dir,
 580				       get_hwid(&md->u.md2),
 581				       truncate, true);
 582		proto = htons(ETH_P_ERSPAN2);
 583	} else {
 584		goto err_free_skb;
 585	}
 586
 587	gre_build_header(skb, 8, TUNNEL_SEQ,
 588			 proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));
 589
 590	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
 591
 592	return;
 593
 594err_free_skb:
 595	kfree_skb(skb);
 596	DEV_STATS_INC(dev, tx_dropped);
 597}
 598
 599static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 600{
 601	struct ip_tunnel_info *info = skb_tunnel_info(skb);
 602	const struct ip_tunnel_key *key;
 603	struct rtable *rt;
 604	struct flowi4 fl4;
 605
 606	if (ip_tunnel_info_af(info) != AF_INET)
 607		return -EINVAL;
 608
 609	key = &info->key;
 610	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
 611			    tunnel_id_to_key32(key->tun_id),
 612			    key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
 613			    skb->mark, skb_get_hash(skb), key->flow_flags);
 614	rt = ip_route_output_key(dev_net(dev), &fl4);
 615	if (IS_ERR(rt))
 616		return PTR_ERR(rt);
 617
 618	ip_rt_put(rt);
 619	info->key.u.ipv4.src = fl4.saddr;
 620	return 0;
 621}
 622
 623static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
 624			      struct net_device *dev)
 625{
 626	struct ip_tunnel *tunnel = netdev_priv(dev);
 627	const struct iphdr *tnl_params;
 628
 629	if (!pskb_inet_may_pull(skb))
 630		goto free_skb;
 631
 632	if (tunnel->collect_md) {
 633		gre_fb_xmit(skb, dev, skb->protocol);
 634		return NETDEV_TX_OK;
 635	}
 636
 637	if (dev->header_ops) {
 638		int pull_len = tunnel->hlen + sizeof(struct iphdr);
 639
 640		if (skb_cow_head(skb, 0))
 641			goto free_skb;
 642
 643		tnl_params = (const struct iphdr *)skb->data;
 644
 645		if (!pskb_network_may_pull(skb, pull_len))
 646			goto free_skb;
 647
 648		/* ip_tunnel_xmit() needs skb->data pointing to gre header. */
 649		skb_pull(skb, pull_len);
 650		skb_reset_mac_header(skb);
 651
 652		if (skb->ip_summed == CHECKSUM_PARTIAL &&
 653		    skb_checksum_start(skb) < skb->data)
 654			goto free_skb;
 655	} else {
 656		if (skb_cow_head(skb, dev->needed_headroom))
 657			goto free_skb;
 658
 659		tnl_params = &tunnel->parms.iph;
 660	}
 661
 662	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
 663		goto free_skb;
 664
 665	__gre_xmit(skb, dev, tnl_params, skb->protocol);
 666	return NETDEV_TX_OK;
 667
 668free_skb:
 669	kfree_skb(skb);
 670	DEV_STATS_INC(dev, tx_dropped);
 671	return NETDEV_TX_OK;
 672}
 673
 674static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 675			       struct net_device *dev)
 676{
 677	struct ip_tunnel *tunnel = netdev_priv(dev);
 678	bool truncate = false;
 679	__be16 proto;
 680
 681	if (!pskb_inet_may_pull(skb))
 682		goto free_skb;
 683
 684	if (tunnel->collect_md) {
 685		erspan_fb_xmit(skb, dev);
 686		return NETDEV_TX_OK;
 687	}
 688
 689	if (gre_handle_offloads(skb, false))
 690		goto free_skb;
 691
 692	if (skb_cow_head(skb, dev->needed_headroom))
 693		goto free_skb;
 694
 695	if (skb->len > dev->mtu + dev->hard_header_len) {
 696		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
 697			goto free_skb;
 698		truncate = true;
 699	}
 700
 701	/* Push ERSPAN header */
 702	if (tunnel->erspan_ver == 0) {
 703		proto = htons(ETH_P_ERSPAN);
 704		tunnel->parms.o_flags &= ~TUNNEL_SEQ;
 705	} else if (tunnel->erspan_ver == 1) {
 706		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
 707				    tunnel->index,
 708				    truncate, true);
 709		proto = htons(ETH_P_ERSPAN);
 710	} else if (tunnel->erspan_ver == 2) {
 711		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
 712				       tunnel->dir, tunnel->hwid,
 713				       truncate, true);
 714		proto = htons(ETH_P_ERSPAN2);
 715	} else {
 716		goto free_skb;
 717	}
 718
 719	tunnel->parms.o_flags &= ~TUNNEL_KEY;
 720	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
 721	return NETDEV_TX_OK;
 722
 723free_skb:
 724	kfree_skb(skb);
 725	DEV_STATS_INC(dev, tx_dropped);
 726	return NETDEV_TX_OK;
 727}
 728
 729static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
 730				struct net_device *dev)
 731{
 732	struct ip_tunnel *tunnel = netdev_priv(dev);
 733
 734	if (!pskb_inet_may_pull(skb))
 735		goto free_skb;
 736
 737	if (tunnel->collect_md) {
 738		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
 739		return NETDEV_TX_OK;
 740	}
 741
 742	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
 743		goto free_skb;
 744
 745	if (skb_cow_head(skb, dev->needed_headroom))
 746		goto free_skb;
 747
 748	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
 749	return NETDEV_TX_OK;
 750
 751free_skb:
 752	kfree_skb(skb);
 753	DEV_STATS_INC(dev, tx_dropped);
 754	return NETDEV_TX_OK;
 755}
 756
 757static void ipgre_link_update(struct net_device *dev, bool set_mtu)
 758{
 759	struct ip_tunnel *tunnel = netdev_priv(dev);
 760	__be16 flags;
 761	int len;
 762
 763	len = tunnel->tun_hlen;
 764	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
 765	len = tunnel->tun_hlen - len;
 766	tunnel->hlen = tunnel->hlen + len;
 767
 768	if (dev->header_ops)
 769		dev->hard_header_len += len;
 770	else
 771		dev->needed_headroom += len;
 772
 773	if (set_mtu)
 774		dev->mtu = max_t(int, dev->mtu - len, 68);
 775
 776	flags = tunnel->parms.o_flags;
 777
 778	if (flags & TUNNEL_SEQ ||
 779	    (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
 780		dev->features &= ~NETIF_F_GSO_SOFTWARE;
 781		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
 782	} else {
 783		dev->features |= NETIF_F_GSO_SOFTWARE;
 784		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
 785	}
 786}
 787
 788static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p,
 789			    int cmd)
 790{
 791	int err;
 792
 793	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
 794		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
 795		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
 796		    ((p->i_flags | p->o_flags) & (GRE_VERSION | GRE_ROUTING)))
 797			return -EINVAL;
 798	}
 799
 800	p->i_flags = gre_flags_to_tnl_flags(p->i_flags);
 801	p->o_flags = gre_flags_to_tnl_flags(p->o_flags);
 802
 803	err = ip_tunnel_ctl(dev, p, cmd);
 804	if (err)
 805		return err;
 806
 807	if (cmd == SIOCCHGTUNNEL) {
 808		struct ip_tunnel *t = netdev_priv(dev);
 809
 810		t->parms.i_flags = p->i_flags;
 811		t->parms.o_flags = p->o_flags;
 812
 813		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
 814			ipgre_link_update(dev, true);
 815	}
 816
 817	p->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
 818	p->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
 819	return 0;
 820}
 821
 822/* Nice toy. Unfortunately, useless in real life :-)
 823   It allows to construct virtual multiprotocol broadcast "LAN"
 824   over the Internet, provided multicast routing is tuned.
 825
 826
 827   I have no idea was this bicycle invented before me,
 828   so that I had to set ARPHRD_IPGRE to a random value.
 829   I have an impression, that Cisco could make something similar,
 830   but this feature is apparently missing in IOS<=11.2(8).
 831
 832   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
 833   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
 834
 835   ping -t 255 224.66.66.66
 836
 837   If nobody answers, mbone does not work.
 838
 839   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
 840   ip addr add 10.66.66.<somewhat>/24 dev Universe
 841   ifconfig Universe up
 842   ifconfig Universe add fe80::<Your_real_addr>/10
 843   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
 844   ftp 10.66.66.66
 845   ...
 846   ftp fec0:6666:6666::193.233.7.65
 847   ...
 848 */
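/* Illustrative iproute2-only equivalent of the ifconfig commands above
 * (same placeholder addresses as in the original example):
 *
 *   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
 *   ip link set Universe up
 *   ip addr add 10.66.66.<somewhat>/24 dev Universe
 *   ip -6 addr add fec0:6666:6666::<Your_real_addr>/96 dev Universe
 */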
 849static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
 850			unsigned short type,
 851			const void *daddr, const void *saddr, unsigned int len)
 852{
 853	struct ip_tunnel *t = netdev_priv(dev);
 854	struct iphdr *iph;
 855	struct gre_base_hdr *greh;
 856
 857	iph = skb_push(skb, t->hlen + sizeof(*iph));
 858	greh = (struct gre_base_hdr *)(iph+1);
 859	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
 860	greh->protocol = htons(type);
 861
 862	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
 863
 864	/* Set the source hardware address. */
 865	if (saddr)
 866		memcpy(&iph->saddr, saddr, 4);
 867	if (daddr)
 868		memcpy(&iph->daddr, daddr, 4);
 869	if (iph->daddr)
 870		return t->hlen + sizeof(*iph);
 871
 872	return -(t->hlen + sizeof(*iph));
 873}
 874
 875static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
 876{
 877	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
 878	memcpy(haddr, &iph->saddr, 4);
 879	return 4;
 880}
 881
 882static const struct header_ops ipgre_header_ops = {
 883	.create	= ipgre_header,
 884	.parse	= ipgre_header_parse,
 885};
 886
 887#ifdef CONFIG_NET_IPGRE_BROADCAST
 888static int ipgre_open(struct net_device *dev)
 889{
 890	struct ip_tunnel *t = netdev_priv(dev);
 891
 892	if (ipv4_is_multicast(t->parms.iph.daddr)) {
 893		struct flowi4 fl4;
 894		struct rtable *rt;
 895
 896		rt = ip_route_output_gre(t->net, &fl4,
 897					 t->parms.iph.daddr,
 898					 t->parms.iph.saddr,
 899					 t->parms.o_key,
 900					 RT_TOS(t->parms.iph.tos),
 901					 t->parms.link);
 902		if (IS_ERR(rt))
 903			return -EADDRNOTAVAIL;
 904		dev = rt->dst.dev;
 905		ip_rt_put(rt);
 906		if (!__in_dev_get_rtnl(dev))
 907			return -EADDRNOTAVAIL;
 908		t->mlink = dev->ifindex;
 909		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
 910	}
 911	return 0;
 912}
 913
 914static int ipgre_close(struct net_device *dev)
 915{
 916	struct ip_tunnel *t = netdev_priv(dev);
 917
 918	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
 919		struct in_device *in_dev;
 920		in_dev = inetdev_by_index(t->net, t->mlink);
 921		if (in_dev)
 922			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
 923	}
 924	return 0;
 925}
 926#endif
 927
 928static const struct net_device_ops ipgre_netdev_ops = {
 929	.ndo_init		= ipgre_tunnel_init,
 930	.ndo_uninit		= ip_tunnel_uninit,
 931#ifdef CONFIG_NET_IPGRE_BROADCAST
 932	.ndo_open		= ipgre_open,
 933	.ndo_stop		= ipgre_close,
 934#endif
 935	.ndo_start_xmit		= ipgre_xmit,
 936	.ndo_siocdevprivate	= ip_tunnel_siocdevprivate,
 937	.ndo_change_mtu		= ip_tunnel_change_mtu,
 938	.ndo_get_stats64	= dev_get_tstats64,
 939	.ndo_get_iflink		= ip_tunnel_get_iflink,
 940	.ndo_tunnel_ctl		= ipgre_tunnel_ctl,
 941};
 942
 943#define GRE_FEATURES (NETIF_F_SG |		\
 944		      NETIF_F_FRAGLIST |	\
 945		      NETIF_F_HIGHDMA |		\
 946		      NETIF_F_HW_CSUM)
 947
 948static void ipgre_tunnel_setup(struct net_device *dev)
 949{
 950	dev->netdev_ops		= &ipgre_netdev_ops;
 951	dev->type		= ARPHRD_IPGRE;
 952	ip_tunnel_setup(dev, ipgre_net_id);
 953}
 954
 955static void __gre_tunnel_init(struct net_device *dev)
 956{
 957	struct ip_tunnel *tunnel;
 958	__be16 flags;
 959
 960	tunnel = netdev_priv(dev);
 961	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
 962	tunnel->parms.iph.protocol = IPPROTO_GRE;
 963
 964	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
 965	dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
 966
 967	dev->features		|= GRE_FEATURES | NETIF_F_LLTX;
 968	dev->hw_features	|= GRE_FEATURES;
 969
 970	flags = tunnel->parms.o_flags;
 971
 972	/* TCP offload with GRE SEQ is not supported, nor can we support 2
 973	 * levels of outer headers requiring an update.
 974	 */
 975	if (flags & TUNNEL_SEQ)
 976		return;
 977	if (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)
 978		return;
 979
 980	dev->features |= NETIF_F_GSO_SOFTWARE;
 981	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
 982}
 983
 984static int ipgre_tunnel_init(struct net_device *dev)
 985{
 986	struct ip_tunnel *tunnel = netdev_priv(dev);
 987	struct iphdr *iph = &tunnel->parms.iph;
 988
 989	__gre_tunnel_init(dev);
 990
 991	__dev_addr_set(dev, &iph->saddr, 4);
 992	memcpy(dev->broadcast, &iph->daddr, 4);
 993
 994	dev->flags		= IFF_NOARP;
 995	netif_keep_dst(dev);
 996	dev->addr_len		= 4;
 997
 998	if (iph->daddr && !tunnel->collect_md) {
 999#ifdef CONFIG_NET_IPGRE_BROADCAST
1000		if (ipv4_is_multicast(iph->daddr)) {
1001			if (!iph->saddr)
1002				return -EINVAL;
1003			dev->flags = IFF_BROADCAST;
1004			dev->header_ops = &ipgre_header_ops;
1005			dev->hard_header_len = tunnel->hlen + sizeof(*iph);
1006			dev->needed_headroom = 0;
1007		}
1008#endif
1009	} else if (!tunnel->collect_md) {
1010		dev->header_ops = &ipgre_header_ops;
1011		dev->hard_header_len = tunnel->hlen + sizeof(*iph);
1012		dev->needed_headroom = 0;
1013	}
1014
1015	return ip_tunnel_init(dev);
1016}
1017
1018static const struct gre_protocol ipgre_protocol = {
1019	.handler     = gre_rcv,
1020	.err_handler = gre_err,
1021};
1022
1023static int __net_init ipgre_init_net(struct net *net)
1024{
1025	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
1026}
1027
1028static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
1029{
1030	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
1031}
1032
1033static struct pernet_operations ipgre_net_ops = {
1034	.init = ipgre_init_net,
1035	.exit_batch = ipgre_exit_batch_net,
1036	.id   = &ipgre_net_id,
1037	.size = sizeof(struct ip_tunnel_net),
1038};
1039
1040static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1041				 struct netlink_ext_ack *extack)
1042{
1043	__be16 flags;
1044
1045	if (!data)
1046		return 0;
1047
1048	flags = 0;
1049	if (data[IFLA_GRE_IFLAGS])
1050		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1051	if (data[IFLA_GRE_OFLAGS])
1052		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1053	if (flags & (GRE_VERSION|GRE_ROUTING))
1054		return -EINVAL;
1055
1056	if (data[IFLA_GRE_COLLECT_METADATA] &&
1057	    data[IFLA_GRE_ENCAP_TYPE] &&
1058	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
1059		return -EINVAL;
1060
1061	return 0;
1062}
1063
1064static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1065			      struct netlink_ext_ack *extack)
1066{
1067	__be32 daddr;
1068
1069	if (tb[IFLA_ADDRESS]) {
1070		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1071			return -EINVAL;
1072		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1073			return -EADDRNOTAVAIL;
1074	}
1075
1076	if (!data)
1077		goto out;
1078
1079	if (data[IFLA_GRE_REMOTE]) {
1080		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1081		if (!daddr)
1082			return -EINVAL;
1083	}
1084
1085out:
1086	return ipgre_tunnel_validate(tb, data, extack);
1087}
1088
1089static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
1090			   struct netlink_ext_ack *extack)
1091{
1092	__be16 flags = 0;
1093	int ret;
1094
1095	if (!data)
1096		return 0;
1097
1098	ret = ipgre_tap_validate(tb, data, extack);
1099	if (ret)
1100		return ret;
1101
1102	if (data[IFLA_GRE_ERSPAN_VER] &&
1103	    nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
1104		return 0;
1105
1106	/* ERSPAN type II/III should only have GRE sequence and key flag */
1107	if (data[IFLA_GRE_OFLAGS])
1108		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1109	if (data[IFLA_GRE_IFLAGS])
1110		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1111	if (!data[IFLA_GRE_COLLECT_METADATA] &&
1112	    flags != (GRE_SEQ | GRE_KEY))
1113		return -EINVAL;
1114
1115	/* ERSPAN Session ID only has 10-bit. Since we reuse
 1116	 * 32-bit key field as ID, check its range.
1117	 */
1118	if (data[IFLA_GRE_IKEY] &&
1119	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1120		return -EINVAL;
1121
1122	if (data[IFLA_GRE_OKEY] &&
1123	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1124		return -EINVAL;
1125
1126	return 0;
1127}
1128
1129static int ipgre_netlink_parms(struct net_device *dev,
1130				struct nlattr *data[],
1131				struct nlattr *tb[],
1132				struct ip_tunnel_parm *parms,
1133				__u32 *fwmark)
1134{
1135	struct ip_tunnel *t = netdev_priv(dev);
1136
1137	memset(parms, 0, sizeof(*parms));
1138
1139	parms->iph.protocol = IPPROTO_GRE;
1140
1141	if (!data)
1142		return 0;
1143
1144	if (data[IFLA_GRE_LINK])
1145		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1146
1147	if (data[IFLA_GRE_IFLAGS])
1148		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));
1149
1150	if (data[IFLA_GRE_OFLAGS])
1151		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));
1152
1153	if (data[IFLA_GRE_IKEY])
1154		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1155
1156	if (data[IFLA_GRE_OKEY])
1157		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1158
1159	if (data[IFLA_GRE_LOCAL])
1160		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
1161
1162	if (data[IFLA_GRE_REMOTE])
1163		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
1164
1165	if (data[IFLA_GRE_TTL])
1166		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1167
1168	if (data[IFLA_GRE_TOS])
1169		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1170
1171	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
1172		if (t->ignore_df)
1173			return -EINVAL;
1174		parms->iph.frag_off = htons(IP_DF);
1175	}
1176
1177	if (data[IFLA_GRE_COLLECT_METADATA]) {
1178		t->collect_md = true;
1179		if (dev->type == ARPHRD_IPGRE)
1180			dev->type = ARPHRD_NONE;
1181	}
1182
1183	if (data[IFLA_GRE_IGNORE_DF]) {
1184		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
1185		  && (parms->iph.frag_off & htons(IP_DF)))
1186			return -EINVAL;
1187		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
1188	}
1189
1190	if (data[IFLA_GRE_FWMARK])
1191		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1192
1193	return 0;
1194}
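/* Illustrative mapping (assuming current iproute2 syntax): the attributes
 * parsed above are what "ip link" fills in, e.g.
 *
 *   ip link add gre1 type gre local 192.0.2.1 remote 198.51.100.1 ttl 64 key 42
 *
 * sets IFLA_GRE_LOCAL, IFLA_GRE_REMOTE, IFLA_GRE_TTL and both IFLA_GRE_IKEY
 * and IFLA_GRE_OKEY, while "external" requests IFLA_GRE_COLLECT_METADATA.
 */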
1195
1196static int erspan_netlink_parms(struct net_device *dev,
1197				struct nlattr *data[],
1198				struct nlattr *tb[],
1199				struct ip_tunnel_parm *parms,
1200				__u32 *fwmark)
1201{
1202	struct ip_tunnel *t = netdev_priv(dev);
1203	int err;
1204
1205	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
1206	if (err)
1207		return err;
1208	if (!data)
1209		return 0;
1210
1211	if (data[IFLA_GRE_ERSPAN_VER]) {
1212		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1213
1214		if (t->erspan_ver > 2)
1215			return -EINVAL;
1216	}
1217
1218	if (t->erspan_ver == 1) {
1219		if (data[IFLA_GRE_ERSPAN_INDEX]) {
1220			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1221			if (t->index & ~INDEX_MASK)
1222				return -EINVAL;
1223		}
1224	} else if (t->erspan_ver == 2) {
1225		if (data[IFLA_GRE_ERSPAN_DIR]) {
1226			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1227			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
1228				return -EINVAL;
1229		}
1230		if (data[IFLA_GRE_ERSPAN_HWID]) {
1231			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1232			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
1233				return -EINVAL;
1234		}
1235	}
1236
1237	return 0;
1238}
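/* Illustrative example (assuming current iproute2 syntax): an ERSPAN v1
 * device using the attributes parsed above, with the key doubling as the
 * 10-bit session ID, might be created as
 *
 *   ip link add erspan1 type erspan local 192.0.2.1 remote 198.51.100.1 \
 *           seq key 100 erspan_ver 1 erspan 123
 *
 * For version 2, erspan_dir and erspan_hwid take the place of the index.
 */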
1239
1240/* This function returns true when ENCAP attributes are present in the nl msg */
1241static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1242				      struct ip_tunnel_encap *ipencap)
1243{
1244	bool ret = false;
1245
1246	memset(ipencap, 0, sizeof(*ipencap));
1247
1248	if (!data)
1249		return ret;
1250
1251	if (data[IFLA_GRE_ENCAP_TYPE]) {
1252		ret = true;
1253		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1254	}
1255
1256	if (data[IFLA_GRE_ENCAP_FLAGS]) {
1257		ret = true;
1258		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1259	}
1260
1261	if (data[IFLA_GRE_ENCAP_SPORT]) {
1262		ret = true;
1263		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1264	}
1265
1266	if (data[IFLA_GRE_ENCAP_DPORT]) {
1267		ret = true;
1268		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1269	}
1270
1271	return ret;
1272}
1273
1274static int gre_tap_init(struct net_device *dev)
1275{
1276	__gre_tunnel_init(dev);
1277	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1278	netif_keep_dst(dev);
1279
1280	return ip_tunnel_init(dev);
1281}
1282
1283static const struct net_device_ops gre_tap_netdev_ops = {
1284	.ndo_init		= gre_tap_init,
1285	.ndo_uninit		= ip_tunnel_uninit,
1286	.ndo_start_xmit		= gre_tap_xmit,
1287	.ndo_set_mac_address 	= eth_mac_addr,
1288	.ndo_validate_addr	= eth_validate_addr,
1289	.ndo_change_mtu		= ip_tunnel_change_mtu,
1290	.ndo_get_stats64	= dev_get_tstats64,
1291	.ndo_get_iflink		= ip_tunnel_get_iflink,
1292	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1293};
1294
1295static int erspan_tunnel_init(struct net_device *dev)
1296{
1297	struct ip_tunnel *tunnel = netdev_priv(dev);
1298
1299	if (tunnel->erspan_ver == 0)
1300		tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
1301	else
1302		tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */
1303
1304	tunnel->parms.iph.protocol = IPPROTO_GRE;
1305	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1306		       erspan_hdr_len(tunnel->erspan_ver);
1307
1308	dev->features		|= GRE_FEATURES;
1309	dev->hw_features	|= GRE_FEATURES;
1310	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
1311	netif_keep_dst(dev);
1312
1313	return ip_tunnel_init(dev);
1314}
1315
1316static const struct net_device_ops erspan_netdev_ops = {
1317	.ndo_init		= erspan_tunnel_init,
1318	.ndo_uninit		= ip_tunnel_uninit,
1319	.ndo_start_xmit		= erspan_xmit,
1320	.ndo_set_mac_address	= eth_mac_addr,
1321	.ndo_validate_addr	= eth_validate_addr,
1322	.ndo_change_mtu		= ip_tunnel_change_mtu,
1323	.ndo_get_stats64	= dev_get_tstats64,
1324	.ndo_get_iflink		= ip_tunnel_get_iflink,
1325	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1326};
1327
1328static void ipgre_tap_setup(struct net_device *dev)
1329{
1330	ether_setup(dev);
1331	dev->max_mtu = 0;
1332	dev->netdev_ops	= &gre_tap_netdev_ops;
1333	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1334	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
1335	ip_tunnel_setup(dev, gre_tap_net_id);
1336}
1337
1338static int
1339ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
1340{
1341	struct ip_tunnel_encap ipencap;
1342
1343	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1344		struct ip_tunnel *t = netdev_priv(dev);
1345		int err = ip_tunnel_encap_setup(t, &ipencap);
1346
1347		if (err < 0)
1348			return err;
1349	}
1350
1351	return 0;
1352}
1353
1354static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1355			 struct nlattr *tb[], struct nlattr *data[],
1356			 struct netlink_ext_ack *extack)
1357{
1358	struct ip_tunnel_parm p;
1359	__u32 fwmark = 0;
1360	int err;
1361
1362	err = ipgre_newlink_encap_setup(dev, data);
1363	if (err)
1364		return err;
1365
1366	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1367	if (err < 0)
1368		return err;
1369	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1370}
1371
1372static int erspan_newlink(struct net *src_net, struct net_device *dev,
1373			  struct nlattr *tb[], struct nlattr *data[],
1374			  struct netlink_ext_ack *extack)
1375{
1376	struct ip_tunnel_parm p;
1377	__u32 fwmark = 0;
1378	int err;
1379
1380	err = ipgre_newlink_encap_setup(dev, data);
1381	if (err)
1382		return err;
1383
1384	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1385	if (err)
1386		return err;
1387	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1388}
1389
1390static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1391			    struct nlattr *data[],
1392			    struct netlink_ext_ack *extack)
1393{
1394	struct ip_tunnel *t = netdev_priv(dev);
1395	__u32 fwmark = t->fwmark;
1396	struct ip_tunnel_parm p;
1397	int err;
1398
1399	err = ipgre_newlink_encap_setup(dev, data);
1400	if (err)
1401		return err;
1402
1403	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1404	if (err < 0)
1405		return err;
1406
1407	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1408	if (err < 0)
1409		return err;
1410
1411	t->parms.i_flags = p.i_flags;
1412	t->parms.o_flags = p.o_flags;
1413
1414	ipgre_link_update(dev, !tb[IFLA_MTU]);
1415
1416	return 0;
1417}
1418
1419static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
1420			     struct nlattr *data[],
1421			     struct netlink_ext_ack *extack)
1422{
1423	struct ip_tunnel *t = netdev_priv(dev);
1424	__u32 fwmark = t->fwmark;
1425	struct ip_tunnel_parm p;
1426	int err;
1427
1428	err = ipgre_newlink_encap_setup(dev, data);
1429	if (err)
1430		return err;
1431
1432	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1433	if (err < 0)
1434		return err;
1435
1436	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1437	if (err < 0)
1438		return err;
1439
1440	t->parms.i_flags = p.i_flags;
1441	t->parms.o_flags = p.o_flags;
1442
1443	return 0;
1444}
1445
1446static size_t ipgre_get_size(const struct net_device *dev)
1447{
1448	return
1449		/* IFLA_GRE_LINK */
1450		nla_total_size(4) +
1451		/* IFLA_GRE_IFLAGS */
1452		nla_total_size(2) +
1453		/* IFLA_GRE_OFLAGS */
1454		nla_total_size(2) +
1455		/* IFLA_GRE_IKEY */
1456		nla_total_size(4) +
1457		/* IFLA_GRE_OKEY */
1458		nla_total_size(4) +
1459		/* IFLA_GRE_LOCAL */
1460		nla_total_size(4) +
1461		/* IFLA_GRE_REMOTE */
1462		nla_total_size(4) +
1463		/* IFLA_GRE_TTL */
1464		nla_total_size(1) +
1465		/* IFLA_GRE_TOS */
1466		nla_total_size(1) +
1467		/* IFLA_GRE_PMTUDISC */
1468		nla_total_size(1) +
1469		/* IFLA_GRE_ENCAP_TYPE */
1470		nla_total_size(2) +
1471		/* IFLA_GRE_ENCAP_FLAGS */
1472		nla_total_size(2) +
1473		/* IFLA_GRE_ENCAP_SPORT */
1474		nla_total_size(2) +
1475		/* IFLA_GRE_ENCAP_DPORT */
1476		nla_total_size(2) +
1477		/* IFLA_GRE_COLLECT_METADATA */
1478		nla_total_size(0) +
1479		/* IFLA_GRE_IGNORE_DF */
1480		nla_total_size(1) +
1481		/* IFLA_GRE_FWMARK */
1482		nla_total_size(4) +
1483		/* IFLA_GRE_ERSPAN_INDEX */
1484		nla_total_size(4) +
1485		/* IFLA_GRE_ERSPAN_VER */
1486		nla_total_size(1) +
1487		/* IFLA_GRE_ERSPAN_DIR */
1488		nla_total_size(1) +
1489		/* IFLA_GRE_ERSPAN_HWID */
1490		nla_total_size(2) +
1491		0;
1492}
1493
1494static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1495{
1496	struct ip_tunnel *t = netdev_priv(dev);
1497	struct ip_tunnel_parm *p = &t->parms;
1498	__be16 o_flags = p->o_flags;
1499
1500	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1501	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
1502			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1503	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
1504			 gre_tnl_flags_to_gre_flags(o_flags)) ||
1505	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1506	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1507	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1508	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1509	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1510	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1511	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1512		       !!(p->iph.frag_off & htons(IP_DF))) ||
1513	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
1514		goto nla_put_failure;
1515
1516	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1517			t->encap.type) ||
1518	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
1519			 t->encap.sport) ||
1520	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
1521			 t->encap.dport) ||
1522	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
1523			t->encap.flags))
1524		goto nla_put_failure;
1525
1526	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
1527		goto nla_put_failure;
1528
1529	if (t->collect_md) {
1530		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
1531			goto nla_put_failure;
1532	}
1533
1534	return 0;
1535
1536nla_put_failure:
1537	return -EMSGSIZE;
1538}
1539
1540static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1541{
1542	struct ip_tunnel *t = netdev_priv(dev);
1543
1544	if (t->erspan_ver <= 2) {
1545		if (t->erspan_ver != 0 && !t->collect_md)
1546			t->parms.o_flags |= TUNNEL_KEY;
1547
1548		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1549			goto nla_put_failure;
1550
1551		if (t->erspan_ver == 1) {
1552			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1553				goto nla_put_failure;
1554		} else if (t->erspan_ver == 2) {
1555			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1556				goto nla_put_failure;
1557			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1558				goto nla_put_failure;
1559		}
1560	}
1561
1562	return ipgre_fill_info(skb, dev);
1563
1564nla_put_failure:
1565	return -EMSGSIZE;
1566}
1567
1568static void erspan_setup(struct net_device *dev)
1569{
1570	struct ip_tunnel *t = netdev_priv(dev);
1571
1572	ether_setup(dev);
1573	dev->max_mtu = 0;
1574	dev->netdev_ops = &erspan_netdev_ops;
1575	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1576	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1577	ip_tunnel_setup(dev, erspan_net_id);
1578	t->erspan_ver = 1;
1579}
1580
1581static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1582	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1583	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1584	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1585	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1586	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1587	[IFLA_GRE_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
1588	[IFLA_GRE_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
1589	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1590	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1591	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
1592	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
1593	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
1594	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
1595	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
1596	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
1597	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
1598	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
1599	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
1600	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
1601	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
1602	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
1603};
1604
1605static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1606	.kind		= "gre",
1607	.maxtype	= IFLA_GRE_MAX,
1608	.policy		= ipgre_policy,
1609	.priv_size	= sizeof(struct ip_tunnel),
1610	.setup		= ipgre_tunnel_setup,
1611	.validate	= ipgre_tunnel_validate,
1612	.newlink	= ipgre_newlink,
1613	.changelink	= ipgre_changelink,
1614	.dellink	= ip_tunnel_dellink,
1615	.get_size	= ipgre_get_size,
1616	.fill_info	= ipgre_fill_info,
1617	.get_link_net	= ip_tunnel_get_link_net,
1618};
1619
1620static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1621	.kind		= "gretap",
1622	.maxtype	= IFLA_GRE_MAX,
1623	.policy		= ipgre_policy,
1624	.priv_size	= sizeof(struct ip_tunnel),
1625	.setup		= ipgre_tap_setup,
1626	.validate	= ipgre_tap_validate,
1627	.newlink	= ipgre_newlink,
1628	.changelink	= ipgre_changelink,
1629	.dellink	= ip_tunnel_dellink,
1630	.get_size	= ipgre_get_size,
1631	.fill_info	= ipgre_fill_info,
1632	.get_link_net	= ip_tunnel_get_link_net,
1633};
1634
1635static struct rtnl_link_ops erspan_link_ops __read_mostly = {
1636	.kind		= "erspan",
1637	.maxtype	= IFLA_GRE_MAX,
1638	.policy		= ipgre_policy,
1639	.priv_size	= sizeof(struct ip_tunnel),
1640	.setup		= erspan_setup,
1641	.validate	= erspan_validate,
1642	.newlink	= erspan_newlink,
1643	.changelink	= erspan_changelink,
1644	.dellink	= ip_tunnel_dellink,
1645	.get_size	= ipgre_get_size,
1646	.fill_info	= erspan_fill_info,
1647	.get_link_net	= ip_tunnel_get_link_net,
1648};
1649
1650struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1651					u8 name_assign_type)
1652{
1653	struct nlattr *tb[IFLA_MAX + 1];
1654	struct net_device *dev;
1655	LIST_HEAD(list_kill);
1656	struct ip_tunnel *t;
1657	int err;
1658
1659	memset(&tb, 0, sizeof(tb));
1660
1661	dev = rtnl_create_link(net, name, name_assign_type,
1662			       &ipgre_tap_ops, tb, NULL);
1663	if (IS_ERR(dev))
1664		return dev;
1665
1666	/* Configure flow based GRE device. */
1667	t = netdev_priv(dev);
1668	t->collect_md = true;
1669
1670	err = ipgre_newlink(net, dev, tb, NULL, NULL);
1671	if (err < 0) {
1672		free_netdev(dev);
1673		return ERR_PTR(err);
1674	}
1675
1676	/* openvswitch users expect packet sizes to be unrestricted,
1677	 * so set the largest MTU we can.
1678	 */
1679	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1680	if (err)
1681		goto out;
1682
1683	err = rtnl_configure_link(dev, NULL, 0, NULL);
1684	if (err < 0)
1685		goto out;
1686
1687	return dev;
1688out:
1689	ip_tunnel_dellink(dev, &list_kill);
1690	unregister_netdevice_many(&list_kill);
1691	return ERR_PTR(err);
1692}
1693EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
1694
1695static int __net_init ipgre_tap_init_net(struct net *net)
1696{
1697	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
1698}
1699
1700static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
1701{
1702	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
1703}
1704
1705static struct pernet_operations ipgre_tap_net_ops = {
1706	.init = ipgre_tap_init_net,
1707	.exit_batch = ipgre_tap_exit_batch_net,
1708	.id   = &gre_tap_net_id,
1709	.size = sizeof(struct ip_tunnel_net),
1710};
1711
1712static int __net_init erspan_init_net(struct net *net)
1713{
1714	return ip_tunnel_init_net(net, erspan_net_id,
1715				  &erspan_link_ops, "erspan0");
1716}
1717
1718static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
1719{
1720	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
1721}
1722
1723static struct pernet_operations erspan_net_ops = {
1724	.init = erspan_init_net,
1725	.exit_batch = erspan_exit_batch_net,
1726	.id   = &erspan_net_id,
1727	.size = sizeof(struct ip_tunnel_net),
1728};
1729
1730static int __init ipgre_init(void)
1731{
1732	int err;
1733
1734	pr_info("GRE over IPv4 tunneling driver\n");
1735
1736	err = register_pernet_device(&ipgre_net_ops);
1737	if (err < 0)
1738		return err;
1739
1740	err = register_pernet_device(&ipgre_tap_net_ops);
1741	if (err < 0)
1742		goto pnet_tap_failed;
1743
1744	err = register_pernet_device(&erspan_net_ops);
1745	if (err < 0)
1746		goto pnet_erspan_failed;
1747
1748	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1749	if (err < 0) {
1750		pr_info("%s: can't add protocol\n", __func__);
1751		goto add_proto_failed;
1752	}
1753
1754	err = rtnl_link_register(&ipgre_link_ops);
1755	if (err < 0)
1756		goto rtnl_link_failed;
1757
1758	err = rtnl_link_register(&ipgre_tap_ops);
1759	if (err < 0)
1760		goto tap_ops_failed;
1761
1762	err = rtnl_link_register(&erspan_link_ops);
1763	if (err < 0)
1764		goto erspan_link_failed;
1765
1766	return 0;
1767
1768erspan_link_failed:
1769	rtnl_link_unregister(&ipgre_tap_ops);
1770tap_ops_failed:
1771	rtnl_link_unregister(&ipgre_link_ops);
1772rtnl_link_failed:
1773	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1774add_proto_failed:
1775	unregister_pernet_device(&erspan_net_ops);
1776pnet_erspan_failed:
1777	unregister_pernet_device(&ipgre_tap_net_ops);
1778pnet_tap_failed:
1779	unregister_pernet_device(&ipgre_net_ops);
1780	return err;
1781}
1782
1783static void __exit ipgre_fini(void)
1784{
1785	rtnl_link_unregister(&ipgre_tap_ops);
1786	rtnl_link_unregister(&ipgre_link_ops);
1787	rtnl_link_unregister(&erspan_link_ops);
1788	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1789	unregister_pernet_device(&ipgre_tap_net_ops);
1790	unregister_pernet_device(&ipgre_net_ops);
1791	unregister_pernet_device(&erspan_net_ops);
1792}
1793
1794module_init(ipgre_init);
1795module_exit(ipgre_fini);
1796MODULE_DESCRIPTION("IPv4 GRE tunnels over IP library");
1797MODULE_LICENSE("GPL");
1798MODULE_ALIAS_RTNL_LINK("gre");
1799MODULE_ALIAS_RTNL_LINK("gretap");
1800MODULE_ALIAS_RTNL_LINK("erspan");
1801MODULE_ALIAS_NETDEV("gre0");
1802MODULE_ALIAS_NETDEV("gretap0");
1803MODULE_ALIAS_NETDEV("erspan0");
 115   together, but it is not very evident, how to make them modular.
 116   sit is integral part of IPv6, ipip and gre are naturally modular.
 117   We could extract common parts (hash table, ioctl etc)
 118   to a separate module (ip_tunnel.c).
 119
 120   Alexey Kuznetsov.
 121 */
 122
 
 
 
 
 123static struct rtnl_link_ops ipgre_link_ops __read_mostly;
 
 
 124static int ipgre_tunnel_init(struct net_device *dev);
 125static void ipgre_tunnel_setup(struct net_device *dev);
 126static int ipgre_tunnel_bind_dev(struct net_device *dev);
 127
 128/* Fallback tunnel: no source, no destination, no key, no options */
 129
 130#define HASH_SIZE  16
 
 
 131
 132static int ipgre_net_id __read_mostly;
 133struct ipgre_net {
 134	struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];
 135
 136	struct net_device *fb_tunnel_dev;
 137};
 
 
 138
 139/* Tunnel hash table */
 140
 141/*
 142   4 hash tables:
 
 
 
 143
 144   3: (remote,local)
 145   2: (remote,*)
 146   1: (*,local)
 147   0: (*,*)
 148
  149   We require an exact key match, i.e. if a key is present in the packet
  150   it will match only a tunnel with the same key; if it is not present,
  151   it will match only a keyless tunnel.
  152
  153   All keyless packets, if not matched against a configured keyless tunnel,
  154   will match the fallback tunnel.
 155 */
 
 
 
 156
 157#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
 
 
 158
 159#define tunnels_r_l	tunnels[3]
 160#define tunnels_r	tunnels[2]
 161#define tunnels_l	tunnels[1]
 162#define tunnels_wc	tunnels[0]
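To make the table and bucket selection above concrete, here is a minimal editorial user-space sketch (not part of ip_gre.c; DEMO_HASH and the sample values are hypothetical) of how a keyed tunnel with both remote and local addresses set would be hashed into tunnels_r_l:

#include <stdint.h>
#include <stdio.h>

/* Same folding as the HASH() macro above: xor the lowest nibble with the one
 * above it and keep 4 bits, giving one of HASH_SIZE (16) buckets. */
#define DEMO_HASH(addr) ((((uint32_t)(addr)) ^ (((uint32_t)(addr)) >> 4)) & 0xF)

int main(void)
{
	uint32_t remote = 0x12345678;	/* sample remote address bits */
	uint32_t key    = 0x0000002a;	/* sample GRE key */

	/* A tunnel with both remote and local set lives in tunnels_r_l
	 * (prio 3); the chain index combines the remote and key hashes. */
	printf("bucket = tunnels_r_l[%u]\n",
	       (unsigned)(DEMO_HASH(remote) ^ DEMO_HASH(key)));
	return 0;
}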
 163/*
 164 * Locking : hash tables are protected by RCU and RTNL
 165 */
 166
 167#define for_each_ip_tunnel_rcu(start) \
 168	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
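/* Editorial note: readers walk these chains under rcu_read_lock() (see
 * ipgre_rcv() below), while writers hold the RTNL lock and publish updates
 * with rcu_assign_pointer() (see ipgre_tunnel_link()/unlink()). */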
 
 
 
 169
  170/* often modified stats are per cpu, others are shared (netdev->stats) */
 171struct pcpu_tstats {
 172	u64	rx_packets;
 173	u64	rx_bytes;
 174	u64	tx_packets;
 175	u64	tx_bytes;
 176	struct u64_stats_sync	syncp;
 177};
 178
 179static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
 180						   struct rtnl_link_stats64 *tot)
 181{
 182	int i;
 183
 184	for_each_possible_cpu(i) {
 185		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
 186		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
 187		unsigned int start;
 188
 189		do {
 190			start = u64_stats_fetch_begin_bh(&tstats->syncp);
 191			rx_packets = tstats->rx_packets;
 192			tx_packets = tstats->tx_packets;
 193			rx_bytes = tstats->rx_bytes;
 194			tx_bytes = tstats->tx_bytes;
 195		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
 196
 197		tot->rx_packets += rx_packets;
 198		tot->tx_packets += tx_packets;
 199		tot->rx_bytes   += rx_bytes;
 200		tot->tx_bytes   += tx_bytes;
 201	}
 202
 203	tot->multicast = dev->stats.multicast;
 204	tot->rx_crc_errors = dev->stats.rx_crc_errors;
 205	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
 206	tot->rx_length_errors = dev->stats.rx_length_errors;
 207	tot->rx_errors = dev->stats.rx_errors;
 208	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
 209	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
 210	tot->tx_dropped = dev->stats.tx_dropped;
 211	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
 212	tot->tx_errors = dev->stats.tx_errors;
 213
 214	return tot;
 215}
 216
  217/* Given src, dst and key, find the appropriate tunnel for the incoming packet. */
 218
 219static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
 220					     __be32 remote, __be32 local,
 221					     __be32 key, __be16 gre_proto)
 222{
 223	struct net *net = dev_net(dev);
 224	int link = dev->ifindex;
 225	unsigned int h0 = HASH(remote);
 226	unsigned int h1 = HASH(key);
 227	struct ip_tunnel *t, *cand = NULL;
 228	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
 229	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
 230		       ARPHRD_ETHER : ARPHRD_IPGRE;
 231	int score, cand_score = 4;
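	/* Editorial note: each chain walk below prefers an exact match
	 * (score 0, returned immediately); a link mismatch costs 1 and a
	 * device-type mismatch costs 2, and the lowest-scoring candidate
	 * seen across the four tables is returned as a fallback. */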
 232
 233	for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
 234		if (local != t->parms.iph.saddr ||
 235		    remote != t->parms.iph.daddr ||
 236		    key != t->parms.i_key ||
 237		    !(t->dev->flags & IFF_UP))
 238			continue;
 239
 240		if (t->dev->type != ARPHRD_IPGRE &&
 241		    t->dev->type != dev_type)
 242			continue;
 243
 244		score = 0;
 245		if (t->parms.link != link)
 246			score |= 1;
 247		if (t->dev->type != dev_type)
 248			score |= 2;
 249		if (score == 0)
 250			return t;
 251
 252		if (score < cand_score) {
 253			cand = t;
 254			cand_score = score;
 255		}
 256	}
 257
 258	for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
 259		if (remote != t->parms.iph.daddr ||
 260		    key != t->parms.i_key ||
 261		    !(t->dev->flags & IFF_UP))
 262			continue;
 263
 264		if (t->dev->type != ARPHRD_IPGRE &&
 265		    t->dev->type != dev_type)
 266			continue;
 267
 268		score = 0;
 269		if (t->parms.link != link)
 270			score |= 1;
 271		if (t->dev->type != dev_type)
 272			score |= 2;
 273		if (score == 0)
 274			return t;
 275
 276		if (score < cand_score) {
 277			cand = t;
 278			cand_score = score;
 279		}
 280	}
 281
 282	for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
 283		if ((local != t->parms.iph.saddr &&
 284		     (local != t->parms.iph.daddr ||
 285		      !ipv4_is_multicast(local))) ||
 286		    key != t->parms.i_key ||
 287		    !(t->dev->flags & IFF_UP))
 288			continue;
 289
 290		if (t->dev->type != ARPHRD_IPGRE &&
 291		    t->dev->type != dev_type)
 292			continue;
 293
 294		score = 0;
 295		if (t->parms.link != link)
 296			score |= 1;
 297		if (t->dev->type != dev_type)
 298			score |= 2;
 299		if (score == 0)
 300			return t;
 301
 302		if (score < cand_score) {
 303			cand = t;
 304			cand_score = score;
 305		}
 306	}
 307
 308	for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
 309		if (t->parms.i_key != key ||
 310		    !(t->dev->flags & IFF_UP))
 311			continue;
 312
 313		if (t->dev->type != ARPHRD_IPGRE &&
 314		    t->dev->type != dev_type)
 315			continue;
 316
 317		score = 0;
 318		if (t->parms.link != link)
 319			score |= 1;
 320		if (t->dev->type != dev_type)
 321			score |= 2;
 322		if (score == 0)
 323			return t;
 324
 325		if (score < cand_score) {
 326			cand = t;
 327			cand_score = score;
 328		}
 329	}
 330
 331	if (cand != NULL)
 332		return cand;
 333
 334	dev = ign->fb_tunnel_dev;
 335	if (dev->flags & IFF_UP)
 336		return netdev_priv(dev);
 337
 338	return NULL;
 
 
 339}
 340
 341static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
 342		struct ip_tunnel_parm *parms)
 343{
 344	__be32 remote = parms->iph.daddr;
 345	__be32 local = parms->iph.saddr;
 346	__be32 key = parms->i_key;
 347	unsigned int h = HASH(key);
 348	int prio = 0;
 
 
 
 
 349
 350	if (local)
 351		prio |= 1;
 352	if (remote && !ipv4_is_multicast(remote)) {
 353		prio |= 2;
 354		h ^= HASH(remote);
 355	}
 356
 357	return &ign->tunnels[prio][h];
 358}
 
 
 
 359
 360static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
 361		struct ip_tunnel *t)
 362{
 363	return __ipgre_bucket(ign, &t->parms);
 364}
 365
 366static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
 367{
 368	struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);
 
 
 369
 370	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
 371	rcu_assign_pointer(*tp, t);
 372}
 373
 374static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
 375{
 376	struct ip_tunnel __rcu **tp;
 377	struct ip_tunnel *iter;
 378
 379	for (tp = ipgre_bucket(ign, t);
 380	     (iter = rtnl_dereference(*tp)) != NULL;
 381	     tp = &iter->next) {
 382		if (t == iter) {
 383			rcu_assign_pointer(*tp, t->next);
 384			break;
 385		}
 386	}
 387}
 388
 389static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
 390					   struct ip_tunnel_parm *parms,
 391					   int type)
 392{
 393	__be32 remote = parms->iph.daddr;
 394	__be32 local = parms->iph.saddr;
 395	__be32 key = parms->i_key;
 396	int link = parms->link;
 397	struct ip_tunnel *t;
 398	struct ip_tunnel __rcu **tp;
 399	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
 400
 401	for (tp = __ipgre_bucket(ign, parms);
 402	     (t = rtnl_dereference(*tp)) != NULL;
 403	     tp = &t->next)
 404		if (local == t->parms.iph.saddr &&
 405		    remote == t->parms.iph.daddr &&
 406		    key == t->parms.i_key &&
 407		    link == t->parms.link &&
 408		    type == t->dev->type)
 409			break;
 410
 411	return t;
 412}
 413
 414static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
 415		struct ip_tunnel_parm *parms, int create)
 416{
 417	struct ip_tunnel *t, *nt;
 418	struct net_device *dev;
 419	char name[IFNAMSIZ];
 420	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
 421
 422	t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
 423	if (t || !create)
 424		return t;
 425
 426	if (parms->name[0])
 427		strlcpy(name, parms->name, IFNAMSIZ);
 428	else
 429		strcpy(name, "gre%d");
 430
 431	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
 432	if (!dev)
 433		return NULL;
 434
 435	dev_net_set(dev, net);
 436
 437	nt = netdev_priv(dev);
 438	nt->parms = *parms;
 439	dev->rtnl_link_ops = &ipgre_link_ops;
 
 440
 441	dev->mtu = ipgre_tunnel_bind_dev(dev);
 442
 443	if (register_netdevice(dev) < 0)
 444		goto failed_free;
 
 
 445
 446	/* Can use a lockless transmit, unless we generate output sequences */
 447	if (!(nt->parms.o_flags & GRE_SEQ))
 448		dev->features |= NETIF_F_LLTX;
 449
 450	dev_hold(dev);
 451	ipgre_tunnel_link(ign, nt);
 452	return nt;
 453
 454failed_free:
 455	free_netdev(dev);
 456	return NULL;
 457}
 458
 459static void ipgre_tunnel_uninit(struct net_device *dev)
 
 460{
 461	struct net *net = dev_net(dev);
 462	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
 463
 464	ipgre_tunnel_unlink(ign, netdev_priv(dev));
 465	dev_put(dev);
 466}
 467
 468
 469static void ipgre_err(struct sk_buff *skb, u32 info)
 470{
 
 
 
 471
  472/* All the routers (except for Linux) return only
  473   8 bytes of packet payload. It means that precise relaying of
  474   ICMP in the real Internet is absolutely infeasible.
  475
  476   Moreover, Cisco's "wise men" put the GRE key in the third word
  477   of the GRE header. That makes it impossible to maintain even soft state
  478   for keyed GRE tunnels with checksums enabled. Tell them "thank you".
  479
  480   Well, I wonder: rfc1812 was written by a Cisco employee, so
  481   why the hell do these idiots break the standards established
  482   by themselves???
 483 */
 484
 485	const struct iphdr *iph = (const struct iphdr *)skb->data;
 486	__be16	     *p = (__be16 *)(skb->data+(iph->ihl<<2));
 487	int grehlen = (iph->ihl<<2) + 4;
 488	const int type = icmp_hdr(skb)->type;
 489	const int code = icmp_hdr(skb)->code;
 490	struct ip_tunnel *t;
 491	__be16 flags;
 492
 493	flags = p[0];
 494	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
 495		if (flags&(GRE_VERSION|GRE_ROUTING))
 496			return;
 497		if (flags&GRE_KEY) {
 498			grehlen += 4;
 499			if (flags&GRE_CSUM)
 500				grehlen += 4;
 501		}
 502	}
 
 503
 504	/* If only 8 bytes returned, keyed message will be dropped here */
 505	if (skb_headlen(skb) < grehlen)
 506		return;
 507
 508	switch (type) {
 509	default:
 510	case ICMP_PARAMETERPROB:
 511		return;
 512
 513	case ICMP_DEST_UNREACH:
 514		switch (code) {
 515		case ICMP_SR_FAILED:
 516		case ICMP_PORT_UNREACH:
 517			/* Impossible event. */
 518			return;
 519		case ICMP_FRAG_NEEDED:
 520			/* Soft state for pmtu is maintained by IP core. */
 521			return;
 522		default:
 523			/* All others are translated to HOST_UNREACH.
 524			   rfc2003 contains "deep thoughts" about NET_UNREACH,
 525			   I believe they are just ether pollution. --ANK
 526			 */
 527			break;
 528		}
 529		break;
 530	case ICMP_TIME_EXCEEDED:
 531		if (code != ICMP_EXC_TTL)
 532			return;
 533		break;
 534	}
 535
 536	rcu_read_lock();
 537	t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
 538				flags & GRE_KEY ?
 539				*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
 540				p[1]);
 541	if (t == NULL || t->parms.iph.daddr == 0 ||
 542	    ipv4_is_multicast(t->parms.iph.daddr))
 543		goto out;
 544
 545	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
 546		goto out;
 547
 548	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
 549		t->err_count++;
 550	else
 551		t->err_count = 1;
 552	t->err_time = jiffies;
 553out:
 554	rcu_read_unlock();
 
 
 
 555}
 556
 557static inline void ipgre_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
 
 
 558{
 559	if (INET_ECN_is_ce(iph->tos)) {
 560		if (skb->protocol == htons(ETH_P_IP)) {
 561			IP_ECN_set_ce(ip_hdr(skb));
 562		} else if (skb->protocol == htons(ETH_P_IPV6)) {
 563			IP6_ECN_set_ce(ipv6_hdr(skb));
 564		}
 565	}
 
 
 566}
 567
 568static inline u8
 569ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
 570{
 571	u8 inner = 0;
 572	if (skb->protocol == htons(ETH_P_IP))
 573		inner = old_iph->tos;
 574	else if (skb->protocol == htons(ETH_P_IPV6))
 575		inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
 576	return INET_ECN_encapsulate(tos, inner);
 577}
 578
 579static int ipgre_rcv(struct sk_buff *skb)
 
 580{
 581	const struct iphdr *iph;
 582	u8     *h;
 583	__be16    flags;
 584	__sum16   csum = 0;
 585	__be32 key = 0;
 586	u32    seqno = 0;
 587	struct ip_tunnel *tunnel;
 588	int    offset = 4;
 589	__be16 gre_proto;
 590
 591	if (!pskb_may_pull(skb, 16))
 592		goto drop_nolock;
 
 
 593
 594	iph = ip_hdr(skb);
 595	h = skb->data;
 596	flags = *(__be16 *)h;
 597
 598	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
 599		/* - Version must be 0.
 600		   - We do not support routing headers.
 601		 */
 602		if (flags&(GRE_VERSION|GRE_ROUTING))
 603			goto drop_nolock;
 604
 605		if (flags&GRE_CSUM) {
 606			switch (skb->ip_summed) {
 607			case CHECKSUM_COMPLETE:
 608				csum = csum_fold(skb->csum);
 609				if (!csum)
 610					break;
 611				/* fall through */
 612			case CHECKSUM_NONE:
 613				skb->csum = 0;
 614				csum = __skb_checksum_complete(skb);
 615				skb->ip_summed = CHECKSUM_COMPLETE;
 616			}
 617			offset += 4;
 618		}
 619		if (flags&GRE_KEY) {
 620			key = *(__be32 *)(h + offset);
 621			offset += 4;
 622		}
 623		if (flags&GRE_SEQ) {
 624			seqno = ntohl(*(__be32 *)(h + offset));
 625			offset += 4;
 626		}
 627	}
 628
 629	gre_proto = *(__be16 *)(h + 2);
 
 
 
 
 630
 631	rcu_read_lock();
 632	if ((tunnel = ipgre_tunnel_lookup(skb->dev,
 633					  iph->saddr, iph->daddr, key,
 634					  gre_proto))) {
 635		struct pcpu_tstats *tstats;
 636
 637		secpath_reset(skb);
 638
 639		skb->protocol = gre_proto;
 640		/* WCCP version 1 and 2 protocol decoding.
 641		 * - Change protocol to IP
 642		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
 643		 */
 644		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
 645			skb->protocol = htons(ETH_P_IP);
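			/* Editorial note: if the next byte does not start an
			 * IPv4 header (version nibble != 4), this is a WCCPv2
			 * redirect header, so skip its extra 4 bytes. */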
 646			if ((*(h + offset) & 0xF0) != 0x40)
 647				offset += 4;
 648		}
 649
 650		skb->mac_header = skb->network_header;
 651		__pskb_pull(skb, offset);
 652		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
 653		skb->pkt_type = PACKET_HOST;
 654#ifdef CONFIG_NET_IPGRE_BROADCAST
 655		if (ipv4_is_multicast(iph->daddr)) {
 656			/* Looped back packet, drop it! */
 657			if (rt_is_output_route(skb_rtable(skb)))
 658				goto drop;
 659			tunnel->dev->stats.multicast++;
 660			skb->pkt_type = PACKET_BROADCAST;
 661		}
 662#endif
 663
 664		if (((flags&GRE_CSUM) && csum) ||
 665		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
 666			tunnel->dev->stats.rx_crc_errors++;
 667			tunnel->dev->stats.rx_errors++;
 668			goto drop;
 669		}
 670		if (tunnel->parms.i_flags&GRE_SEQ) {
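		/* Editorial note: the signed 32-bit difference below is a
		 * serial-number comparison, so stale or replayed frames are
		 * dropped even across sequence-number wraparound; e.g. with
		 * i_seqno == 0xfffffffe, an incoming seqno of 2 gives
		 * (s32)4 >= 0 and is accepted. */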
 671			if (!(flags&GRE_SEQ) ||
 672			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
 673				tunnel->dev->stats.rx_fifo_errors++;
 674				tunnel->dev->stats.rx_errors++;
 675				goto drop;
 676			}
 677			tunnel->i_seqno = seqno + 1;
 678		}
 679
 680		/* Warning: All skb pointers will be invalidated! */
 681		if (tunnel->dev->type == ARPHRD_ETHER) {
 682			if (!pskb_may_pull(skb, ETH_HLEN)) {
 683				tunnel->dev->stats.rx_length_errors++;
 684				tunnel->dev->stats.rx_errors++;
 685				goto drop;
 686			}
 687
 688			iph = ip_hdr(skb);
 689			skb->protocol = eth_type_trans(skb, tunnel->dev);
 690			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 691		}
 692
 693		tstats = this_cpu_ptr(tunnel->dev->tstats);
 694		u64_stats_update_begin(&tstats->syncp);
 695		tstats->rx_packets++;
 696		tstats->rx_bytes += skb->len;
 697		u64_stats_update_end(&tstats->syncp);
 698
 699		__skb_tunnel_rx(skb, tunnel->dev);
 700
 701		skb_reset_network_header(skb);
 702		ipgre_ecn_decapsulate(iph, skb);
 703
 704		netif_rx(skb);
 705
 706		rcu_read_unlock();
 707		return 0;
 708	}
 709	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 710
 711drop:
 712	rcu_read_unlock();
 713drop_nolock:
 714	kfree_skb(skb);
 715	return 0;
 716}
 717
 718static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 719{
 720	struct ip_tunnel *tunnel = netdev_priv(dev);
 721	struct pcpu_tstats *tstats;
 722	const struct iphdr  *old_iph = ip_hdr(skb);
 723	const struct iphdr  *tiph;
 724	struct flowi4 fl4;
 725	u8     tos;
 726	__be16 df;
 727	struct rtable *rt;     			/* Route to the other host */
 728	struct net_device *tdev;		/* Device to other host */
 729	struct iphdr  *iph;			/* Our new IP header */
 730	unsigned int max_headroom;		/* The extra header space needed */
 731	int    gre_hlen;
 732	__be32 dst;
 733	int    mtu;
 734
 735	if (dev->type == ARPHRD_ETHER)
 736		IPCB(skb)->flags = 0;
 737
 738	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
 739		gre_hlen = 0;
 740		tiph = (const struct iphdr *)skb->data;
 741	} else {
 742		gre_hlen = tunnel->hlen;
 743		tiph = &tunnel->parms.iph;
 744	}
 745
 746	if ((dst = tiph->daddr) == 0) {
 747		/* NBMA tunnel */
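		/* Editorial note: an NBMA (non-broadcast multi-access) tunnel
		 * has no fixed peer, so the outer destination is taken from
		 * the inner route's gateway for IPv4, or from an
		 * IPv4-compatible IPv6 destination below. */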
 748
 749		if (skb_dst(skb) == NULL) {
 750			dev->stats.tx_fifo_errors++;
 751			goto tx_error;
 752		}
 753
 754		if (skb->protocol == htons(ETH_P_IP)) {
 755			rt = skb_rtable(skb);
 756			dst = rt->rt_gateway;
 757		}
 758#if IS_ENABLED(CONFIG_IPV6)
 759		else if (skb->protocol == htons(ETH_P_IPV6)) {
 760			const struct in6_addr *addr6;
 761			struct neighbour *neigh;
 762			bool do_tx_error_icmp;
 763			int addr_type;
 764
 765			neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
 766			if (neigh == NULL)
 767				goto tx_error;
 768
 769			addr6 = (const struct in6_addr *)&neigh->primary_key;
 770			addr_type = ipv6_addr_type(addr6);
 771
 772			if (addr_type == IPV6_ADDR_ANY) {
 773				addr6 = &ipv6_hdr(skb)->daddr;
 774				addr_type = ipv6_addr_type(addr6);
 775			}
 776
 777			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
 778				do_tx_error_icmp = true;
 779			else {
 780				do_tx_error_icmp = false;
 781				dst = addr6->s6_addr32[3];
 782			}
 783			neigh_release(neigh);
 784			if (do_tx_error_icmp)
 785				goto tx_error_icmp;
 786		}
 787#endif
 788		else
 789			goto tx_error;
 790	}
 791
 792	tos = tiph->tos;
 793	if (tos == 1) {
 794		tos = 0;
 795		if (skb->protocol == htons(ETH_P_IP))
 796			tos = old_iph->tos;
 797		else if (skb->protocol == htons(ETH_P_IPV6))
 798			tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
 799	}
 800
 801	rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
 802				 tunnel->parms.o_key, RT_TOS(tos),
 803				 tunnel->parms.link);
 804	if (IS_ERR(rt)) {
 805		dev->stats.tx_carrier_errors++;
 806		goto tx_error;
 807	}
 808	tdev = rt->dst.dev;
 809
 810	if (tdev == dev) {
 811		ip_rt_put(rt);
 812		dev->stats.collisions++;
 813		goto tx_error;
 814	}
 815
 816	df = tiph->frag_off;
 817	if (df)
 818		mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
 819	else
 820		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
 821
 822	if (skb_dst(skb))
 823		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
 824
 825	if (skb->protocol == htons(ETH_P_IP)) {
 826		df |= (old_iph->frag_off&htons(IP_DF));
 827
 828		if ((old_iph->frag_off&htons(IP_DF)) &&
 829		    mtu < ntohs(old_iph->tot_len)) {
 830			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 831			ip_rt_put(rt);
 832			goto tx_error;
 833		}
 834	}
 835#if IS_ENABLED(CONFIG_IPV6)
 836	else if (skb->protocol == htons(ETH_P_IPV6)) {
 837		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
 838
 839		if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
 840			if ((tunnel->parms.iph.daddr &&
 841			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
 842			    rt6->rt6i_dst.plen == 128) {
 843				rt6->rt6i_flags |= RTF_MODIFIED;
 844				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
 845			}
 846		}
 847
 848		if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
 849			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 850			ip_rt_put(rt);
 851			goto tx_error;
 852		}
 853	}
 854#endif
 855
 856	if (tunnel->err_count > 0) {
 857		if (time_before(jiffies,
 858				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
 859			tunnel->err_count--;
 860
 861			dst_link_failure(skb);
 862		} else
 863			tunnel->err_count = 0;
 864	}
 865
 866	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;
 867
 868	if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
 869	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
 870		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
 871		if (max_headroom > dev->needed_headroom)
 872			dev->needed_headroom = max_headroom;
 873		if (!new_skb) {
 874			ip_rt_put(rt);
 875			dev->stats.tx_dropped++;
 876			dev_kfree_skb(skb);
 877			return NETDEV_TX_OK;
 878		}
 879		if (skb->sk)
 880			skb_set_owner_w(new_skb, skb->sk);
 881		dev_kfree_skb(skb);
 882		skb = new_skb;
 883		old_iph = ip_hdr(skb);
 884	}
 885
 886	skb_reset_transport_header(skb);
 887	skb_push(skb, gre_hlen);
 888	skb_reset_network_header(skb);
 889	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 890	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
 891			      IPSKB_REROUTED);
 892	skb_dst_drop(skb);
 893	skb_dst_set(skb, &rt->dst);
 894
  895	/*
  896	 *	Push down and install the outer IP header.
  897	 */
 898
 899	iph 			=	ip_hdr(skb);
 900	iph->version		=	4;
 901	iph->ihl		=	sizeof(struct iphdr) >> 2;
 902	iph->frag_off		=	df;
 903	iph->protocol		=	IPPROTO_GRE;
 904	iph->tos		=	ipgre_ecn_encapsulate(tos, old_iph, skb);
 905	iph->daddr		=	fl4.daddr;
 906	iph->saddr		=	fl4.saddr;
 907
 908	if ((iph->ttl = tiph->ttl) == 0) {
 909		if (skb->protocol == htons(ETH_P_IP))
 910			iph->ttl = old_iph->ttl;
 911#if IS_ENABLED(CONFIG_IPV6)
 912		else if (skb->protocol == htons(ETH_P_IPV6))
 913			iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
 914#endif
 915		else
 916			iph->ttl = ip4_dst_hoplimit(&rt->dst);
 917	}
 918
 919	((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
 920	((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
 921				   htons(ETH_P_TEB) : skb->protocol;
 
 
 
 922
 923	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
 924		__be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);
 925
 926		if (tunnel->parms.o_flags&GRE_SEQ) {
 927			++tunnel->o_seqno;
 928			*ptr = htonl(tunnel->o_seqno);
 929			ptr--;
 930		}
 931		if (tunnel->parms.o_flags&GRE_KEY) {
 932			*ptr = tunnel->parms.o_key;
 933			ptr--;
 934		}
 935		if (tunnel->parms.o_flags&GRE_CSUM) {
 936			*ptr = 0;
 937			*(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
 938		}
 939	}
 940
 941	nf_reset(skb);
 942	tstats = this_cpu_ptr(dev->tstats);
 943	__IPTUNNEL_XMIT(tstats, &dev->stats);
 944	return NETDEV_TX_OK;
 945
 946#if IS_ENABLED(CONFIG_IPV6)
 947tx_error_icmp:
 948	dst_link_failure(skb);
 949#endif
 950tx_error:
 951	dev->stats.tx_errors++;
 952	dev_kfree_skb(skb);
 953	return NETDEV_TX_OK;
 954}
 955
 956static int ipgre_tunnel_bind_dev(struct net_device *dev)
 
 957{
 958	struct net_device *tdev = NULL;
 959	struct ip_tunnel *tunnel;
 960	const struct iphdr *iph;
 961	int hlen = LL_MAX_HEADER;
 962	int mtu = ETH_DATA_LEN;
 963	int addend = sizeof(struct iphdr) + 4;
 964
 965	tunnel = netdev_priv(dev);
 966	iph = &tunnel->parms.iph;
 
 
 967
 968	/* Guess output device to choose reasonable mtu and needed_headroom */
 
 969
 970	if (iph->daddr) {
 971		struct flowi4 fl4;
 972		struct rtable *rt;
 973
 974		rt = ip_route_output_gre(dev_net(dev), &fl4,
 975					 iph->daddr, iph->saddr,
 976					 tunnel->parms.o_key,
 977					 RT_TOS(iph->tos),
 978					 tunnel->parms.link);
 979		if (!IS_ERR(rt)) {
 980			tdev = rt->dst.dev;
 981			ip_rt_put(rt);
 982		}
 983
 984		if (dev->type != ARPHRD_ETHER)
 985			dev->flags |= IFF_POINTOPOINT;
 986	}
 
 
 987
 988	if (!tdev && tunnel->parms.link)
 989		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
 
 
 
 990
 991	if (tdev) {
 992		hlen = tdev->hard_header_len + tdev->needed_headroom;
 993		mtu = tdev->mtu;
 994	}
 995	dev->iflink = tunnel->parms.link;
 996
 997	/* Precalculate GRE options length */
 998	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
 999		if (tunnel->parms.o_flags&GRE_CSUM)
1000			addend += 4;
1001		if (tunnel->parms.o_flags&GRE_KEY)
1002			addend += 4;
1003		if (tunnel->parms.o_flags&GRE_SEQ)
1004			addend += 4;
1005	}
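	/* Editorial example: with GRE_CSUM, GRE_KEY and GRE_SEQ all set,
	 * addend = 20 (outer IP) + 4 (base GRE) + 4 + 4 + 4 = 36 bytes,
	 * so a 1500-byte underlay leaves roughly 1464 bytes of tunnel MTU. */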
1006	dev->needed_headroom = addend + hlen;
1007	mtu -= dev->hard_header_len + addend;
1008
1009	if (mtu < 68)
1010		mtu = 68;
1011
1012	tunnel->hlen = addend;
1013
1014	return mtu;
1015}
1016
1017static int
1018ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1019{
1020	int err = 0;
1021	struct ip_tunnel_parm p;
1022	struct ip_tunnel *t;
1023	struct net *net = dev_net(dev);
1024	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
 
 
 
1025
1026	switch (cmd) {
1027	case SIOCGETTUNNEL:
1028		t = NULL;
1029		if (dev == ign->fb_tunnel_dev) {
1030			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1031				err = -EFAULT;
1032				break;
1033			}
1034			t = ipgre_tunnel_locate(net, &p, 0);
1035		}
1036		if (t == NULL)
1037			t = netdev_priv(dev);
1038		memcpy(&p, &t->parms, sizeof(p));
1039		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1040			err = -EFAULT;
1041		break;
1042
1043	case SIOCADDTUNNEL:
1044	case SIOCCHGTUNNEL:
1045		err = -EPERM;
1046		if (!capable(CAP_NET_ADMIN))
1047			goto done;
1048
1049		err = -EFAULT;
1050		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1051			goto done;
1052
1053		err = -EINVAL;
1054		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
1055		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
1056		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
1057			goto done;
1058		if (p.iph.ttl)
1059			p.iph.frag_off |= htons(IP_DF);
1060
1061		if (!(p.i_flags&GRE_KEY))
1062			p.i_key = 0;
1063		if (!(p.o_flags&GRE_KEY))
1064			p.o_key = 0;
1065
1066		t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
1067
1068		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
1069			if (t != NULL) {
1070				if (t->dev != dev) {
1071					err = -EEXIST;
1072					break;
1073				}
1074			} else {
1075				unsigned int nflags = 0;
1076
1077				t = netdev_priv(dev);
1078
1079				if (ipv4_is_multicast(p.iph.daddr))
1080					nflags = IFF_BROADCAST;
1081				else if (p.iph.daddr)
1082					nflags = IFF_POINTOPOINT;
1083
1084				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
1085					err = -EINVAL;
1086					break;
1087				}
1088				ipgre_tunnel_unlink(ign, t);
1089				synchronize_net();
1090				t->parms.iph.saddr = p.iph.saddr;
1091				t->parms.iph.daddr = p.iph.daddr;
1092				t->parms.i_key = p.i_key;
1093				t->parms.o_key = p.o_key;
1094				memcpy(dev->dev_addr, &p.iph.saddr, 4);
1095				memcpy(dev->broadcast, &p.iph.daddr, 4);
1096				ipgre_tunnel_link(ign, t);
1097				netdev_state_change(dev);
1098			}
1099		}
1100
1101		if (t) {
1102			err = 0;
1103			if (cmd == SIOCCHGTUNNEL) {
1104				t->parms.iph.ttl = p.iph.ttl;
1105				t->parms.iph.tos = p.iph.tos;
1106				t->parms.iph.frag_off = p.iph.frag_off;
1107				if (t->parms.link != p.link) {
1108					t->parms.link = p.link;
1109					dev->mtu = ipgre_tunnel_bind_dev(dev);
1110					netdev_state_change(dev);
1111				}
1112			}
1113			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
1114				err = -EFAULT;
1115		} else
1116			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1117		break;
1118
1119	case SIOCDELTUNNEL:
1120		err = -EPERM;
1121		if (!capable(CAP_NET_ADMIN))
1122			goto done;
1123
1124		if (dev == ign->fb_tunnel_dev) {
1125			err = -EFAULT;
1126			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1127				goto done;
1128			err = -ENOENT;
1129			if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
1130				goto done;
1131			err = -EPERM;
1132			if (t == netdev_priv(ign->fb_tunnel_dev))
1133				goto done;
1134			dev = t->dev;
1135		}
1136		unregister_netdevice(dev);
1137		err = 0;
1138		break;
1139
1140	default:
1141		err = -EINVAL;
1142	}
1143
1144done:
1145	return err;
1146}
1147
1148static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1149{
1150	struct ip_tunnel *tunnel = netdev_priv(dev);
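	/* Editorial note: 68 is the minimum IPv4 MTU (RFC 791); the upper
	 * bound (0xFFF8 is 65535 rounded down to a multiple of 8) keeps the
	 * encapsulated datagram within the IPv4 total-length limit once the
	 * outer IP and GRE headers are added back. */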
1151	if (new_mtu < 68 ||
1152	    new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
1153		return -EINVAL;
1154	dev->mtu = new_mtu;
1155	return 0;
1156}
1157
 1158/* Nice toy. Unfortunately, useless in real life :-)
 1159   It allows one to construct a virtual multiprotocol broadcast "LAN"
 1160   over the Internet, provided multicast routing is tuned.
 1161
 1162
 1163   I have no idea whether this bicycle was invented before me,
 1164   so I had to set ARPHRD_IPGRE to a random value.
 1165   I have the impression that Cisco could make something similar,
 1166   but this feature is apparently missing in IOS<=11.2(8).
1167
1168   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
1169   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
1170
1171   ping -t 255 224.66.66.66
1172
1173   If nobody answers, mbone does not work.
1174
1175   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
1176   ip addr add 10.66.66.<somewhat>/24 dev Universe
1177   ifconfig Universe up
1178   ifconfig Universe add fe80::<Your_real_addr>/10
1179   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
1180   ftp 10.66.66.66
1181   ...
1182   ftp fec0:6666:6666::193.233.7.65
1183   ...
1184
1185 */
1186
1187static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1188			unsigned short type,
1189			const void *daddr, const void *saddr, unsigned int len)
1190{
1191	struct ip_tunnel *t = netdev_priv(dev);
1192	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
1193	__be16 *p = (__be16 *)(iph+1);
1194
1195	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
1196	p[0]		= t->parms.o_flags;
1197	p[1]		= htons(type);
1198
1199	/*
1200	 *	Set the source hardware address.
1201	 */
1202
 
1203	if (saddr)
1204		memcpy(&iph->saddr, saddr, 4);
1205	if (daddr)
1206		memcpy(&iph->daddr, daddr, 4);
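	/* Editorial note: returning a negative length follows the header_ops
	 * convention (compare eth_header()) for a header that could not be
	 * completed because the destination address is not yet known. */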
1207	if (iph->daddr)
1208		return t->hlen;
1209
1210	return -t->hlen;
1211}
1212
1213static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
1214{
1215	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
1216	memcpy(haddr, &iph->saddr, 4);
1217	return 4;
1218}
1219
1220static const struct header_ops ipgre_header_ops = {
1221	.create	= ipgre_header,
1222	.parse	= ipgre_header_parse,
1223};
1224
1225#ifdef CONFIG_NET_IPGRE_BROADCAST
1226static int ipgre_open(struct net_device *dev)
1227{
1228	struct ip_tunnel *t = netdev_priv(dev);
1229
1230	if (ipv4_is_multicast(t->parms.iph.daddr)) {
1231		struct flowi4 fl4;
1232		struct rtable *rt;
1233
1234		rt = ip_route_output_gre(dev_net(dev), &fl4,
1235					 t->parms.iph.daddr,
1236					 t->parms.iph.saddr,
1237					 t->parms.o_key,
1238					 RT_TOS(t->parms.iph.tos),
1239					 t->parms.link);
1240		if (IS_ERR(rt))
1241			return -EADDRNOTAVAIL;
1242		dev = rt->dst.dev;
1243		ip_rt_put(rt);
1244		if (__in_dev_get_rtnl(dev) == NULL)
1245			return -EADDRNOTAVAIL;
1246		t->mlink = dev->ifindex;
1247		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
1248	}
1249	return 0;
1250}
1251
1252static int ipgre_close(struct net_device *dev)
1253{
1254	struct ip_tunnel *t = netdev_priv(dev);
1255
1256	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
1257		struct in_device *in_dev;
1258		in_dev = inetdev_by_index(dev_net(dev), t->mlink);
1259		if (in_dev)
1260			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
1261	}
1262	return 0;
1263}
1264
1265#endif
1266
1267static const struct net_device_ops ipgre_netdev_ops = {
1268	.ndo_init		= ipgre_tunnel_init,
1269	.ndo_uninit		= ipgre_tunnel_uninit,
1270#ifdef CONFIG_NET_IPGRE_BROADCAST
1271	.ndo_open		= ipgre_open,
1272	.ndo_stop		= ipgre_close,
1273#endif
1274	.ndo_start_xmit		= ipgre_tunnel_xmit,
1275	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
1276	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
1277	.ndo_get_stats64	= ipgre_get_stats64,
 
 
1278};
1279
1280static void ipgre_dev_free(struct net_device *dev)
1281{
1282	free_percpu(dev->tstats);
1283	free_netdev(dev);
1284}
1285
1286static void ipgre_tunnel_setup(struct net_device *dev)
1287{
1288	dev->netdev_ops		= &ipgre_netdev_ops;
1289	dev->destructor 	= ipgre_dev_free;
1290
1291	dev->type		= ARPHRD_IPGRE;
1292	dev->needed_headroom 	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
1293	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
1294	dev->flags		= IFF_NOARP;
1295	dev->iflink		= 0;
1296	dev->addr_len		= 4;
1297	dev->features		|= NETIF_F_NETNS_LOCAL;
1298	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
1299}
1300
1301static int ipgre_tunnel_init(struct net_device *dev)
1302{
1303	struct ip_tunnel *tunnel;
1304	struct iphdr *iph;
1305
1306	tunnel = netdev_priv(dev);
1307	iph = &tunnel->parms.iph;
 
1308
1309	tunnel->dev = dev;
1310	strcpy(tunnel->parms.name, dev->name);
1311
1312	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1313	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
1314
1315	if (iph->daddr) {
1316#ifdef CONFIG_NET_IPGRE_BROADCAST
1317		if (ipv4_is_multicast(iph->daddr)) {
1318			if (!iph->saddr)
1319				return -EINVAL;
1320			dev->flags = IFF_BROADCAST;
1321			dev->header_ops = &ipgre_header_ops;
 
 
1322		}
1323#endif
1324	} else
1325		dev->header_ops = &ipgre_header_ops;
 
 
 
1326
1327	dev->tstats = alloc_percpu(struct pcpu_tstats);
1328	if (!dev->tstats)
1329		return -ENOMEM;
1330
1331	return 0;
1332}
1333
1334static void ipgre_fb_tunnel_init(struct net_device *dev)
1335{
1336	struct ip_tunnel *tunnel = netdev_priv(dev);
1337	struct iphdr *iph = &tunnel->parms.iph;
1338
1339	tunnel->dev = dev;
1340	strcpy(tunnel->parms.name, dev->name);
1341
1342	iph->version		= 4;
1343	iph->protocol		= IPPROTO_GRE;
1344	iph->ihl		= 5;
1345	tunnel->hlen		= sizeof(struct iphdr) + 4;
1346
1347	dev_hold(dev);
1348}
1349
1350
1351static const struct gre_protocol ipgre_protocol = {
1352	.handler     = ipgre_rcv,
1353	.err_handler = ipgre_err,
1354};
1355
1356static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
1357{
1358	int prio;
1359
1360	for (prio = 0; prio < 4; prio++) {
1361		int h;
1362		for (h = 0; h < HASH_SIZE; h++) {
1363			struct ip_tunnel *t;
1364
1365			t = rtnl_dereference(ign->tunnels[prio][h]);
1366
1367			while (t != NULL) {
1368				unregister_netdevice_queue(t->dev, head);
1369				t = rtnl_dereference(t->next);
1370			}
1371		}
1372	}
1373}
1374
1375static int __net_init ipgre_init_net(struct net *net)
1376{
1377	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1378	int err;
1379
1380	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
1381					   ipgre_tunnel_setup);
1382	if (!ign->fb_tunnel_dev) {
1383		err = -ENOMEM;
1384		goto err_alloc_dev;
1385	}
1386	dev_net_set(ign->fb_tunnel_dev, net);
1387
1388	ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
1389	ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;
1390
1391	if ((err = register_netdev(ign->fb_tunnel_dev)))
1392		goto err_reg_dev;
1393
1394	rcu_assign_pointer(ign->tunnels_wc[0],
1395			   netdev_priv(ign->fb_tunnel_dev));
1396	return 0;
1397
1398err_reg_dev:
1399	ipgre_dev_free(ign->fb_tunnel_dev);
1400err_alloc_dev:
1401	return err;
1402}
1403
1404static void __net_exit ipgre_exit_net(struct net *net)
1405{
1406	struct ipgre_net *ign;
1407	LIST_HEAD(list);
1408
1409	ign = net_generic(net, ipgre_net_id);
1410	rtnl_lock();
1411	ipgre_destroy_tunnels(ign, &list);
1412	unregister_netdevice_many(&list);
1413	rtnl_unlock();
1414}
1415
1416static struct pernet_operations ipgre_net_ops = {
1417	.init = ipgre_init_net,
1418	.exit = ipgre_exit_net,
1419	.id   = &ipgre_net_id,
1420	.size = sizeof(struct ipgre_net),
1421};
1422
1423static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
 
1424{
1425	__be16 flags;
1426
1427	if (!data)
1428		return 0;
1429
1430	flags = 0;
1431	if (data[IFLA_GRE_IFLAGS])
1432		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1433	if (data[IFLA_GRE_OFLAGS])
1434		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1435	if (flags & (GRE_VERSION|GRE_ROUTING))
1436		return -EINVAL;
1437
1438	return 0;
1439}
1440
1441static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
 
1442{
1443	__be32 daddr;
1444
1445	if (tb[IFLA_ADDRESS]) {
1446		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1447			return -EINVAL;
1448		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1449			return -EADDRNOTAVAIL;
1450	}
1451
1452	if (!data)
1453		goto out;
1454
1455	if (data[IFLA_GRE_REMOTE]) {
1456		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1457		if (!daddr)
1458			return -EINVAL;
1459	}
1460
1461out:
1462	return ipgre_tunnel_validate(tb, data);
1463}
1464
1465static void ipgre_netlink_parms(struct nlattr *data[],
1466				struct ip_tunnel_parm *parms)
1467{
1468	memset(parms, 0, sizeof(*parms));
1469
1470	parms->iph.protocol = IPPROTO_GRE;
1471
1472	if (!data)
1473		return;
1474
1475	if (data[IFLA_GRE_LINK])
1476		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1477
1478	if (data[IFLA_GRE_IFLAGS])
1479		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);
1480
1481	if (data[IFLA_GRE_OFLAGS])
1482		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);
1483
1484	if (data[IFLA_GRE_IKEY])
1485		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1486
1487	if (data[IFLA_GRE_OKEY])
1488		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1489
1490	if (data[IFLA_GRE_LOCAL])
1491		parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);
1492
1493	if (data[IFLA_GRE_REMOTE])
1494		parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);
1495
1496	if (data[IFLA_GRE_TTL])
1497		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1498
1499	if (data[IFLA_GRE_TOS])
1500		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1501
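	/* Editorial note: path MTU discovery defaults to on, i.e. DF is set
	 * on the outer header unless IFLA_GRE_PMTUDISC is explicitly zero. */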
1502	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
 
 
1503		parms->iph.frag_off = htons(IP_DF);
1504}
1505
1506static int ipgre_tap_init(struct net_device *dev)
 
 
 
 
1507{
1508	struct ip_tunnel *tunnel;
 
1509
1510	tunnel = netdev_priv(dev);
 
 
 
 
1511
1512	tunnel->dev = dev;
1513	strcpy(tunnel->parms.name, dev->name);
1514
1515	ipgre_tunnel_bind_dev(dev);
 
 
1516
1517	dev->tstats = alloc_percpu(struct pcpu_tstats);
1518	if (!dev->tstats)
1519		return -ENOMEM;
1520
1521	return 0;
1522}
1523
1524static const struct net_device_ops ipgre_tap_netdev_ops = {
1525	.ndo_init		= ipgre_tap_init,
1526	.ndo_uninit		= ipgre_tunnel_uninit,
1527	.ndo_start_xmit		= ipgre_tunnel_xmit,
1528	.ndo_set_mac_address 	= eth_mac_addr,
1529	.ndo_validate_addr	= eth_validate_addr,
1530	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
1531	.ndo_get_stats64	= ipgre_get_stats64,
 
 
1532};
1533
1534static void ipgre_tap_setup(struct net_device *dev)
1535{
1536
1537	ether_setup(dev);
 
 
1538
1539	dev->netdev_ops		= &ipgre_tap_netdev_ops;
1540	dev->destructor 	= ipgre_dev_free;
 
 
1541
1542	dev->iflink		= 0;
1543	dev->features		|= NETIF_F_NETNS_LOCAL;
1544}
1545
1546static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
1547			 struct nlattr *data[])
1548{
1549	struct ip_tunnel *nt;
1550	struct net *net = dev_net(dev);
1551	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1552	int mtu;
1553	int err;
 
 
1554
1555	nt = netdev_priv(dev);
1556	ipgre_netlink_parms(data, &nt->parms);
 
 
1557
1558	if (ipgre_tunnel_find(net, &nt->parms, dev->type))
1559		return -EEXIST;
 
1560
1561	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
1562		eth_hw_addr_random(dev);
 
1563
1564	mtu = ipgre_tunnel_bind_dev(dev);
1565	if (!tb[IFLA_MTU])
1566		dev->mtu = mtu;
1567
1568	/* Can use a lockless transmit, unless we generate output sequences */
1569	if (!(nt->parms.o_flags & GRE_SEQ))
1570		dev->features |= NETIF_F_LLTX;
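	/* Editorial note: with GRE_SEQ the per-tunnel o_seqno counter must be
	 * updated under the tx lock, so lockless transmit is only enabled
	 * when no output sequence numbers are generated. */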
 
 
 
 
1571
1572	err = register_netdevice(dev);
1573	if (err)
1574		goto out;
1575
1576	dev_hold(dev);
1577	ipgre_tunnel_link(ign, nt);
 
1578
1579out:
1580	return err;
 
 
1581}
1582
1583static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1584			    struct nlattr *data[])
 
1585{
1586	struct ip_tunnel *t, *nt;
1587	struct net *net = dev_net(dev);
1588	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1589	struct ip_tunnel_parm p;
1590	int mtu;
1591
1592	if (dev == ign->fb_tunnel_dev)
1593		return -EINVAL;
1594
1595	nt = netdev_priv(dev);
1596	ipgre_netlink_parms(data, &p);
1597
1598	t = ipgre_tunnel_locate(net, &p, 0);
1599
1600	if (t) {
1601		if (t->dev != dev)
1602			return -EEXIST;
1603	} else {
1604		t = nt;
1605
1606		if (dev->type != ARPHRD_ETHER) {
1607			unsigned int nflags = 0;
1608
1609			if (ipv4_is_multicast(p.iph.daddr))
1610				nflags = IFF_BROADCAST;
1611			else if (p.iph.daddr)
1612				nflags = IFF_POINTOPOINT;
1613
1614			if ((dev->flags ^ nflags) &
1615			    (IFF_POINTOPOINT | IFF_BROADCAST))
1616				return -EINVAL;
1617		}
1618
1619		ipgre_tunnel_unlink(ign, t);
1620		t->parms.iph.saddr = p.iph.saddr;
1621		t->parms.iph.daddr = p.iph.daddr;
1622		t->parms.i_key = p.i_key;
1623		if (dev->type != ARPHRD_ETHER) {
1624			memcpy(dev->dev_addr, &p.iph.saddr, 4);
1625			memcpy(dev->broadcast, &p.iph.daddr, 4);
1626		}
1627		ipgre_tunnel_link(ign, t);
1628		netdev_state_change(dev);
1629	}
1630
1631	t->parms.o_key = p.o_key;
1632	t->parms.iph.ttl = p.iph.ttl;
1633	t->parms.iph.tos = p.iph.tos;
1634	t->parms.iph.frag_off = p.iph.frag_off;
1635
1636	if (t->parms.link != p.link) {
1637		t->parms.link = p.link;
1638		mtu = ipgre_tunnel_bind_dev(dev);
1639		if (!tb[IFLA_MTU])
1640			dev->mtu = mtu;
1641		netdev_state_change(dev);
1642	}
1643
1644	return 0;
1645}
1646
1647static size_t ipgre_get_size(const struct net_device *dev)
1648{
1649	return
1650		/* IFLA_GRE_LINK */
1651		nla_total_size(4) +
1652		/* IFLA_GRE_IFLAGS */
1653		nla_total_size(2) +
1654		/* IFLA_GRE_OFLAGS */
1655		nla_total_size(2) +
1656		/* IFLA_GRE_IKEY */
1657		nla_total_size(4) +
1658		/* IFLA_GRE_OKEY */
1659		nla_total_size(4) +
1660		/* IFLA_GRE_LOCAL */
1661		nla_total_size(4) +
1662		/* IFLA_GRE_REMOTE */
1663		nla_total_size(4) +
1664		/* IFLA_GRE_TTL */
1665		nla_total_size(1) +
1666		/* IFLA_GRE_TOS */
1667		nla_total_size(1) +
1668		/* IFLA_GRE_PMTUDISC */
1669		nla_total_size(1) +
1670		0;
1671}
1672
1673static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1674{
1675	struct ip_tunnel *t = netdev_priv(dev);
1676	struct ip_tunnel_parm *p = &t->parms;
 
1677
1678	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1679	    nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
1680	    nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
 
 
1681	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1682	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1683	    nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1684	    nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1685	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1686	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1687	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1688		       !!(p->iph.frag_off & htons(IP_DF))))
1689		goto nla_put_failure;
1690	return 0;
1691
1692nla_put_failure:
1693	return -EMSGSIZE;
1694}
1695
1696static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1697	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1698	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1699	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1700	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1701	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1702	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
1703	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
1704	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1705	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1706	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
1707};
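/* Editorial note: IFLA_GRE_LOCAL and IFLA_GRE_REMOTE are validated by length
 * only (one IPv4 address each); keys and flag words arrive in network byte
 * order and are stored unconverted in struct ip_tunnel_parm. */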
1708
1709static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1710	.kind		= "gre",
1711	.maxtype	= IFLA_GRE_MAX,
1712	.policy		= ipgre_policy,
1713	.priv_size	= sizeof(struct ip_tunnel),
1714	.setup		= ipgre_tunnel_setup,
1715	.validate	= ipgre_tunnel_validate,
1716	.newlink	= ipgre_newlink,
1717	.changelink	= ipgre_changelink,
 
1718	.get_size	= ipgre_get_size,
1719	.fill_info	= ipgre_fill_info,
 
1720};
1721
1722static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1723	.kind		= "gretap",
1724	.maxtype	= IFLA_GRE_MAX,
1725	.policy		= ipgre_policy,
1726	.priv_size	= sizeof(struct ip_tunnel),
1727	.setup		= ipgre_tap_setup,
1728	.validate	= ipgre_tap_validate,
1729	.newlink	= ipgre_newlink,
1730	.changelink	= ipgre_changelink,
 
1731	.get_size	= ipgre_get_size,
1732	.fill_info	= ipgre_fill_info,
 
1733};
1734
1735/*
1736 *	And now the modules code and kernel interface.
1737 */
1738
1739static int __init ipgre_init(void)
1740{
1741	int err;
1742
1743	pr_info("GRE over IPv4 tunneling driver\n");
1744
1745	err = register_pernet_device(&ipgre_net_ops);
1746	if (err < 0)
1747		return err;
1748
1749	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1750	if (err < 0) {
1751		pr_info("%s: can't add protocol\n", __func__);
1752		goto add_proto_failed;
1753	}
1754
1755	err = rtnl_link_register(&ipgre_link_ops);
1756	if (err < 0)
1757		goto rtnl_link_failed;
1758
1759	err = rtnl_link_register(&ipgre_tap_ops);
1760	if (err < 0)
1761		goto tap_ops_failed;
1762
1763out:
1764	return err;
 
 
 
1765
 
 
1766tap_ops_failed:
1767	rtnl_link_unregister(&ipgre_link_ops);
1768rtnl_link_failed:
1769	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1770add_proto_failed:
 
 
 
 
1771	unregister_pernet_device(&ipgre_net_ops);
1772	goto out;
1773}
1774
1775static void __exit ipgre_fini(void)
1776{
1777	rtnl_link_unregister(&ipgre_tap_ops);
1778	rtnl_link_unregister(&ipgre_link_ops);
1779	if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
1780		pr_info("%s: can't remove protocol\n", __func__);
 
1781	unregister_pernet_device(&ipgre_net_ops);
 
1782}
1783
1784module_init(ipgre_init);
1785module_exit(ipgre_fini);
 
1786MODULE_LICENSE("GPL");
1787MODULE_ALIAS_RTNL_LINK("gre");
1788MODULE_ALIAS_RTNL_LINK("gretap");
 
1789MODULE_ALIAS_NETDEV("gre0");