   1/*
   2 *	Linux NET3:	GRE over IP protocol decoder.
   3 *
   4 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
   5 *
   6 *	This program is free software; you can redistribute it and/or
   7 *	modify it under the terms of the GNU General Public License
   8 *	as published by the Free Software Foundation; either version
   9 *	2 of the License, or (at your option) any later version.
  10 *
  11 */
  12
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14
  15#include <linux/capability.h>
  16#include <linux/module.h>
  17#include <linux/types.h>
  18#include <linux/kernel.h>
  19#include <linux/slab.h>
  20#include <linux/uaccess.h>
  21#include <linux/skbuff.h>
  22#include <linux/netdevice.h>
  23#include <linux/in.h>
  24#include <linux/tcp.h>
  25#include <linux/udp.h>
  26#include <linux/if_arp.h>
  27#include <linux/if_vlan.h>
  28#include <linux/init.h>
  29#include <linux/in6.h>
  30#include <linux/inetdevice.h>
  31#include <linux/igmp.h>
  32#include <linux/netfilter_ipv4.h>
  33#include <linux/etherdevice.h>
  34#include <linux/if_ether.h>
  35
  36#include <net/sock.h>
  37#include <net/ip.h>
  38#include <net/icmp.h>
  39#include <net/protocol.h>
  40#include <net/ip_tunnels.h>
  41#include <net/arp.h>
  42#include <net/checksum.h>
  43#include <net/dsfield.h>
  44#include <net/inet_ecn.h>
  45#include <net/xfrm.h>
  46#include <net/net_namespace.h>
  47#include <net/netns/generic.h>
  48#include <net/rtnetlink.h>
  49#include <net/gre.h>
  50#include <net/dst_metadata.h>
  51#include <net/erspan.h>
 
  52
  53/*
  54   Problems & solutions
  55   --------------------
  56
  57   1. The most important issue is detecting local dead loops.
  58   They would cause complete host lockup in transmit, which
  59   would be "resolved" by stack overflow or, if queueing is enabled,
  60   with infinite looping in net_bh.
  61
  62   We cannot track such dead loops during route installation,
   63   it is an infeasible task. The most general solution would be
  64   to keep skb->encapsulation counter (sort of local ttl),
  65   and silently drop packet when it expires. It is a good
  66   solution, but it supposes maintaining new variable in ALL
  67   skb, even if no tunneling is used.
  68
  69   Current solution: xmit_recursion breaks dead loops. This is a percpu
  70   counter, since when we enter the first ndo_xmit(), cpu migration is
  71   forbidden. We force an exit if this counter reaches RECURSION_LIMIT
  72
  73   2. Networking dead loops would not kill routers, but would really
  74   kill network. IP hop limit plays role of "t->recursion" in this case,
  75   if we copy it from packet being encapsulated to upper header.
  76   It is very good solution, but it introduces two problems:
  77
  78   - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
  79     do not work over tunnels.
  80   - traceroute does not work. I planned to relay ICMP from tunnel,
  81     so that this problem would be solved and traceroute output
   82     would be even more informative. This idea appeared to be wrong:
   83     only Linux complies with rfc1812 now (yes, guys, Linux is the only
  84     true router now :-)), all routers (at least, in neighbourhood of mine)
  85     return only 8 bytes of payload. It is the end.
  86
   87   Hence, if we want OSPF to work or traceroute to say something reasonable,
  88   we should search for another solution.
  89
   90   One of them is to parse the packet, trying to detect inner encapsulation
   91   made by our node. It is difficult or even impossible, especially
   92   taking into account fragmentation. To be short, ttl is not a solution at all.
  93
  94   Current solution: The solution was UNEXPECTEDLY SIMPLE.
  95   We force DF flag on tunnels with preconfigured hop limit,
  96   that is ALL. :-) Well, it does not remove the problem completely,
  97   but exponential growth of network traffic is changed to linear
  98   (branches, that exceed pmtu are pruned) and tunnel mtu
  99   rapidly degrades to value <68, where looping stops.
 100   Yes, it is not good if there exists a router in the loop,
 101   which does not force DF, even when encapsulating packets have DF set.
 102   But it is not our problem! Nobody could accuse us, we made
 103   all that we could make. Even if it is your gated who injected
 104   fatal route to network, even if it were you who configured
 105   fatal static route: you are innocent. :-)
 106
 107   Alexey Kuznetsov.
 108 */
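/* A back-of-the-envelope sketch of the "linear" decay described above,
   assuming plain IPv4 + GRE with no options: every GRE-in-IPv4 nesting adds
   at least 24 bytes of headers (20-byte outer IPv4 header + 4-byte base GRE
   header, more with key/csum/seq).  With DF forced, each nesting therefore
   shrinks the usable tunnel MTU by at least 24 bytes, so even starting from
   a 1500-byte link MTU the loop self-limits after roughly
   (1500 - 68) / 24 ~= 60 levels, once the MTU falls below the 68-byte
   minimum quoted above. */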
 109
 110static bool log_ecn_error = true;
 111module_param(log_ecn_error, bool, 0644);
 112MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
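/* With mode 0644 the parameter is also writable at run time; assuming the
 * usual module name for this file (ip_gre), something like
 *
 *	echo 0 > /sys/module/ip_gre/parameters/log_ecn_error
 *
 * turns the warning off without reloading the module.
 */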
 113
 114static struct rtnl_link_ops ipgre_link_ops __read_mostly;
 115static int ipgre_tunnel_init(struct net_device *dev);
 116static void erspan_build_header(struct sk_buff *skb,
 117				u32 id, u32 index,
 118				bool truncate, bool is_ipv4);
 119
 120static unsigned int ipgre_net_id __read_mostly;
 121static unsigned int gre_tap_net_id __read_mostly;
 122static unsigned int erspan_net_id __read_mostly;
 123
 124static void ipgre_err(struct sk_buff *skb, u32 info,
 125		      const struct tnl_ptk_info *tpi)
 126{
 127
 128	/* All the routers (except for Linux) return only
 129	   8 bytes of packet payload. It means, that precise relaying of
 130	   ICMP in the real Internet is absolutely infeasible.
 131
 132	   Moreover, Cisco "wise men" put GRE key to the third word
 133	   in GRE header. It makes impossible maintaining even soft
 134	   state for keyed GRE tunnels with enabled checksum. Tell
 135	   them "thank you".
 136
 137	   Well, I wonder, rfc1812 was written by Cisco employee,
 138	   what the hell these idiots break standards established
 139	   by themselves???
 140	   */
 141	struct net *net = dev_net(skb->dev);
 142	struct ip_tunnel_net *itn;
 143	const struct iphdr *iph;
 144	const int type = icmp_hdr(skb)->type;
 145	const int code = icmp_hdr(skb)->code;
 146	unsigned int data_len = 0;
 147	struct ip_tunnel *t;
 148
 149	switch (type) {
 150	default:
 151	case ICMP_PARAMETERPROB:
 152		return;
 153
 154	case ICMP_DEST_UNREACH:
 155		switch (code) {
 156		case ICMP_SR_FAILED:
 157		case ICMP_PORT_UNREACH:
 158			/* Impossible event. */
 159			return;
 160		default:
 161			/* All others are translated to HOST_UNREACH.
 162			   rfc2003 contains "deep thoughts" about NET_UNREACH,
 163			   I believe they are just ether pollution. --ANK
 164			 */
 165			break;
 166		}
 167		break;
 168
 169	case ICMP_TIME_EXCEEDED:
 170		if (code != ICMP_EXC_TTL)
 171			return;
 172		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
 173		break;
 174
 175	case ICMP_REDIRECT:
 176		break;
 177	}
 178
 179	if (tpi->proto == htons(ETH_P_TEB))
 180		itn = net_generic(net, gre_tap_net_id);
 181	else
 182		itn = net_generic(net, ipgre_net_id);
 183
 184	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
 185	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 186			     iph->daddr, iph->saddr, tpi->key);
 187
 188	if (!t)
 189		return;
 190
 191#if IS_ENABLED(CONFIG_IPV6)
 192       if (tpi->proto == htons(ETH_P_IPV6) &&
 193           !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
 194				       type, data_len))
 195               return;
 196#endif
 197
 198	if (t->parms.iph.daddr == 0 ||
 199	    ipv4_is_multicast(t->parms.iph.daddr))
 200		return;
 201
 202	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
 203		return;
 204
 205	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
 206		t->err_count++;
 207	else
 208		t->err_count = 1;
 209	t->err_time = jiffies;
 210}
 211
 212static void gre_err(struct sk_buff *skb, u32 info)
 213{
 214	/* All the routers (except for Linux) return only
 215	 * 8 bytes of packet payload. It means, that precise relaying of
 216	 * ICMP in the real Internet is absolutely infeasible.
 217	 *
 218	 * Moreover, Cisco "wise men" put GRE key to the third word
 219	 * in GRE header. It makes impossible maintaining even soft
 220	 * state for keyed
 221	 * GRE tunnels with enabled checksum. Tell them "thank you".
 222	 *
 223	 * Well, I wonder, rfc1812 was written by Cisco employee,
 224	 * what the hell these idiots break standards established
 225	 * by themselves???
 226	 */
 227
 228	const struct iphdr *iph = (struct iphdr *)skb->data;
 229	const int type = icmp_hdr(skb)->type;
 230	const int code = icmp_hdr(skb)->code;
 231	struct tnl_ptk_info tpi;
 232	bool csum_err = false;
 233
 234	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
 235			     iph->ihl * 4) < 0) {
 236		if (!csum_err)		/* ignore csum errors. */
 237			return;
 238	}
 239
 240	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
 241		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
 242				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
 243		return;
 244	}
 245	if (type == ICMP_REDIRECT) {
 246		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
 247			      IPPROTO_GRE, 0);
 248		return;
 249	}
 250
 251	ipgre_err(skb, info, &tpi);
 252}
 253
 254static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 255		      int gre_hdr_len)
 256{
 257	struct net *net = dev_net(skb->dev);
 258	struct metadata_dst *tun_dst = NULL;
 259	struct erspan_base_hdr *ershdr;
 260	struct erspan_metadata *pkt_md;
 261	struct ip_tunnel_net *itn;
 262	struct ip_tunnel *tunnel;
 263	const struct iphdr *iph;
 264	struct erspan_md2 *md2;
 265	int ver;
 266	int len;
 267
 268	itn = net_generic(net, erspan_net_id);
 269	len = gre_hdr_len + sizeof(*ershdr);
 270
  271	/* Check the base header length */
 272	if (unlikely(!pskb_may_pull(skb, len)))
 273		return PACKET_REJECT;
 274
 
 275	iph = ip_hdr(skb);
 276	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
 277	ver = ershdr->ver;
 278
  279	/* The original GRE header does not have a key field;
  280	 * use the ERSPAN 10-bit session ID as the key.
  281	 */
 282	tpi->key = cpu_to_be32(get_session_id(ershdr));
 283	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
 284				  tpi->flags | TUNNEL_KEY,
 285				  iph->saddr, iph->daddr, tpi->key);
 286
 287	if (tunnel) {
 288		len = gre_hdr_len + erspan_hdr_len(ver);
 289		if (unlikely(!pskb_may_pull(skb, len)))
 290			return PACKET_REJECT;
 291
 292		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
 293		pkt_md = (struct erspan_metadata *)(ershdr + 1);
 294
 295		if (__iptunnel_pull_header(skb,
 296					   len,
 297					   htons(ETH_P_TEB),
 298					   false, false) < 0)
 299			goto drop;
 300
 301		if (tunnel->collect_md) {
 
 302			struct ip_tunnel_info *info;
 303			struct erspan_metadata *md;
 304			__be64 tun_id;
 305			__be16 flags;
 306
 307			tpi->flags |= TUNNEL_KEY;
 308			flags = tpi->flags;
 309			tun_id = key32_to_tunnel_id(tpi->key);
 310
 311			tun_dst = ip_tun_rx_dst(skb, flags,
 312						tun_id, sizeof(*md));
 313			if (!tun_dst)
 314				return PACKET_REJECT;
 315
 316			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
 317			md->version = ver;
 318			md2 = &md->u.md2;
 319			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
 320						       ERSPAN_V2_MDSIZE);
 321
 322			info = &tun_dst->u.tun_info;
 323			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
 
 324			info->options_len = sizeof(*md);
 325		}
 326
 327		skb_reset_mac_header(skb);
 328		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 329		return PACKET_RCVD;
 330	}
 331drop:
 332	kfree_skb(skb);
 333	return PACKET_RCVD;
 334}
 335
 336static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 337		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
 338{
 339	struct metadata_dst *tun_dst = NULL;
 340	const struct iphdr *iph;
 341	struct ip_tunnel *tunnel;
 342
 343	iph = ip_hdr(skb);
 344	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 345				  iph->saddr, iph->daddr, tpi->key);
 346
 347	if (tunnel) {
 348		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
 349					   raw_proto, false) < 0)
 350			goto drop;
 351
 352		if (tunnel->dev->type != ARPHRD_NONE)
 353			skb_pop_mac_header(skb);
 354		else
 355			skb_reset_mac_header(skb);
 356		if (tunnel->collect_md) {
 357			__be16 flags;
 358			__be64 tun_id;
 359
 360			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
 361			tun_id = key32_to_tunnel_id(tpi->key);
 362			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
 363			if (!tun_dst)
 364				return PACKET_REJECT;
 365		}
 366
 367		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 368		return PACKET_RCVD;
 369	}
 370	return PACKET_NEXT;
 371
 372drop:
 373	kfree_skb(skb);
 374	return PACKET_RCVD;
 375}
 376
 377static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 378		     int hdr_len)
 379{
 380	struct net *net = dev_net(skb->dev);
 381	struct ip_tunnel_net *itn;
 382	int res;
 383
 384	if (tpi->proto == htons(ETH_P_TEB))
 385		itn = net_generic(net, gre_tap_net_id);
 386	else
 387		itn = net_generic(net, ipgre_net_id);
 388
 389	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
 390	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
  391		/* ipgre tunnels in collect metadata mode should also
  392		 * receive ETH_P_TEB traffic.
  393		 */
 394		itn = net_generic(net, ipgre_net_id);
 395		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
 396	}
 397	return res;
 398}
 399
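/* Receive entry point, registered below via gre_add_protocol(): parse the
 * GRE header, hand ERSPAN/ERSPAN2 payloads to erspan_rcv(), everything else
 * to ipgre_rcv(), and answer with ICMP port-unreachable when no matching
 * tunnel is found (the "out:" label below).
 */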
 400static int gre_rcv(struct sk_buff *skb)
 401{
 402	struct tnl_ptk_info tpi;
 403	bool csum_err = false;
 404	int hdr_len;
 405
 406#ifdef CONFIG_NET_IPGRE_BROADCAST
 407	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
 408		/* Looped back packet, drop it! */
 409		if (rt_is_output_route(skb_rtable(skb)))
 410			goto drop;
 411	}
 412#endif
 413
 414	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
 415	if (hdr_len < 0)
 416		goto drop;
 417
 418	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
 419		     tpi.proto == htons(ETH_P_ERSPAN2))) {
 420		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 421			return 0;
 422		goto out;
 423	}
 424
 425	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 426		return 0;
 427
 428out:
 429	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 430drop:
 431	kfree_skb(skb);
 432	return 0;
 433}
 434
 435static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 436		       const struct iphdr *tnl_params,
 437		       __be16 proto)
 438{
 439	struct ip_tunnel *tunnel = netdev_priv(dev);
 
 440
 441	if (tunnel->parms.o_flags & TUNNEL_SEQ)
 442		tunnel->o_seqno++;
 443
 444	/* Push GRE header. */
 445	gre_build_header(skb, tunnel->tun_hlen,
 446			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
 447			 htonl(tunnel->o_seqno));
 
 448
 449	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 450}
 451
 452static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 453{
 454	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 455}
 456
 457static struct rtable *gre_get_rt(struct sk_buff *skb,
 458				 struct net_device *dev,
 459				 struct flowi4 *fl,
 460				 const struct ip_tunnel_key *key)
 461{
 462	struct net *net = dev_net(dev);
 463
 464	memset(fl, 0, sizeof(*fl));
 465	fl->daddr = key->u.ipv4.dst;
 466	fl->saddr = key->u.ipv4.src;
 467	fl->flowi4_tos = RT_TOS(key->tos);
 468	fl->flowi4_mark = skb->mark;
 469	fl->flowi4_proto = IPPROTO_GRE;
 470
 471	return ip_route_output_key(net, fl);
 472}
 473
 474static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
 475				      struct net_device *dev,
 476				      struct flowi4 *fl,
 477				      int tunnel_hlen)
 478{
 479	struct ip_tunnel_info *tun_info;
 480	const struct ip_tunnel_key *key;
 481	struct rtable *rt = NULL;
 482	int min_headroom;
 483	bool use_cache;
 484	int err;
 485
 486	tun_info = skb_tunnel_info(skb);
 487	key = &tun_info->key;
 488	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
 489
 490	if (use_cache)
 491		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
 492	if (!rt) {
 493		rt = gre_get_rt(skb, dev, fl, key);
 494		if (IS_ERR(rt))
 495			goto err_free_skb;
 496		if (use_cache)
 497			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
 498					  fl->saddr);
 499	}
 500
 501	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
 502			+ tunnel_hlen + sizeof(struct iphdr);
 503	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
 504		int head_delta = SKB_DATA_ALIGN(min_headroom -
 505						skb_headroom(skb) +
 506						16);
 507		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
 508				       0, GFP_ATOMIC);
 509		if (unlikely(err))
 510			goto err_free_rt;
 511	}
 512	return rt;
 513
 514err_free_rt:
 515	ip_rt_put(rt);
 516err_free_skb:
 517	kfree_skb(skb);
 518	dev->stats.tx_dropped++;
 519	return NULL;
 520}
 521
 522static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 523			__be16 proto)
 524{
 525	struct ip_tunnel *tunnel = netdev_priv(dev);
 
 526	struct ip_tunnel_info *tun_info;
 527	const struct ip_tunnel_key *key;
 528	struct rtable *rt = NULL;
 529	struct flowi4 fl;
 530	int tunnel_hlen;
 531	__be16 df, flags;
 532
 533	tun_info = skb_tunnel_info(skb);
 534	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 535		     ip_tunnel_info_af(tun_info) != AF_INET))
 536		goto err_free_skb;
 537
 538	key = &tun_info->key;
 539	tunnel_hlen = gre_calc_hlen(key->tun_flags);
 540
 541	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
 542	if (!rt)
 543		return;
 544
 545	/* Push Tunnel header. */
 546	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
 547		goto err_free_rt;
 548
 549	flags = tun_info->key.tun_flags &
 550		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
 551	gre_build_header(skb, tunnel_hlen, flags, proto,
 552			 tunnel_id_to_key32(tun_info->key.tun_id),
 553			 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
 
 554
 555	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?  htons(IP_DF) : 0;
 556
 557	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
 558		      key->tos, key->ttl, df, false);
 559	return;
 560
 561err_free_rt:
 562	ip_rt_put(rt);
 563err_free_skb:
 564	kfree_skb(skb);
 565	dev->stats.tx_dropped++;
 566}
 567
 568static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 569			   __be16 proto)
 570{
 571	struct ip_tunnel *tunnel = netdev_priv(dev);
 
 572	struct ip_tunnel_info *tun_info;
 573	const struct ip_tunnel_key *key;
 574	struct erspan_metadata *md;
 575	struct rtable *rt = NULL;
 576	bool truncate = false;
 577	struct flowi4 fl;
 578	int tunnel_hlen;
 579	int version;
 580	__be16 df;
 581
 582	tun_info = skb_tunnel_info(skb);
 583	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 584		     ip_tunnel_info_af(tun_info) != AF_INET))
 585		goto err_free_skb;
 586
 587	key = &tun_info->key;
 588	md = ip_tunnel_info_opts(tun_info);
 589	if (!md)
 590		goto err_free_rt;
 591
  592	/* ERSPAN has a fixed 8-byte GRE header */
 593	version = md->version;
 594	tunnel_hlen = 8 + erspan_hdr_len(version);
 595
 596	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
 597	if (!rt)
 598		return;
 599
 600	if (gre_handle_offloads(skb, false))
 601		goto err_free_rt;
 602
 603	if (skb->len > dev->mtu + dev->hard_header_len) {
 604		pskb_trim(skb, dev->mtu + dev->hard_header_len);
 
 605		truncate = true;
 606	}
 607
 608	if (version == 1) {
 609		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
 610				    ntohl(md->u.index), truncate, true);
 
 611	} else if (version == 2) {
 612		erspan_build_header_v2(skb,
 613				       ntohl(tunnel_id_to_key32(key->tun_id)),
 614				       md->u.md2.dir,
 615				       get_hwid(&md->u.md2),
 616				       truncate, true);
 
 617	} else {
 618		goto err_free_rt;
 619	}
 620
 621	gre_build_header(skb, 8, TUNNEL_SEQ,
 622			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
 
 623
 624	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?  htons(IP_DF) : 0;
 625
 626	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
 627		      key->tos, key->ttl, df, false);
 628	return;
 629
 630err_free_rt:
 631	ip_rt_put(rt);
 632err_free_skb:
 633	kfree_skb(skb);
 634	dev->stats.tx_dropped++;
 635}
 636
 637static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 638{
 639	struct ip_tunnel_info *info = skb_tunnel_info(skb);
 
 640	struct rtable *rt;
 641	struct flowi4 fl4;
 642
 643	if (ip_tunnel_info_af(info) != AF_INET)
 644		return -EINVAL;
 645
 646	rt = gre_get_rt(skb, dev, &fl4, &info->key);
 647	if (IS_ERR(rt))
 648		return PTR_ERR(rt);
 649
 650	ip_rt_put(rt);
 651	info->key.u.ipv4.src = fl4.saddr;
 652	return 0;
 653}
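/* gre_fill_metadata_dst() and the collect_md branches above implement the
 * "external" (flow-based) mode, where per-packet tunnel parameters come from
 * tunnel metadata rather than from the device itself.  A sketch of a typical
 * setup with iproute2 (device name, addresses and key are only examples, and
 * exact syntax may differ between iproute2 versions):
 *
 *	ip link add dev gretap-ext type gretap external
 *	ip link set dev gretap-ext up
 *	ip route add 203.0.113.0/24 dev gretap-ext \
 *		encap ip id 42 dst 192.0.2.1
 */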
 654
 655static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
 656			      struct net_device *dev)
 657{
 658	struct ip_tunnel *tunnel = netdev_priv(dev);
 659	const struct iphdr *tnl_params;
 660
 661	if (tunnel->collect_md) {
 662		gre_fb_xmit(skb, dev, skb->protocol);
 663		return NETDEV_TX_OK;
 664	}
 665
 666	if (dev->header_ops) {
 667		/* Need space for new headers */
 668		if (skb_cow_head(skb, dev->needed_headroom -
 669				      (tunnel->hlen + sizeof(struct iphdr))))
 670			goto free_skb;
 671
 672		tnl_params = (const struct iphdr *)skb->data;
 673
 674		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
 675		 * to gre header.
 676		 */
 677		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
 678		skb_reset_mac_header(skb);
 679	} else {
 680		if (skb_cow_head(skb, dev->needed_headroom))
 681			goto free_skb;
 682
 683		tnl_params = &tunnel->parms.iph;
 684	}
 685
 686	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
 
 687		goto free_skb;
 688
 689	__gre_xmit(skb, dev, tnl_params, skb->protocol);
 690	return NETDEV_TX_OK;
 691
 692free_skb:
 693	kfree_skb(skb);
 694	dev->stats.tx_dropped++;
 695	return NETDEV_TX_OK;
 696}
 697
 698static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 699			       struct net_device *dev)
 700{
 701	struct ip_tunnel *tunnel = netdev_priv(dev);
 702	bool truncate = false;
 703
 704	if (tunnel->collect_md) {
 705		erspan_fb_xmit(skb, dev, skb->protocol);
 706		return NETDEV_TX_OK;
 707	}
 708
 709	if (gre_handle_offloads(skb, false))
 710		goto free_skb;
 711
 712	if (skb_cow_head(skb, dev->needed_headroom))
 713		goto free_skb;
 714
 715	if (skb->len > dev->mtu + dev->hard_header_len) {
 716		pskb_trim(skb, dev->mtu + dev->hard_header_len);
 
 717		truncate = true;
 718	}
 719
 720	/* Push ERSPAN header */
 721	if (tunnel->erspan_ver == 1)
 722		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
 723				    tunnel->index,
 724				    truncate, true);
 725	else if (tunnel->erspan_ver == 2)
 
 726		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
 727				       tunnel->dir, tunnel->hwid,
 728				       truncate, true);
 729	else
 
 730		goto free_skb;
 
 731
 732	tunnel->parms.o_flags &= ~TUNNEL_KEY;
 733	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
 734	return NETDEV_TX_OK;
 735
 736free_skb:
 737	kfree_skb(skb);
 738	dev->stats.tx_dropped++;
 739	return NETDEV_TX_OK;
 740}
 741
 742static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
 743				struct net_device *dev)
 744{
 745	struct ip_tunnel *tunnel = netdev_priv(dev);
 746
 747	if (tunnel->collect_md) {
 748		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
 749		return NETDEV_TX_OK;
 750	}
 751
 752	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
 
 753		goto free_skb;
 754
 755	if (skb_cow_head(skb, dev->needed_headroom))
 756		goto free_skb;
 757
 758	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
 759	return NETDEV_TX_OK;
 760
 761free_skb:
 762	kfree_skb(skb);
 763	dev->stats.tx_dropped++;
 764	return NETDEV_TX_OK;
 765}
 766
 767static void ipgre_link_update(struct net_device *dev, bool set_mtu)
 768{
 769	struct ip_tunnel *tunnel = netdev_priv(dev);
 770	int len;
 771
 772	len = tunnel->tun_hlen;
 773	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
 774	len = tunnel->tun_hlen - len;
 775	tunnel->hlen = tunnel->hlen + len;
 776
 777	dev->needed_headroom = dev->needed_headroom + len;
 778	if (set_mtu)
 779		dev->mtu = max_t(int, dev->mtu - len, 68);
 780
 781	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
 782		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
 783		    tunnel->encap.type == TUNNEL_ENCAP_NONE) {
 784			dev->features |= NETIF_F_GSO_SOFTWARE;
 785			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
 786		} else {
 787			dev->features &= ~NETIF_F_GSO_SOFTWARE;
 788			dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
 789		}
 790		dev->features |= NETIF_F_LLTX;
 791	} else {
 792		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
 793		dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
 794	}
 795}
 796
 797static int ipgre_tunnel_ioctl(struct net_device *dev,
 798			      struct ifreq *ifr, int cmd)
 
 799{
 800	struct ip_tunnel_parm p;
 801	int err;
 802
 803	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
 804		return -EFAULT;
 805
 806	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
 807		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
 808		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
 809		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
 810			return -EINVAL;
 811	}
 812
 813	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
 814	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
 815
 816	err = ip_tunnel_ioctl(dev, &p, cmd);
 817	if (err)
 818		return err;
 819
 820	if (cmd == SIOCCHGTUNNEL) {
 821		struct ip_tunnel *t = netdev_priv(dev);
 822
 823		t->parms.i_flags = p.i_flags;
 824		t->parms.o_flags = p.o_flags;
 825
 826		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
 827			ipgre_link_update(dev, true);
 828	}
 829
 830	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
 831	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);
 832
 833	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
 834		return -EFAULT;
 835
 836	return 0;
 837}
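/* This is the legacy SIOCADDTUNNEL/SIOCCHGTUNNEL/SIOCDELTUNNEL path used by
 * the old "ip tunnel" command, e.g. something along the lines of
 *
 *	ip tunnel add gre1 mode gre local 198.51.100.1 remote 203.0.113.2 ttl 64
 *
 * (addresses are only examples).  Links created with "ip link add ... type
 * gre" go through the rtnl_link_ops / netlink code further below instead.
 */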
 838
  839/* Nice toy. Unfortunately, useless in real life :-)
  840   It allows you to construct a virtual multiprotocol broadcast "LAN"
  841   over the Internet, provided multicast routing is tuned.
 842
 843
  844   I have no idea whether this bicycle was invented before me,
  845   so I had to set ARPHRD_IPGRE to a random value.
  846   I have an impression that Cisco could make something similar,
 847   but this feature is apparently missing in IOS<=11.2(8).
 848
 849   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
 850   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
 851
 852   ping -t 255 224.66.66.66
 853
 854   If nobody answers, mbone does not work.
 855
 856   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
 857   ip addr add 10.66.66.<somewhat>/24 dev Universe
 858   ifconfig Universe up
 859   ifconfig Universe add fe80::<Your_real_addr>/10
 860   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
 861   ftp 10.66.66.66
 862   ...
 863   ftp fec0:6666:6666::193.233.7.65
 864   ...
 865 */
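/* A rough iproute2-only equivalent of the recipe above, keeping the example
 * addressing and placeholders as-is (exact syntax may vary):
 *
 *	ip tunnel add Universe mode gre remote 224.66.66.66 \
 *		local <Your_real_addr> ttl 255
 *	ip link set Universe up
 *	ip addr add 10.66.66.<somewhat>/24 dev Universe
 *	ip addr add fe80::<Your_real_addr>/10 dev Universe
 *	ip addr add fec0:6666:6666::<Your_real_addr>/96 dev Universe
 */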
 866static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
 867			unsigned short type,
 868			const void *daddr, const void *saddr, unsigned int len)
 869{
 870	struct ip_tunnel *t = netdev_priv(dev);
 871	struct iphdr *iph;
 872	struct gre_base_hdr *greh;
 873
 874	iph = skb_push(skb, t->hlen + sizeof(*iph));
 875	greh = (struct gre_base_hdr *)(iph+1);
 876	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
 877	greh->protocol = htons(type);
 878
 879	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
 880
 881	/* Set the source hardware address. */
 882	if (saddr)
 883		memcpy(&iph->saddr, saddr, 4);
 884	if (daddr)
 885		memcpy(&iph->daddr, daddr, 4);
 886	if (iph->daddr)
 887		return t->hlen + sizeof(*iph);
 888
 889	return -(t->hlen + sizeof(*iph));
 890}
 891
 892static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
 893{
 894	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
 895	memcpy(haddr, &iph->saddr, 4);
 896	return 4;
 897}
 898
 899static const struct header_ops ipgre_header_ops = {
 900	.create	= ipgre_header,
 901	.parse	= ipgre_header_parse,
 902};
 903
 904#ifdef CONFIG_NET_IPGRE_BROADCAST
 905static int ipgre_open(struct net_device *dev)
 906{
 907	struct ip_tunnel *t = netdev_priv(dev);
 908
 909	if (ipv4_is_multicast(t->parms.iph.daddr)) {
 910		struct flowi4 fl4;
 911		struct rtable *rt;
 912
 913		rt = ip_route_output_gre(t->net, &fl4,
 914					 t->parms.iph.daddr,
 915					 t->parms.iph.saddr,
 916					 t->parms.o_key,
 917					 RT_TOS(t->parms.iph.tos),
 918					 t->parms.link);
 919		if (IS_ERR(rt))
 920			return -EADDRNOTAVAIL;
 921		dev = rt->dst.dev;
 922		ip_rt_put(rt);
 923		if (!__in_dev_get_rtnl(dev))
 924			return -EADDRNOTAVAIL;
 925		t->mlink = dev->ifindex;
 926		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
 927	}
 928	return 0;
 929}
 930
 931static int ipgre_close(struct net_device *dev)
 932{
 933	struct ip_tunnel *t = netdev_priv(dev);
 934
 935	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
 936		struct in_device *in_dev;
 937		in_dev = inetdev_by_index(t->net, t->mlink);
 938		if (in_dev)
 939			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
 940	}
 941	return 0;
 942}
 943#endif
 944
 945static const struct net_device_ops ipgre_netdev_ops = {
 946	.ndo_init		= ipgre_tunnel_init,
 947	.ndo_uninit		= ip_tunnel_uninit,
 948#ifdef CONFIG_NET_IPGRE_BROADCAST
 949	.ndo_open		= ipgre_open,
 950	.ndo_stop		= ipgre_close,
 951#endif
 952	.ndo_start_xmit		= ipgre_xmit,
 953	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
 954	.ndo_change_mtu		= ip_tunnel_change_mtu,
 955	.ndo_get_stats64	= ip_tunnel_get_stats64,
 956	.ndo_get_iflink		= ip_tunnel_get_iflink,
 
 957};
 958
 959#define GRE_FEATURES (NETIF_F_SG |		\
 960		      NETIF_F_FRAGLIST |	\
 961		      NETIF_F_HIGHDMA |		\
 962		      NETIF_F_HW_CSUM)
 963
 964static void ipgre_tunnel_setup(struct net_device *dev)
 965{
 966	dev->netdev_ops		= &ipgre_netdev_ops;
 967	dev->type		= ARPHRD_IPGRE;
 968	ip_tunnel_setup(dev, ipgre_net_id);
 969}
 970
 971static void __gre_tunnel_init(struct net_device *dev)
 972{
 973	struct ip_tunnel *tunnel;
 974	int t_hlen;
 975
 976	tunnel = netdev_priv(dev);
 977	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
 978	tunnel->parms.iph.protocol = IPPROTO_GRE;
 979
 980	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
 981
 982	t_hlen = tunnel->hlen + sizeof(struct iphdr);
 983
 984	dev->features		|= GRE_FEATURES;
 985	dev->hw_features	|= GRE_FEATURES;
 986
 987	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
 988		/* TCP offload with GRE SEQ is not supported, nor
 989		 * can we support 2 levels of outer headers requiring
 990		 * an update.
 991		 */
 992		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
 993		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
 994			dev->features    |= NETIF_F_GSO_SOFTWARE;
 995			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
 996		}
 997
 998		/* Can use a lockless transmit, unless we generate
 999		 * output sequences
1000		 */
1001		dev->features |= NETIF_F_LLTX;
1002	}
1003}
1004
1005static int ipgre_tunnel_init(struct net_device *dev)
1006{
1007	struct ip_tunnel *tunnel = netdev_priv(dev);
1008	struct iphdr *iph = &tunnel->parms.iph;
1009
1010	__gre_tunnel_init(dev);
1011
1012	memcpy(dev->dev_addr, &iph->saddr, 4);
1013	memcpy(dev->broadcast, &iph->daddr, 4);
1014
1015	dev->flags		= IFF_NOARP;
1016	netif_keep_dst(dev);
1017	dev->addr_len		= 4;
1018
1019	if (iph->daddr && !tunnel->collect_md) {
1020#ifdef CONFIG_NET_IPGRE_BROADCAST
1021		if (ipv4_is_multicast(iph->daddr)) {
1022			if (!iph->saddr)
1023				return -EINVAL;
1024			dev->flags = IFF_BROADCAST;
1025			dev->header_ops = &ipgre_header_ops;
1026		}
1027#endif
1028	} else if (!tunnel->collect_md) {
1029		dev->header_ops = &ipgre_header_ops;
1030	}
1031
1032	return ip_tunnel_init(dev);
1033}
1034
1035static const struct gre_protocol ipgre_protocol = {
1036	.handler     = gre_rcv,
1037	.err_handler = gre_err,
1038};
1039
1040static int __net_init ipgre_init_net(struct net *net)
1041{
1042	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
1043}
1044
1045static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
 
1046{
1047	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
 
1048}
1049
1050static struct pernet_operations ipgre_net_ops = {
1051	.init = ipgre_init_net,
1052	.exit_batch = ipgre_exit_batch_net,
1053	.id   = &ipgre_net_id,
1054	.size = sizeof(struct ip_tunnel_net),
1055};
1056
1057static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1058				 struct netlink_ext_ack *extack)
1059{
1060	__be16 flags;
1061
1062	if (!data)
1063		return 0;
1064
1065	flags = 0;
1066	if (data[IFLA_GRE_IFLAGS])
1067		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1068	if (data[IFLA_GRE_OFLAGS])
1069		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1070	if (flags & (GRE_VERSION|GRE_ROUTING))
1071		return -EINVAL;
1072
1073	if (data[IFLA_GRE_COLLECT_METADATA] &&
1074	    data[IFLA_GRE_ENCAP_TYPE] &&
1075	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
1076		return -EINVAL;
1077
1078	return 0;
1079}
1080
1081static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1082			      struct netlink_ext_ack *extack)
1083{
1084	__be32 daddr;
1085
1086	if (tb[IFLA_ADDRESS]) {
1087		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1088			return -EINVAL;
1089		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1090			return -EADDRNOTAVAIL;
1091	}
1092
1093	if (!data)
1094		goto out;
1095
1096	if (data[IFLA_GRE_REMOTE]) {
1097		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1098		if (!daddr)
1099			return -EINVAL;
1100	}
1101
1102out:
1103	return ipgre_tunnel_validate(tb, data, extack);
1104}
1105
1106static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
1107			   struct netlink_ext_ack *extack)
1108{
1109	__be16 flags = 0;
1110	int ret;
1111
1112	if (!data)
1113		return 0;
1114
1115	ret = ipgre_tap_validate(tb, data, extack);
1116	if (ret)
1117		return ret;
1118
1119	/* ERSPAN should only have GRE sequence and key flag */
1120	if (data[IFLA_GRE_OFLAGS])
1121		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1122	if (data[IFLA_GRE_IFLAGS])
1123		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1124	if (!data[IFLA_GRE_COLLECT_METADATA] &&
1125	    flags != (GRE_SEQ | GRE_KEY))
1126		return -EINVAL;
1127
 1128	/* The ERSPAN Session ID is only 10 bits. Since we reuse the
 1129	 * 32-bit key field as the ID, check its range.
 1130	 */
1131	if (data[IFLA_GRE_IKEY] &&
1132	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1133		return -EINVAL;
1134
1135	if (data[IFLA_GRE_OKEY] &&
1136	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1137		return -EINVAL;
1138
1139	return 0;
1140}
1141
1142static int ipgre_netlink_parms(struct net_device *dev,
1143				struct nlattr *data[],
1144				struct nlattr *tb[],
1145				struct ip_tunnel_parm *parms,
1146				__u32 *fwmark)
1147{
1148	struct ip_tunnel *t = netdev_priv(dev);
1149
1150	memset(parms, 0, sizeof(*parms));
1151
1152	parms->iph.protocol = IPPROTO_GRE;
1153
1154	if (!data)
1155		return 0;
1156
1157	if (data[IFLA_GRE_LINK])
1158		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1159
1160	if (data[IFLA_GRE_IFLAGS])
1161		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));
 
1162
1163	if (data[IFLA_GRE_OFLAGS])
1164		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));
 
1165
1166	if (data[IFLA_GRE_IKEY])
1167		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1168
1169	if (data[IFLA_GRE_OKEY])
1170		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1171
1172	if (data[IFLA_GRE_LOCAL])
1173		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
1174
1175	if (data[IFLA_GRE_REMOTE])
1176		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
1177
1178	if (data[IFLA_GRE_TTL])
1179		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1180
1181	if (data[IFLA_GRE_TOS])
1182		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1183
1184	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
1185		if (t->ignore_df)
1186			return -EINVAL;
1187		parms->iph.frag_off = htons(IP_DF);
1188	}
1189
1190	if (data[IFLA_GRE_COLLECT_METADATA]) {
1191		t->collect_md = true;
1192		if (dev->type == ARPHRD_IPGRE)
1193			dev->type = ARPHRD_NONE;
1194	}
1195
1196	if (data[IFLA_GRE_IGNORE_DF]) {
1197		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
1198		  && (parms->iph.frag_off & htons(IP_DF)))
1199			return -EINVAL;
1200		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
1201	}
1202
1203	if (data[IFLA_GRE_FWMARK])
1204		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1205
1206	if (data[IFLA_GRE_ERSPAN_VER]) {
1207		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1208
1209		if (t->erspan_ver != 1 && t->erspan_ver != 2)
1210			return -EINVAL;
1211	}
1212
1213	if (t->erspan_ver == 1) {
1214		if (data[IFLA_GRE_ERSPAN_INDEX]) {
1215			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1216			if (t->index & ~INDEX_MASK)
1217				return -EINVAL;
1218		}
1219	} else if (t->erspan_ver == 2) {
1220		if (data[IFLA_GRE_ERSPAN_DIR]) {
1221			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1222			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
1223				return -EINVAL;
1224		}
1225		if (data[IFLA_GRE_ERSPAN_HWID]) {
1226			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1227			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
1228				return -EINVAL;
1229		}
1230	}
1231
1232	return 0;
1233}
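/* The ERSPAN attributes parsed above correspond roughly to the following
 * iproute2 invocations (names, addresses and IDs are illustrative, and the
 * option spelling may differ between iproute2 versions):
 *
 *	# version 1: IFLA_GRE_ERSPAN_VER=1, IFLA_GRE_ERSPAN_INDEX=123
 *	ip link add dev erspan1 type erspan local 10.0.0.1 remote 10.0.0.2 \
 *		seq key 100 erspan 123
 *
 *	# version 2: IFLA_GRE_ERSPAN_VER=2, DIR=egress, HWID=7
 *	ip link add dev erspan2 type erspan local 10.0.0.1 remote 10.0.0.2 \
 *		seq key 100 erspan_ver 2 erspan_dir egress erspan_hwid 7
 */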
1234
1235/* This function returns true when ENCAP attributes are present in the nl msg */
1236static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1237				      struct ip_tunnel_encap *ipencap)
1238{
1239	bool ret = false;
1240
1241	memset(ipencap, 0, sizeof(*ipencap));
1242
1243	if (!data)
1244		return ret;
1245
1246	if (data[IFLA_GRE_ENCAP_TYPE]) {
1247		ret = true;
1248		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1249	}
1250
1251	if (data[IFLA_GRE_ENCAP_FLAGS]) {
1252		ret = true;
1253		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1254	}
1255
1256	if (data[IFLA_GRE_ENCAP_SPORT]) {
1257		ret = true;
1258		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1259	}
1260
1261	if (data[IFLA_GRE_ENCAP_DPORT]) {
1262		ret = true;
1263		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1264	}
1265
1266	return ret;
1267}
1268
1269static int gre_tap_init(struct net_device *dev)
1270{
1271	__gre_tunnel_init(dev);
1272	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1273	netif_keep_dst(dev);
1274
1275	return ip_tunnel_init(dev);
1276}
1277
1278static const struct net_device_ops gre_tap_netdev_ops = {
1279	.ndo_init		= gre_tap_init,
1280	.ndo_uninit		= ip_tunnel_uninit,
1281	.ndo_start_xmit		= gre_tap_xmit,
1282	.ndo_set_mac_address 	= eth_mac_addr,
1283	.ndo_validate_addr	= eth_validate_addr,
1284	.ndo_change_mtu		= ip_tunnel_change_mtu,
1285	.ndo_get_stats64	= ip_tunnel_get_stats64,
1286	.ndo_get_iflink		= ip_tunnel_get_iflink,
1287	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1288};
1289
1290static int erspan_tunnel_init(struct net_device *dev)
1291{
1292	struct ip_tunnel *tunnel = netdev_priv(dev);
1293	int t_hlen;
1294
1295	tunnel->tun_hlen = 8;
1296	tunnel->parms.iph.protocol = IPPROTO_GRE;
1297	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1298		       erspan_hdr_len(tunnel->erspan_ver);
1299	t_hlen = tunnel->hlen + sizeof(struct iphdr);
1300
1301	dev->features		|= GRE_FEATURES;
1302	dev->hw_features	|= GRE_FEATURES;
1303	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
1304	netif_keep_dst(dev);
1305
1306	return ip_tunnel_init(dev);
1307}
1308
1309static const struct net_device_ops erspan_netdev_ops = {
1310	.ndo_init		= erspan_tunnel_init,
1311	.ndo_uninit		= ip_tunnel_uninit,
1312	.ndo_start_xmit		= erspan_xmit,
1313	.ndo_set_mac_address	= eth_mac_addr,
1314	.ndo_validate_addr	= eth_validate_addr,
1315	.ndo_change_mtu		= ip_tunnel_change_mtu,
1316	.ndo_get_stats64	= ip_tunnel_get_stats64,
1317	.ndo_get_iflink		= ip_tunnel_get_iflink,
1318	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1319};
1320
1321static void ipgre_tap_setup(struct net_device *dev)
1322{
1323	ether_setup(dev);
1324	dev->max_mtu = 0;
1325	dev->netdev_ops	= &gre_tap_netdev_ops;
1326	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1327	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
1328	ip_tunnel_setup(dev, gre_tap_net_id);
1329}
1330
1331bool is_gretap_dev(const struct net_device *dev)
 
1332{
1333	return dev->netdev_ops == &gre_tap_netdev_ops;
1334}
1335EXPORT_SYMBOL_GPL(is_gretap_dev);
1336
1337static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1338			 struct nlattr *tb[], struct nlattr *data[],
1339			 struct netlink_ext_ack *extack)
1340{
1341	struct ip_tunnel_parm p;
1342	struct ip_tunnel_encap ipencap;
1343	__u32 fwmark = 0;
1344	int err;
1345
1346	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1347		struct ip_tunnel *t = netdev_priv(dev);
1348		err = ip_tunnel_encap_setup(t, &ipencap);
1349
1350		if (err < 0)
1351			return err;
1352	}
1353
1354	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1355	if (err < 0)
1356		return err;
1357	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1358}
1359
1360static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1361			    struct nlattr *data[],
1362			    struct netlink_ext_ack *extack)
1363{
1364	struct ip_tunnel *t = netdev_priv(dev);
1365	struct ip_tunnel_encap ipencap;
1366	__u32 fwmark = t->fwmark;
1367	struct ip_tunnel_parm p;
1368	int err;
1369
1370	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1371		err = ip_tunnel_encap_setup(t, &ipencap);
1372
1373		if (err < 0)
1374			return err;
1375	}
1376
1377	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1378	if (err < 0)
1379		return err;
1380
1381	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1382	if (err < 0)
1383		return err;
1384
1385	t->parms.i_flags = p.i_flags;
1386	t->parms.o_flags = p.o_flags;
1387
1388	if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
1389		ipgre_link_update(dev, !tb[IFLA_MTU]);
1390
1391	return 0;
1392}
1393
1394static size_t ipgre_get_size(const struct net_device *dev)
1395{
1396	return
1397		/* IFLA_GRE_LINK */
1398		nla_total_size(4) +
1399		/* IFLA_GRE_IFLAGS */
1400		nla_total_size(2) +
1401		/* IFLA_GRE_OFLAGS */
1402		nla_total_size(2) +
1403		/* IFLA_GRE_IKEY */
1404		nla_total_size(4) +
1405		/* IFLA_GRE_OKEY */
1406		nla_total_size(4) +
1407		/* IFLA_GRE_LOCAL */
1408		nla_total_size(4) +
1409		/* IFLA_GRE_REMOTE */
1410		nla_total_size(4) +
1411		/* IFLA_GRE_TTL */
1412		nla_total_size(1) +
1413		/* IFLA_GRE_TOS */
1414		nla_total_size(1) +
1415		/* IFLA_GRE_PMTUDISC */
1416		nla_total_size(1) +
1417		/* IFLA_GRE_ENCAP_TYPE */
1418		nla_total_size(2) +
1419		/* IFLA_GRE_ENCAP_FLAGS */
1420		nla_total_size(2) +
1421		/* IFLA_GRE_ENCAP_SPORT */
1422		nla_total_size(2) +
1423		/* IFLA_GRE_ENCAP_DPORT */
1424		nla_total_size(2) +
1425		/* IFLA_GRE_COLLECT_METADATA */
1426		nla_total_size(0) +
1427		/* IFLA_GRE_IGNORE_DF */
1428		nla_total_size(1) +
1429		/* IFLA_GRE_FWMARK */
1430		nla_total_size(4) +
1431		/* IFLA_GRE_ERSPAN_INDEX */
1432		nla_total_size(4) +
1433		/* IFLA_GRE_ERSPAN_VER */
1434		nla_total_size(1) +
1435		/* IFLA_GRE_ERSPAN_DIR */
1436		nla_total_size(1) +
1437		/* IFLA_GRE_ERSPAN_HWID */
1438		nla_total_size(2) +
1439		0;
1440}
1441
1442static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1443{
1444	struct ip_tunnel *t = netdev_priv(dev);
1445	struct ip_tunnel_parm *p = &t->parms;
1446
1447	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1448	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
1449			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1450	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
1451			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
1452	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1453	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1454	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1455	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1456	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1457	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1458	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1459		       !!(p->iph.frag_off & htons(IP_DF))) ||
1460	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
1461		goto nla_put_failure;
1462
1463	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1464			t->encap.type) ||
1465	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
1466			 t->encap.sport) ||
1467	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
1468			 t->encap.dport) ||
1469	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
1470			t->encap.flags))
1471		goto nla_put_failure;
1472
1473	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
1474		goto nla_put_failure;
1475
1476	if (t->collect_md) {
1477		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
1478			goto nla_put_failure;
1479	}
1480
1481	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1482		goto nla_put_failure;
1483
1484	if (t->erspan_ver == 1) {
1485		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1486			goto nla_put_failure;
1487	} else if (t->erspan_ver == 2) {
1488		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1489			goto nla_put_failure;
1490		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1491			goto nla_put_failure;
1492	}
1493
1494	return 0;
1495
1496nla_put_failure:
1497	return -EMSGSIZE;
1498}
1499
1500static void erspan_setup(struct net_device *dev)
1501{
1502	ether_setup(dev);
 
1503	dev->netdev_ops = &erspan_netdev_ops;
1504	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1505	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1506	ip_tunnel_setup(dev, erspan_net_id);
 
1507}
1508
1509static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1510	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1511	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1512	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1513	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1514	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1515	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
1516	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
1517	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1518	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1519	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
1520	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
1521	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
1522	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
1523	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
1524	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
1525	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
1526	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
1527	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
1528	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
1529	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
1530	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
1531};
1532
1533static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1534	.kind		= "gre",
1535	.maxtype	= IFLA_GRE_MAX,
1536	.policy		= ipgre_policy,
1537	.priv_size	= sizeof(struct ip_tunnel),
1538	.setup		= ipgre_tunnel_setup,
1539	.validate	= ipgre_tunnel_validate,
1540	.newlink	= ipgre_newlink,
1541	.changelink	= ipgre_changelink,
1542	.dellink	= ip_tunnel_dellink,
1543	.get_size	= ipgre_get_size,
1544	.fill_info	= ipgre_fill_info,
1545	.get_link_net	= ip_tunnel_get_link_net,
1546};
1547
1548static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1549	.kind		= "gretap",
1550	.maxtype	= IFLA_GRE_MAX,
1551	.policy		= ipgre_policy,
1552	.priv_size	= sizeof(struct ip_tunnel),
1553	.setup		= ipgre_tap_setup,
1554	.validate	= ipgre_tap_validate,
1555	.newlink	= ipgre_newlink,
1556	.changelink	= ipgre_changelink,
1557	.dellink	= ip_tunnel_dellink,
1558	.get_size	= ipgre_get_size,
1559	.fill_info	= ipgre_fill_info,
1560	.get_link_net	= ip_tunnel_get_link_net,
1561};
1562
1563static struct rtnl_link_ops erspan_link_ops __read_mostly = {
1564	.kind		= "erspan",
1565	.maxtype	= IFLA_GRE_MAX,
1566	.policy		= ipgre_policy,
1567	.priv_size	= sizeof(struct ip_tunnel),
1568	.setup		= erspan_setup,
1569	.validate	= erspan_validate,
1570	.newlink	= ipgre_newlink,
1571	.changelink	= ipgre_changelink,
1572	.dellink	= ip_tunnel_dellink,
1573	.get_size	= ipgre_get_size,
1574	.fill_info	= ipgre_fill_info,
1575	.get_link_net	= ip_tunnel_get_link_net,
1576};
1577
1578struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1579					u8 name_assign_type)
1580{
1581	struct nlattr *tb[IFLA_MAX + 1];
1582	struct net_device *dev;
1583	LIST_HEAD(list_kill);
1584	struct ip_tunnel *t;
1585	int err;
1586
1587	memset(&tb, 0, sizeof(tb));
1588
1589	dev = rtnl_create_link(net, name, name_assign_type,
1590			       &ipgre_tap_ops, tb);
1591	if (IS_ERR(dev))
1592		return dev;
1593
 1594	/* Configure a flow-based GRE device. */
1595	t = netdev_priv(dev);
1596	t->collect_md = true;
1597
1598	err = ipgre_newlink(net, dev, tb, NULL, NULL);
1599	if (err < 0) {
1600		free_netdev(dev);
1601		return ERR_PTR(err);
1602	}
1603
1604	/* openvswitch users expect packet sizes to be unrestricted,
1605	 * so set the largest MTU we can.
1606	 */
1607	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1608	if (err)
1609		goto out;
1610
1611	err = rtnl_configure_link(dev, NULL);
1612	if (err < 0)
1613		goto out;
1614
1615	return dev;
1616out:
1617	ip_tunnel_dellink(dev, &list_kill);
1618	unregister_netdevice_many(&list_kill);
1619	return ERR_PTR(err);
1620}
1621EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
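/* Sketch of how an in-kernel caller (e.g. an openvswitch-style vport) might
 * use this helper; the device name and error handling are illustrative, and
 * callers are normally expected to hold the RTNL lock:
 *
 *	struct net_device *dev;
 *
 *	dev = gretap_fb_dev_create(net, "ovs-gretap", NET_NAME_USER);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 */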
1622
1623static int __net_init ipgre_tap_init_net(struct net *net)
1624{
1625	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
1626}
1627
1628static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
 
1629{
1630	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
 
1631}
1632
1633static struct pernet_operations ipgre_tap_net_ops = {
1634	.init = ipgre_tap_init_net,
1635	.exit_batch = ipgre_tap_exit_batch_net,
1636	.id   = &gre_tap_net_id,
1637	.size = sizeof(struct ip_tunnel_net),
1638};
1639
1640static int __net_init erspan_init_net(struct net *net)
1641{
1642	return ip_tunnel_init_net(net, erspan_net_id,
1643				  &erspan_link_ops, "erspan0");
1644}
1645
1646static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
 
1647{
1648	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
 
1649}
1650
1651static struct pernet_operations erspan_net_ops = {
1652	.init = erspan_init_net,
1653	.exit_batch = erspan_exit_batch_net,
1654	.id   = &erspan_net_id,
1655	.size = sizeof(struct ip_tunnel_net),
1656};
1657
1658static int __init ipgre_init(void)
1659{
1660	int err;
1661
1662	pr_info("GRE over IPv4 tunneling driver\n");
1663
1664	err = register_pernet_device(&ipgre_net_ops);
1665	if (err < 0)
1666		return err;
1667
1668	err = register_pernet_device(&ipgre_tap_net_ops);
1669	if (err < 0)
1670		goto pnet_tap_failed;
1671
1672	err = register_pernet_device(&erspan_net_ops);
1673	if (err < 0)
1674		goto pnet_erspan_failed;
1675
1676	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1677	if (err < 0) {
1678		pr_info("%s: can't add protocol\n", __func__);
1679		goto add_proto_failed;
1680	}
1681
1682	err = rtnl_link_register(&ipgre_link_ops);
1683	if (err < 0)
1684		goto rtnl_link_failed;
1685
1686	err = rtnl_link_register(&ipgre_tap_ops);
1687	if (err < 0)
1688		goto tap_ops_failed;
1689
1690	err = rtnl_link_register(&erspan_link_ops);
1691	if (err < 0)
1692		goto erspan_link_failed;
1693
1694	return 0;
1695
1696erspan_link_failed:
1697	rtnl_link_unregister(&ipgre_tap_ops);
1698tap_ops_failed:
1699	rtnl_link_unregister(&ipgre_link_ops);
1700rtnl_link_failed:
1701	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1702add_proto_failed:
1703	unregister_pernet_device(&erspan_net_ops);
1704pnet_erspan_failed:
1705	unregister_pernet_device(&ipgre_tap_net_ops);
1706pnet_tap_failed:
1707	unregister_pernet_device(&ipgre_net_ops);
1708	return err;
1709}
1710
1711static void __exit ipgre_fini(void)
1712{
1713	rtnl_link_unregister(&ipgre_tap_ops);
1714	rtnl_link_unregister(&ipgre_link_ops);
1715	rtnl_link_unregister(&erspan_link_ops);
1716	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1717	unregister_pernet_device(&ipgre_tap_net_ops);
1718	unregister_pernet_device(&ipgre_net_ops);
1719	unregister_pernet_device(&erspan_net_ops);
1720}
1721
1722module_init(ipgre_init);
1723module_exit(ipgre_fini);
 
1724MODULE_LICENSE("GPL");
1725MODULE_ALIAS_RTNL_LINK("gre");
1726MODULE_ALIAS_RTNL_LINK("gretap");
1727MODULE_ALIAS_RTNL_LINK("erspan");
1728MODULE_ALIAS_NETDEV("gre0");
1729MODULE_ALIAS_NETDEV("gretap0");
1730MODULE_ALIAS_NETDEV("erspan0");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	Linux NET3:	GRE over IP protocol decoder.
   4 *
   5 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 
 
 
 
 
 
   6 */
   7
   8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   9
  10#include <linux/capability.h>
  11#include <linux/module.h>
  12#include <linux/types.h>
  13#include <linux/kernel.h>
  14#include <linux/slab.h>
  15#include <linux/uaccess.h>
  16#include <linux/skbuff.h>
  17#include <linux/netdevice.h>
  18#include <linux/in.h>
  19#include <linux/tcp.h>
  20#include <linux/udp.h>
  21#include <linux/if_arp.h>
  22#include <linux/if_vlan.h>
  23#include <linux/init.h>
  24#include <linux/in6.h>
  25#include <linux/inetdevice.h>
  26#include <linux/igmp.h>
  27#include <linux/netfilter_ipv4.h>
  28#include <linux/etherdevice.h>
  29#include <linux/if_ether.h>
  30
  31#include <net/sock.h>
  32#include <net/ip.h>
  33#include <net/icmp.h>
  34#include <net/protocol.h>
  35#include <net/ip_tunnels.h>
  36#include <net/arp.h>
  37#include <net/checksum.h>
  38#include <net/dsfield.h>
  39#include <net/inet_ecn.h>
  40#include <net/xfrm.h>
  41#include <net/net_namespace.h>
  42#include <net/netns/generic.h>
  43#include <net/rtnetlink.h>
  44#include <net/gre.h>
  45#include <net/dst_metadata.h>
  46#include <net/erspan.h>
  47#include <net/inet_dscp.h>
  48
  49/*
  50   Problems & solutions
  51   --------------------
  52
  53   1. The most important issue is detecting local dead loops.
  54   They would cause complete host lockup in transmit, which
  55   would be "resolved" by stack overflow or, if queueing is enabled,
  56   with infinite looping in net_bh.
  57
  58   We cannot track such dead loops during route installation,
  59   it is infeasible task. The most general solutions would be
  60   to keep skb->encapsulation counter (sort of local ttl),
  61   and silently drop packet when it expires. It is a good
  62   solution, but it supposes maintaining new variable in ALL
  63   skb, even if no tunneling is used.
  64
  65   Current solution: xmit_recursion breaks dead loops. This is a percpu
  66   counter, since when we enter the first ndo_xmit(), cpu migration is
  67   forbidden. We force an exit if this counter reaches RECURSION_LIMIT
  68
  69   2. Networking dead loops would not kill routers, but would really
  70   kill network. IP hop limit plays role of "t->recursion" in this case,
  71   if we copy it from packet being encapsulated to upper header.
  72   It is very good solution, but it introduces two problems:
  73
  74   - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
  75     do not work over tunnels.
  76   - traceroute does not work. I planned to relay ICMP from tunnel,
  77     so that this problem would be solved and traceroute output
  78     would even more informative. This idea appeared to be wrong:
  79     only Linux complies to rfc1812 now (yes, guys, Linux is the only
  80     true router now :-)), all routers (at least, in neighbourhood of mine)
  81     return only 8 bytes of payload. It is the end.
  82
  83   Hence, if we want that OSPF worked or traceroute said something reasonable,
  84   we should search for another solution.
  85
  86   One of them is to parse packet trying to detect inner encapsulation
  87   made by our node. It is difficult or even impossible, especially,
  88   taking into account fragmentation. TO be short, ttl is not solution at all.
  89
  90   Current solution: The solution was UNEXPECTEDLY SIMPLE.
  91   We force DF flag on tunnels with preconfigured hop limit,
  92   that is ALL. :-) Well, it does not remove the problem completely,
  93   but exponential growth of network traffic is changed to linear
   94   (branches that exceed pmtu are pruned) and tunnel mtu
  95   rapidly degrades to value <68, where looping stops.
  96   Yes, it is not good if there exists a router in the loop,
  97   which does not force DF, even when encapsulating packets have DF set.
  98   But it is not our problem! Nobody could accuse us, we made
  99   all that we could make. Even if it is your gated who injected
 100   fatal route to network, even if it were you who configured
 101   fatal static route: you are innocent. :-)
 102
 103   Alexey Kuznetsov.
 104 */
 105
 106static bool log_ecn_error = true;
 107module_param(log_ecn_error, bool, 0644);
 108MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 109
 110static struct rtnl_link_ops ipgre_link_ops __read_mostly;
 111static const struct header_ops ipgre_header_ops;
 112
 113static int ipgre_tunnel_init(struct net_device *dev);
 114static void erspan_build_header(struct sk_buff *skb,
 115				u32 id, u32 index,
 116				bool truncate, bool is_ipv4);
 117
 118static unsigned int ipgre_net_id __read_mostly;
 119static unsigned int gre_tap_net_id __read_mostly;
 120static unsigned int erspan_net_id __read_mostly;
 121
 122static int ipgre_err(struct sk_buff *skb, u32 info,
 123		     const struct tnl_ptk_info *tpi)
 124{
 125
  126	/* All the routers (except for Linux) return only
  127	   8 bytes of packet payload. It means that precise relaying of
  128	   ICMP in the real Internet is absolutely infeasible.
  129
  130	   Moreover, Cisco "wise men" put the GRE key in the third word
  131	   of the GRE header. That makes it impossible to maintain even soft
  132	   state for keyed GRE tunnels with checksums enabled. Tell
  133	   them "thank you".
  134
  135	   Well, I wonder: rfc1812 was written by a Cisco employee,
  136	   so why the hell do these idiots break standards established
  137	   by themselves???
  138	   */
 139	struct net *net = dev_net(skb->dev);
 140	struct ip_tunnel_net *itn;
 141	const struct iphdr *iph;
 142	const int type = icmp_hdr(skb)->type;
 143	const int code = icmp_hdr(skb)->code;
 144	unsigned int data_len = 0;
 145	struct ip_tunnel *t;
 146
 147	if (tpi->proto == htons(ETH_P_TEB))
 148		itn = net_generic(net, gre_tap_net_id);
 149	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
 150		 tpi->proto == htons(ETH_P_ERSPAN2))
 151		itn = net_generic(net, erspan_net_id);
 152	else
 153		itn = net_generic(net, ipgre_net_id);
 154
 155	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
 156	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 157			     iph->daddr, iph->saddr, tpi->key);
 158
 159	if (!t)
 160		return -ENOENT;
 161
 162	switch (type) {
 163	default:
 164	case ICMP_PARAMETERPROB:
 165		return 0;
 166
 167	case ICMP_DEST_UNREACH:
 168		switch (code) {
 169		case ICMP_SR_FAILED:
 170		case ICMP_PORT_UNREACH:
 171			/* Impossible event. */
 172			return 0;
 173		default:
 174			/* All others are translated to HOST_UNREACH.
 175			   rfc2003 contains "deep thoughts" about NET_UNREACH,
 176			   I believe they are just ether pollution. --ANK
 177			 */
 178			break;
 179		}
 180		break;
 181
 182	case ICMP_TIME_EXCEEDED:
 183		if (code != ICMP_EXC_TTL)
 184			return 0;
 185		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
 186		break;
 187
 188	case ICMP_REDIRECT:
 189		break;
 190	}
 191
 192#if IS_ENABLED(CONFIG_IPV6)
 193	if (tpi->proto == htons(ETH_P_IPV6) &&
 194	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
 195					type, data_len))
 196		return 0;
 197#endif
 198
 199	if (t->parms.iph.daddr == 0 ||
 200	    ipv4_is_multicast(t->parms.iph.daddr))
 201		return 0;
 202
 203	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
 204		return 0;
 205
 206	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
 207		t->err_count++;
 208	else
 209		t->err_count = 1;
 210	t->err_time = jiffies;
 211
 212	return 0;
 213}
 214
 215static void gre_err(struct sk_buff *skb, u32 info)
 216{
  217	/* All the routers (except for Linux) return only
  218	 * 8 bytes of packet payload. It means that precise relaying of
  219	 * ICMP in the real Internet is absolutely infeasible.
  220	 *
  221	 * Moreover, Cisco "wise men" put the GRE key in the third word
  222	 * of the GRE header. That makes it impossible to maintain even soft
  223	 * state for keyed GRE tunnels with checksums enabled.
  224	 * Tell them "thank you".
  225	 *
  226	 * Well, I wonder: rfc1812 was written by a Cisco employee,
  227	 * so why the hell do these idiots break standards established
  228	 * by themselves???
  229	 */
 230
 231	const struct iphdr *iph = (struct iphdr *)skb->data;
 232	const int type = icmp_hdr(skb)->type;
 233	const int code = icmp_hdr(skb)->code;
 234	struct tnl_ptk_info tpi;
 235
 236	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
 237			     iph->ihl * 4) < 0)
 238		return;
 239
 240	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
 241		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
 242				 skb->dev->ifindex, IPPROTO_GRE);
 243		return;
 244	}
 245	if (type == ICMP_REDIRECT) {
 246		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
 247			      IPPROTO_GRE);
 248		return;
 249	}
 250
 251	ipgre_err(skb, info, &tpi);
 252}
 253
 254static bool is_erspan_type1(int gre_hdr_len)
 255{
  256	/* Both ERSPAN type I (version 0) and type II (version 1) use
  257	 * protocol 0x88BE, but type I has only a 4-byte GRE header,
  258	 * while type II has an 8-byte one.
  259	 */
 260	return gre_hdr_len == 4;
 261}
 262
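     /* ERSPAN receive path: look up the tunnel by the outer IP addresses
      * (and by key for type II/III), strip the GRE and ERSPAN headers and,
      * for collect_md tunnels, attach a metadata dst carrying the ERSPAN
      * version and session options before handing the frame to
      * ip_tunnel_rcv().
      */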
 263static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 264		      int gre_hdr_len)
 265{
 266	struct net *net = dev_net(skb->dev);
 267	struct metadata_dst *tun_dst = NULL;
 268	struct erspan_base_hdr *ershdr;
 269	IP_TUNNEL_DECLARE_FLAGS(flags);
 270	struct ip_tunnel_net *itn;
 271	struct ip_tunnel *tunnel;
 272	const struct iphdr *iph;
 273	struct erspan_md2 *md2;
 274	int ver;
 275	int len;
 276
 277	ip_tunnel_flags_copy(flags, tpi->flags);
 278
 279	itn = net_generic(net, erspan_net_id);
 280	iph = ip_hdr(skb);
 281	if (is_erspan_type1(gre_hdr_len)) {
 282		ver = 0;
 283		__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
 284		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
 285					  iph->saddr, iph->daddr, 0);
 286	} else {
 287		if (unlikely(!pskb_may_pull(skb,
 288					    gre_hdr_len + sizeof(*ershdr))))
 289			return PACKET_REJECT;
 290
 291		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
 292		ver = ershdr->ver;
 293		iph = ip_hdr(skb);
 294		__set_bit(IP_TUNNEL_KEY_BIT, flags);
 295		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
 296					  iph->saddr, iph->daddr, tpi->key);
 297	}
 298
 299	if (tunnel) {
 300		if (is_erspan_type1(gre_hdr_len))
 301			len = gre_hdr_len;
 302		else
 303			len = gre_hdr_len + erspan_hdr_len(ver);
 304
 305		if (unlikely(!pskb_may_pull(skb, len)))
 306			return PACKET_REJECT;
 307
 308		if (__iptunnel_pull_header(skb,
 309					   len,
 310					   htons(ETH_P_TEB),
 311					   false, false) < 0)
 312			goto drop;
 313
 314		if (tunnel->collect_md) {
 315			struct erspan_metadata *pkt_md, *md;
 316			struct ip_tunnel_info *info;
 317			unsigned char *gh;
 318			__be64 tun_id;
 319
 320			__set_bit(IP_TUNNEL_KEY_BIT, tpi->flags);
 321			ip_tunnel_flags_copy(flags, tpi->flags);
 322			tun_id = key32_to_tunnel_id(tpi->key);
 323
 324			tun_dst = ip_tun_rx_dst(skb, flags,
 325						tun_id, sizeof(*md));
 326			if (!tun_dst)
 327				return PACKET_REJECT;
 328
 329			/* skb can be uncloned in __iptunnel_pull_header, so
 330			 * old pkt_md is no longer valid and we need to reset
 331			 * it
 332			 */
 333			gh = skb_network_header(skb) +
 334			     skb_network_header_len(skb);
 335			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
 336							    sizeof(*ershdr));
 337			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
 338			md->version = ver;
 339			md2 = &md->u.md2;
 340			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
 341						       ERSPAN_V2_MDSIZE);
 342
 343			info = &tun_dst->u.tun_info;
 344			__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
 345				  info->key.tun_flags);
 346			info->options_len = sizeof(*md);
 347		}
 348
 349		skb_reset_mac_header(skb);
 350		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 351		return PACKET_RCVD;
 352	}
 353	return PACKET_REJECT;
 354
 355drop:
 356	kfree_skb(skb);
 357	return PACKET_RCVD;
 358}
 359
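     /* Common GRE receive: find the tunnel matching the outer addresses,
      * GRE flags and key, pull the tunnel header and, for collect_md
      * tunnels or tunnels with no fixed remote (daddr == 0), build a
      * metadata dst from the parsed key before passing the packet to
      * ip_tunnel_rcv().  Returns PACKET_NEXT when no tunnel matched so
      * the caller can retry against another tunnel table.
      */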
 360static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 361		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
 362{
 363	struct metadata_dst *tun_dst = NULL;
 364	const struct iphdr *iph;
 365	struct ip_tunnel *tunnel;
 366
 367	iph = ip_hdr(skb);
 368	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 369				  iph->saddr, iph->daddr, tpi->key);
 370
 371	if (tunnel) {
 372		const struct iphdr *tnl_params;
 373
 374		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
 375					   raw_proto, false) < 0)
 376			goto drop;
 377
 378		/* Special case for ipgre_header_parse(), which expects the
 379		 * mac_header to point to the outer IP header.
 380		 */
 381		if (tunnel->dev->header_ops == &ipgre_header_ops)
 382			skb_pop_mac_header(skb);
 383		else
 384			skb_reset_mac_header(skb);
 385
 386		tnl_params = &tunnel->parms.iph;
 387		if (tunnel->collect_md || tnl_params->daddr == 0) {
 388			IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 389			__be64 tun_id;
 390
 391			__set_bit(IP_TUNNEL_CSUM_BIT, flags);
 392			__set_bit(IP_TUNNEL_KEY_BIT, flags);
 393			ip_tunnel_flags_and(flags, tpi->flags, flags);
 394
 395			tun_id = key32_to_tunnel_id(tpi->key);
 396			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
 397			if (!tun_dst)
 398				return PACKET_REJECT;
 399		}
 400
 401		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 402		return PACKET_RCVD;
 403	}
 404	return PACKET_NEXT;
 405
 406drop:
 407	kfree_skb(skb);
 408	return PACKET_RCVD;
 409}
 410
 411static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 412		     int hdr_len)
 413{
 414	struct net *net = dev_net(skb->dev);
 415	struct ip_tunnel_net *itn;
 416	int res;
 417
 418	if (tpi->proto == htons(ETH_P_TEB))
 419		itn = net_generic(net, gre_tap_net_id);
 420	else
 421		itn = net_generic(net, ipgre_net_id);
 422
 423	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
 424	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
  425		/* ipgre tunnels in collect metadata mode should also
  426		 * receive ETH_P_TEB traffic.
  427		 */
 428		itn = net_generic(net, ipgre_net_id);
 429		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
 430	}
 431	return res;
 432}
 433
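     /* Entry point for GREPROTO_CISCO packets: parse the GRE header,
      * dispatch ERSPAN traffic to erspan_rcv() and everything else to
      * ipgre_rcv(), and answer with ICMP port unreachable when no tunnel
      * claims the packet.
      */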
 434static int gre_rcv(struct sk_buff *skb)
 435{
 436	struct tnl_ptk_info tpi;
 437	bool csum_err = false;
 438	int hdr_len;
 439
 440#ifdef CONFIG_NET_IPGRE_BROADCAST
 441	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
 442		/* Looped back packet, drop it! */
 443		if (rt_is_output_route(skb_rtable(skb)))
 444			goto drop;
 445	}
 446#endif
 447
 448	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
 449	if (hdr_len < 0)
 450		goto drop;
 451
 452	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
 453		     tpi.proto == htons(ETH_P_ERSPAN2))) {
 454		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 455			return 0;
 456		goto out;
 457	}
 458
 459	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 460		return 0;
 461
 462out:
 463	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 464drop:
 465	kfree_skb(skb);
 466	return 0;
 467}
 468
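     /* Push a GRE header (with key/csum/seq as configured in o_flags) in
      * front of the payload and hand the result to ip_tunnel_xmit() for
      * IPv4 encapsulation.
      */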
 469static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 470		       const struct iphdr *tnl_params,
 471		       __be16 proto)
 472{
 473	struct ip_tunnel *tunnel = netdev_priv(dev);
 474	IP_TUNNEL_DECLARE_FLAGS(flags);
 475
 476	ip_tunnel_flags_copy(flags, tunnel->parms.o_flags);
 477
 478	/* Push GRE header. */
 479	gre_build_header(skb, tunnel->tun_hlen,
 480			 flags, proto, tunnel->parms.o_key,
 481			 test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
 482			 htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
 483
 484	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 485}
 486
 487static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 488{
 489	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 490}
 491
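     /* Transmit for flow-based (collect_md) GRE devices: the key, csum
      * and seq bits for the GRE header are taken from the per-skb tunnel
      * metadata rather than from the netdevice configuration, and the
      * packet is sent through ip_md_tunnel_xmit().
      */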
 492static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 493			__be16 proto)
 494{
 495	struct ip_tunnel *tunnel = netdev_priv(dev);
 496	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 497	struct ip_tunnel_info *tun_info;
 498	const struct ip_tunnel_key *key;
 499	int tunnel_hlen;
 500
 501	tun_info = skb_tunnel_info(skb);
 502	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 503		     ip_tunnel_info_af(tun_info) != AF_INET))
 504		goto err_free_skb;
 505
 506	key = &tun_info->key;
 507	tunnel_hlen = gre_calc_hlen(key->tun_flags);
 508
 509	if (skb_cow_head(skb, dev->needed_headroom))
 510		goto err_free_skb;
 511
 512	/* Push Tunnel header. */
 513	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
 514					      tunnel->parms.o_flags)))
 515		goto err_free_skb;
 516
 517	__set_bit(IP_TUNNEL_CSUM_BIT, flags);
 518	__set_bit(IP_TUNNEL_KEY_BIT, flags);
 519	__set_bit(IP_TUNNEL_SEQ_BIT, flags);
 520	ip_tunnel_flags_and(flags, tun_info->key.tun_flags, flags);
 521
 522	gre_build_header(skb, tunnel_hlen, flags, proto,
 523			 tunnel_id_to_key32(tun_info->key.tun_id),
 524			 test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
 525			 htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
 526
 527	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
 528
 529	return;
 530
 531err_free_skb:
 532	kfree_skb(skb);
 533	DEV_STATS_INC(dev, tx_dropped);
 534}
 535
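     /* Transmit for flow-based (collect_md) ERSPAN devices: the ERSPAN
      * version and session metadata come from the tunnel info options,
      * oversized payloads are truncated to the device MTU, and an ERSPAN
      * v1/v2 header plus a sequenced GRE header are pushed before
      * ip_md_tunnel_xmit().
      */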
 536static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 537{
 538	struct ip_tunnel *tunnel = netdev_priv(dev);
 539	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 540	struct ip_tunnel_info *tun_info;
 541	const struct ip_tunnel_key *key;
 542	struct erspan_metadata *md;
 543	bool truncate = false;
 544	__be16 proto;
 545	int tunnel_hlen;
 546	int version;
 547	int nhoff;
 548
 549	tun_info = skb_tunnel_info(skb);
 550	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 551		     ip_tunnel_info_af(tun_info) != AF_INET))
 552		goto err_free_skb;
 553
 554	key = &tun_info->key;
 555	if (!test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_info->key.tun_flags))
 556		goto err_free_skb;
 557	if (tun_info->options_len < sizeof(*md))
 558		goto err_free_skb;
 559	md = ip_tunnel_info_opts(tun_info);
 560
  561	/* ERSPAN has a fixed 8-byte GRE header */
 562	version = md->version;
 563	tunnel_hlen = 8 + erspan_hdr_len(version);
 564
 565	if (skb_cow_head(skb, dev->needed_headroom))
 566		goto err_free_skb;
 567
 568	if (gre_handle_offloads(skb, false))
 569		goto err_free_skb;
 570
 571	if (skb->len > dev->mtu + dev->hard_header_len) {
 572		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
 573			goto err_free_skb;
 574		truncate = true;
 575	}
 576
 577	nhoff = skb_network_offset(skb);
 578	if (skb->protocol == htons(ETH_P_IP) &&
 579	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
 580		truncate = true;
 581
 582	if (skb->protocol == htons(ETH_P_IPV6)) {
 583		int thoff;
 584
 585		if (skb_transport_header_was_set(skb))
 586			thoff = skb_transport_offset(skb);
 587		else
 588			thoff = nhoff + sizeof(struct ipv6hdr);
 589		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
 590			truncate = true;
 591	}
 592
 593	if (version == 1) {
 594		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
 595				    ntohl(md->u.index), truncate, true);
 596		proto = htons(ETH_P_ERSPAN);
 597	} else if (version == 2) {
 598		erspan_build_header_v2(skb,
 599				       ntohl(tunnel_id_to_key32(key->tun_id)),
 600				       md->u.md2.dir,
 601				       get_hwid(&md->u.md2),
 602				       truncate, true);
 603		proto = htons(ETH_P_ERSPAN2);
 604	} else {
 605		goto err_free_skb;
 606	}
 607
 608	__set_bit(IP_TUNNEL_SEQ_BIT, flags);
 609	gre_build_header(skb, 8, flags, proto, 0,
 610			 htonl(atomic_fetch_inc(&tunnel->o_seqno)));
 611
 612	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
 613
 614	return;
 615
 616err_free_skb:
 617	kfree_skb(skb);
 618	DEV_STATS_INC(dev, tx_dropped);
 619}
 620
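     /* ndo_fill_metadata_dst: resolve the IPv4 route for the tunnel
      * destination carried in the skb metadata and record the chosen
      * source address back into the tunnel key, so the caller learns the
      * local endpoint before transmitting.
      */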
 621static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 622{
 623	struct ip_tunnel_info *info = skb_tunnel_info(skb);
 624	const struct ip_tunnel_key *key;
 625	struct rtable *rt;
 626	struct flowi4 fl4;
 627
 628	if (ip_tunnel_info_af(info) != AF_INET)
 629		return -EINVAL;
 630
 631	key = &info->key;
 632	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
 633			    tunnel_id_to_key32(key->tun_id),
 634			    key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
 635			    skb->mark, skb_get_hash(skb), key->flow_flags);
 636	rt = ip_route_output_key(dev_net(dev), &fl4);
 637	if (IS_ERR(rt))
 638		return PTR_ERR(rt);
 639
 640	ip_rt_put(rt);
 641	info->key.u.ipv4.src = fl4.saddr;
 642	return 0;
 643}
 644
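     /* ndo_start_xmit for layer-3 GRE devices: collect_md tunnels go
      * through gre_fb_xmit(); devices with header_ops already carry the
      * outer IP header built by ipgre_header(), so it is pulled from the
      * skb and used as the tunnel parameters; otherwise the device's
      * configured parameters are used.
      */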
 645static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
 646			      struct net_device *dev)
 647{
 648	struct ip_tunnel *tunnel = netdev_priv(dev);
 649	const struct iphdr *tnl_params;
 650
 651	if (!pskb_inet_may_pull(skb))
 652		goto free_skb;
 653
 654	if (tunnel->collect_md) {
 655		gre_fb_xmit(skb, dev, skb->protocol);
 656		return NETDEV_TX_OK;
 657	}
 658
 659	if (dev->header_ops) {
 660		int pull_len = tunnel->hlen + sizeof(struct iphdr);
 661
 662		if (skb_cow_head(skb, 0))
 663			goto free_skb;
 664
 665		if (!pskb_may_pull(skb, pull_len))
 666			goto free_skb;
 667
 668		tnl_params = (const struct iphdr *)skb->data;
 669
 670		/* ip_tunnel_xmit() needs skb->data pointing to gre header. */
 671		skb_pull(skb, pull_len);
 672		skb_reset_mac_header(skb);
 673
 674		if (skb->ip_summed == CHECKSUM_PARTIAL &&
 675		    skb_checksum_start(skb) < skb->data)
 676			goto free_skb;
 677	} else {
 678		if (skb_cow_head(skb, dev->needed_headroom))
 679			goto free_skb;
 680
 681		tnl_params = &tunnel->parms.iph;
 682	}
 683
 684	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
 685					      tunnel->parms.o_flags)))
 686		goto free_skb;
 687
 688	__gre_xmit(skb, dev, tnl_params, skb->protocol);
 689	return NETDEV_TX_OK;
 690
 691free_skb:
 692	kfree_skb(skb);
 693	DEV_STATS_INC(dev, tx_dropped);
 694	return NETDEV_TX_OK;
 695}
 696
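     /* ndo_start_xmit for configured (non-metadata) ERSPAN devices:
      * truncate oversized frames, build the ERSPAN v1/v2 header from the
      * tunnel's o_key, index, dir and hwid settings (type I adds no
      * ERSPAN header), then emit the result as a keyless GRE frame via
      * __gre_xmit().  collect_md devices are handled by erspan_fb_xmit().
      */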
 697static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 698			       struct net_device *dev)
 699{
 700	struct ip_tunnel *tunnel = netdev_priv(dev);
 701	bool truncate = false;
 702	__be16 proto;
 703
 704	if (!pskb_inet_may_pull(skb))
 705		goto free_skb;
 706
 707	if (tunnel->collect_md) {
 708		erspan_fb_xmit(skb, dev);
 709		return NETDEV_TX_OK;
 710	}
 711
 712	if (gre_handle_offloads(skb, false))
 713		goto free_skb;
 714
 715	if (skb_cow_head(skb, dev->needed_headroom))
 716		goto free_skb;
 717
 718	if (skb->len > dev->mtu + dev->hard_header_len) {
 719		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
 720			goto free_skb;
 721		truncate = true;
 722	}
 723
 724	/* Push ERSPAN header */
 725	if (tunnel->erspan_ver == 0) {
 726		proto = htons(ETH_P_ERSPAN);
 727		__clear_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags);
 728	} else if (tunnel->erspan_ver == 1) {
 729		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
 730				    tunnel->index,
 731				    truncate, true);
 732		proto = htons(ETH_P_ERSPAN);
 733	} else if (tunnel->erspan_ver == 2) {
 734		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
 735				       tunnel->dir, tunnel->hwid,
 736				       truncate, true);
 737		proto = htons(ETH_P_ERSPAN2);
 738	} else {
 739		goto free_skb;
 740	}
 741
 742	__clear_bit(IP_TUNNEL_KEY_BIT, tunnel->parms.o_flags);
 743	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
 744	return NETDEV_TX_OK;
 745
 746free_skb:
 747	kfree_skb(skb);
 748	DEV_STATS_INC(dev, tx_dropped);
 749	return NETDEV_TX_OK;
 750}
 751
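     /* ndo_start_xmit for gretap devices: the inner Ethernet frame is
      * encapsulated with protocol ETH_P_TEB; collect_md tunnels are
      * handed to gre_fb_xmit().
      */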
 752static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
 753				struct net_device *dev)
 754{
 755	struct ip_tunnel *tunnel = netdev_priv(dev);
 756
 757	if (!pskb_inet_may_pull(skb))
 758		goto free_skb;
 759
 760	if (tunnel->collect_md) {
 761		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
 762		return NETDEV_TX_OK;
 763	}
 764
 765	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
 766					      tunnel->parms.o_flags)))
 767		goto free_skb;
 768
 769	if (skb_cow_head(skb, dev->needed_headroom))
 770		goto free_skb;
 771
 772	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
 773	return NETDEV_TX_OK;
 774
 775free_skb:
 776	kfree_skb(skb);
 777	DEV_STATS_INC(dev, tx_dropped);
 778	return NETDEV_TX_OK;
 779}
 780
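     /* Recompute the tunnel header length after the output flags have
      * changed, adjust the device headroom and MTU accordingly, and turn
      * software GSO off when sequence numbers (or checksums combined with
      * an extra encapsulation) make it unusable.
      */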
 781static void ipgre_link_update(struct net_device *dev, bool set_mtu)
 782{
 783	struct ip_tunnel *tunnel = netdev_priv(dev);
 784	int len;
 785
 786	len = tunnel->tun_hlen;
 787	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
 788	len = tunnel->tun_hlen - len;
 789	tunnel->hlen = tunnel->hlen + len;
 790
 791	if (dev->header_ops)
 792		dev->hard_header_len += len;
 793	else
 794		dev->needed_headroom += len;
 795
 796	if (set_mtu)
 797		WRITE_ONCE(dev->mtu, max_t(int, dev->mtu - len, 68));
 798
 799	if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags) ||
 800	    (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
 801	     tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
 802		dev->features &= ~NETIF_F_GSO_SOFTWARE;
 803		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
 804	} else {
 805		dev->features |= NETIF_F_GSO_SOFTWARE;
 806		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
 807	}
 808}
 809
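     /* ioctl backend (SIOCADDTUNNEL/SIOCCHGTUNNEL/...): check that the
      * flags fit the legacy be16 GRE format, reject GRE version/routing
      * bits, apply the change through ip_tunnel_ctl() and convert the
      * flags back for the caller.
      */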
 810static int ipgre_tunnel_ctl(struct net_device *dev,
 811			    struct ip_tunnel_parm_kern *p,
 812			    int cmd)
 813{
 814	__be16 i_flags, o_flags;
 815	int err;
 816
 817	if (!ip_tunnel_flags_is_be16_compat(p->i_flags) ||
 818	    !ip_tunnel_flags_is_be16_compat(p->o_flags))
 819		return -EOVERFLOW;
 820
 821	i_flags = ip_tunnel_flags_to_be16(p->i_flags);
 822	o_flags = ip_tunnel_flags_to_be16(p->o_flags);
 823
 824	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
 825		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
 826		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
 827		    ((i_flags | o_flags) & (GRE_VERSION | GRE_ROUTING)))
 828			return -EINVAL;
 829	}
 830
 831	gre_flags_to_tnl_flags(p->i_flags, i_flags);
 832	gre_flags_to_tnl_flags(p->o_flags, o_flags);
 833
 834	err = ip_tunnel_ctl(dev, p, cmd);
 835	if (err)
 836		return err;
 837
 838	if (cmd == SIOCCHGTUNNEL) {
 839		struct ip_tunnel *t = netdev_priv(dev);
 840
 841		ip_tunnel_flags_copy(t->parms.i_flags, p->i_flags);
 842		ip_tunnel_flags_copy(t->parms.o_flags, p->o_flags);
 843
 844		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
 845			ipgre_link_update(dev, true);
 846	}
 847
 848	i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
 849	ip_tunnel_flags_from_be16(p->i_flags, i_flags);
 850	o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
 851	ip_tunnel_flags_from_be16(p->o_flags, o_flags);
 852
 853	return 0;
 854}
 855
 856/* Nice toy. Unfortunately, useless in real life :-)
  857   It allows constructing a virtual multiprotocol broadcast "LAN"
 858   over the Internet, provided multicast routing is tuned.
 859
 860
  861   I have no idea whether this bicycle was invented before me,
  862   so I had to set ARPHRD_IPGRE to a random value.
  863   I have an impression that Cisco could make something similar,
 864   but this feature is apparently missing in IOS<=11.2(8).
 865
 866   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
 867   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
 868
 869   ping -t 255 224.66.66.66
 870
 871   If nobody answers, mbone does not work.
 872
 873   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
 874   ip addr add 10.66.66.<somewhat>/24 dev Universe
 875   ifconfig Universe up
 876   ifconfig Universe add fe80::<Your_real_addr>/10
 877   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
 878   ftp 10.66.66.66
 879   ...
 880   ftp fec0:6666:6666::193.233.7.65
 881   ...
 882 */
 883static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
 884			unsigned short type,
 885			const void *daddr, const void *saddr, unsigned int len)
 886{
 887	struct ip_tunnel *t = netdev_priv(dev);
 888	struct iphdr *iph;
 889	struct gre_base_hdr *greh;
 890
 891	iph = skb_push(skb, t->hlen + sizeof(*iph));
 892	greh = (struct gre_base_hdr *)(iph+1);
 893	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
 894	greh->protocol = htons(type);
 895
 896	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
 897
 898	/* Set the source hardware address. */
 899	if (saddr)
 900		memcpy(&iph->saddr, saddr, 4);
 901	if (daddr)
 902		memcpy(&iph->daddr, daddr, 4);
 903	if (iph->daddr)
 904		return t->hlen + sizeof(*iph);
 905
 906	return -(t->hlen + sizeof(*iph));
 907}
 908
 909static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
 910{
 911	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
 912	memcpy(haddr, &iph->saddr, 4);
 913	return 4;
 914}
 915
 916static const struct header_ops ipgre_header_ops = {
 917	.create	= ipgre_header,
 918	.parse	= ipgre_header_parse,
 919};
 920
 921#ifdef CONFIG_NET_IPGRE_BROADCAST
 922static int ipgre_open(struct net_device *dev)
 923{
 924	struct ip_tunnel *t = netdev_priv(dev);
 925
 926	if (ipv4_is_multicast(t->parms.iph.daddr)) {
 927		struct flowi4 fl4;
 928		struct rtable *rt;
 929
 930		rt = ip_route_output_gre(t->net, &fl4,
 931					 t->parms.iph.daddr,
 932					 t->parms.iph.saddr,
 933					 t->parms.o_key,
 934					 t->parms.iph.tos & INET_DSCP_MASK,
 935					 t->parms.link);
 936		if (IS_ERR(rt))
 937			return -EADDRNOTAVAIL;
 938		dev = rt->dst.dev;
 939		ip_rt_put(rt);
 940		if (!__in_dev_get_rtnl(dev))
 941			return -EADDRNOTAVAIL;
 942		t->mlink = dev->ifindex;
 943		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
 944	}
 945	return 0;
 946}
 947
 948static int ipgre_close(struct net_device *dev)
 949{
 950	struct ip_tunnel *t = netdev_priv(dev);
 951
 952	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
 953		struct in_device *in_dev;
 954		in_dev = inetdev_by_index(t->net, t->mlink);
 955		if (in_dev)
 956			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
 957	}
 958	return 0;
 959}
 960#endif
 961
 962static const struct net_device_ops ipgre_netdev_ops = {
 963	.ndo_init		= ipgre_tunnel_init,
 964	.ndo_uninit		= ip_tunnel_uninit,
 965#ifdef CONFIG_NET_IPGRE_BROADCAST
 966	.ndo_open		= ipgre_open,
 967	.ndo_stop		= ipgre_close,
 968#endif
 969	.ndo_start_xmit		= ipgre_xmit,
 970	.ndo_siocdevprivate	= ip_tunnel_siocdevprivate,
 971	.ndo_change_mtu		= ip_tunnel_change_mtu,
 972	.ndo_get_stats64	= dev_get_tstats64,
 973	.ndo_get_iflink		= ip_tunnel_get_iflink,
 974	.ndo_tunnel_ctl		= ipgre_tunnel_ctl,
 975};
 976
 977#define GRE_FEATURES (NETIF_F_SG |		\
 978		      NETIF_F_FRAGLIST |	\
 979		      NETIF_F_HIGHDMA |		\
 980		      NETIF_F_HW_CSUM)
 981
 982static void ipgre_tunnel_setup(struct net_device *dev)
 983{
 984	dev->netdev_ops		= &ipgre_netdev_ops;
 985	dev->type		= ARPHRD_IPGRE;
 986	ip_tunnel_setup(dev, ipgre_net_id);
 987}
 988
 989static void __gre_tunnel_init(struct net_device *dev)
 990{
 991	struct ip_tunnel *tunnel;
 992
 993	tunnel = netdev_priv(dev);
 994	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
 995	tunnel->parms.iph.protocol = IPPROTO_GRE;
 996
 997	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
 998	dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
 999
1000	dev->features		|= GRE_FEATURES;
1001	dev->hw_features	|= GRE_FEATURES;
1002
1003	/* TCP offload with GRE SEQ is not supported, nor can we support 2
1004	 * levels of outer headers requiring an update.
1005	 */
1006	if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags))
1007		return;
1008	if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
1009	    tunnel->encap.type != TUNNEL_ENCAP_NONE)
1010		return;
1011
1012	dev->features |= NETIF_F_GSO_SOFTWARE;
1013	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1014
1015	dev->lltx = true;
1016}
1017
1018static int ipgre_tunnel_init(struct net_device *dev)
1019{
1020	struct ip_tunnel *tunnel = netdev_priv(dev);
1021	struct iphdr *iph = &tunnel->parms.iph;
1022
1023	__gre_tunnel_init(dev);
1024
1025	__dev_addr_set(dev, &iph->saddr, 4);
1026	memcpy(dev->broadcast, &iph->daddr, 4);
1027
1028	dev->flags		= IFF_NOARP;
1029	netif_keep_dst(dev);
1030	dev->addr_len		= 4;
1031
1032	if (iph->daddr && !tunnel->collect_md) {
1033#ifdef CONFIG_NET_IPGRE_BROADCAST
1034		if (ipv4_is_multicast(iph->daddr)) {
1035			if (!iph->saddr)
1036				return -EINVAL;
1037			dev->flags = IFF_BROADCAST;
1038			dev->header_ops = &ipgre_header_ops;
1039			dev->hard_header_len = tunnel->hlen + sizeof(*iph);
1040			dev->needed_headroom = 0;
1041		}
1042#endif
1043	} else if (!tunnel->collect_md) {
1044		dev->header_ops = &ipgre_header_ops;
1045		dev->hard_header_len = tunnel->hlen + sizeof(*iph);
1046		dev->needed_headroom = 0;
1047	}
1048
1049	return ip_tunnel_init(dev);
1050}
1051
1052static const struct gre_protocol ipgre_protocol = {
1053	.handler     = gre_rcv,
1054	.err_handler = gre_err,
1055};
1056
1057static int __net_init ipgre_init_net(struct net *net)
1058{
1059	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
1060}
1061
1062static void __net_exit ipgre_exit_batch_rtnl(struct list_head *list_net,
1063					     struct list_head *dev_to_kill)
1064{
1065	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops,
1066			      dev_to_kill);
1067}
1068
1069static struct pernet_operations ipgre_net_ops = {
1070	.init = ipgre_init_net,
1071	.exit_batch_rtnl = ipgre_exit_batch_rtnl,
1072	.id   = &ipgre_net_id,
1073	.size = sizeof(struct ip_tunnel_net),
1074};
1075
1076static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1077				 struct netlink_ext_ack *extack)
1078{
1079	__be16 flags;
1080
1081	if (!data)
1082		return 0;
1083
1084	flags = 0;
1085	if (data[IFLA_GRE_IFLAGS])
1086		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1087	if (data[IFLA_GRE_OFLAGS])
1088		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1089	if (flags & (GRE_VERSION|GRE_ROUTING))
1090		return -EINVAL;
1091
1092	if (data[IFLA_GRE_COLLECT_METADATA] &&
1093	    data[IFLA_GRE_ENCAP_TYPE] &&
1094	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
1095		return -EINVAL;
1096
1097	return 0;
1098}
1099
1100static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1101			      struct netlink_ext_ack *extack)
1102{
1103	__be32 daddr;
1104
1105	if (tb[IFLA_ADDRESS]) {
1106		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1107			return -EINVAL;
1108		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1109			return -EADDRNOTAVAIL;
1110	}
1111
1112	if (!data)
1113		goto out;
1114
1115	if (data[IFLA_GRE_REMOTE]) {
1116		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1117		if (!daddr)
1118			return -EINVAL;
1119	}
1120
1121out:
1122	return ipgre_tunnel_validate(tb, data, extack);
1123}
1124
1125static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
1126			   struct netlink_ext_ack *extack)
1127{
1128	__be16 flags = 0;
1129	int ret;
1130
1131	if (!data)
1132		return 0;
1133
1134	ret = ipgre_tap_validate(tb, data, extack);
1135	if (ret)
1136		return ret;
1137
1138	if (data[IFLA_GRE_ERSPAN_VER] &&
1139	    nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
1140		return 0;
1141
1142	/* ERSPAN type II/III should only have GRE sequence and key flag */
1143	if (data[IFLA_GRE_OFLAGS])
1144		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1145	if (data[IFLA_GRE_IFLAGS])
1146		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1147	if (!data[IFLA_GRE_COLLECT_METADATA] &&
1148	    flags != (GRE_SEQ | GRE_KEY))
1149		return -EINVAL;
1150
 1151	/* The ERSPAN Session ID is only 10 bits. Since we reuse the
 1152	 * 32-bit key field as the ID, check its range.
 1153	 */
1154	if (data[IFLA_GRE_IKEY] &&
1155	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1156		return -EINVAL;
1157
1158	if (data[IFLA_GRE_OKEY] &&
1159	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1160		return -EINVAL;
1161
1162	return 0;
1163}
1164
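     /* Translate IFLA_GRE_* netlink attributes into the kernel tunnel
      * parameter block: link, GRE i/o flags and keys, local/remote
      * addresses, ttl/tos, PMTU discovery, collect_md and ignore-DF
      * settings, plus an optional fwmark.
      */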
1165static int ipgre_netlink_parms(struct net_device *dev,
1166				struct nlattr *data[],
1167				struct nlattr *tb[],
1168				struct ip_tunnel_parm_kern *parms,
1169				__u32 *fwmark)
1170{
1171	struct ip_tunnel *t = netdev_priv(dev);
1172
1173	memset(parms, 0, sizeof(*parms));
1174
1175	parms->iph.protocol = IPPROTO_GRE;
1176
1177	if (!data)
1178		return 0;
1179
1180	if (data[IFLA_GRE_LINK])
1181		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1182
1183	if (data[IFLA_GRE_IFLAGS])
1184		gre_flags_to_tnl_flags(parms->i_flags,
1185				       nla_get_be16(data[IFLA_GRE_IFLAGS]));
1186
1187	if (data[IFLA_GRE_OFLAGS])
1188		gre_flags_to_tnl_flags(parms->o_flags,
1189				       nla_get_be16(data[IFLA_GRE_OFLAGS]));
1190
1191	if (data[IFLA_GRE_IKEY])
1192		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1193
1194	if (data[IFLA_GRE_OKEY])
1195		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1196
1197	if (data[IFLA_GRE_LOCAL])
1198		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
1199
1200	if (data[IFLA_GRE_REMOTE])
1201		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
1202
1203	if (data[IFLA_GRE_TTL])
1204		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1205
1206	if (data[IFLA_GRE_TOS])
1207		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1208
1209	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
1210		if (t->ignore_df)
1211			return -EINVAL;
1212		parms->iph.frag_off = htons(IP_DF);
1213	}
1214
1215	if (data[IFLA_GRE_COLLECT_METADATA]) {
1216		t->collect_md = true;
1217		if (dev->type == ARPHRD_IPGRE)
1218			dev->type = ARPHRD_NONE;
1219	}
1220
1221	if (data[IFLA_GRE_IGNORE_DF]) {
1222		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
1223		  && (parms->iph.frag_off & htons(IP_DF)))
1224			return -EINVAL;
1225		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
1226	}
1227
1228	if (data[IFLA_GRE_FWMARK])
1229		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1230
1231	return 0;
1232}
1233
1234static int erspan_netlink_parms(struct net_device *dev,
1235				struct nlattr *data[],
1236				struct nlattr *tb[],
1237				struct ip_tunnel_parm_kern *parms,
1238				__u32 *fwmark)
1239{
1240	struct ip_tunnel *t = netdev_priv(dev);
1241	int err;
1242
1243	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
1244	if (err)
1245		return err;
1246	if (!data)
1247		return 0;
1248
1249	if (data[IFLA_GRE_ERSPAN_VER]) {
1250		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1251
1252		if (t->erspan_ver > 2)
1253			return -EINVAL;
1254	}
1255
1256	if (t->erspan_ver == 1) {
1257		if (data[IFLA_GRE_ERSPAN_INDEX]) {
1258			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1259			if (t->index & ~INDEX_MASK)
1260				return -EINVAL;
1261		}
1262	} else if (t->erspan_ver == 2) {
1263		if (data[IFLA_GRE_ERSPAN_DIR]) {
1264			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1265			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
1266				return -EINVAL;
1267		}
1268		if (data[IFLA_GRE_ERSPAN_HWID]) {
1269			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1270			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
1271				return -EINVAL;
1272		}
1273	}
1274
1275	return 0;
1276}
1277
1278/* This function returns true when ENCAP attributes are present in the nl msg */
1279static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1280				      struct ip_tunnel_encap *ipencap)
1281{
1282	bool ret = false;
1283
1284	memset(ipencap, 0, sizeof(*ipencap));
1285
1286	if (!data)
1287		return ret;
1288
1289	if (data[IFLA_GRE_ENCAP_TYPE]) {
1290		ret = true;
1291		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1292	}
1293
1294	if (data[IFLA_GRE_ENCAP_FLAGS]) {
1295		ret = true;
1296		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1297	}
1298
1299	if (data[IFLA_GRE_ENCAP_SPORT]) {
1300		ret = true;
1301		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1302	}
1303
1304	if (data[IFLA_GRE_ENCAP_DPORT]) {
1305		ret = true;
1306		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1307	}
1308
1309	return ret;
1310}
1311
1312static int gre_tap_init(struct net_device *dev)
1313{
1314	__gre_tunnel_init(dev);
1315	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1316	netif_keep_dst(dev);
1317
1318	return ip_tunnel_init(dev);
1319}
1320
1321static const struct net_device_ops gre_tap_netdev_ops = {
1322	.ndo_init		= gre_tap_init,
1323	.ndo_uninit		= ip_tunnel_uninit,
1324	.ndo_start_xmit		= gre_tap_xmit,
1325	.ndo_set_mac_address 	= eth_mac_addr,
1326	.ndo_validate_addr	= eth_validate_addr,
1327	.ndo_change_mtu		= ip_tunnel_change_mtu,
1328	.ndo_get_stats64	= dev_get_tstats64,
1329	.ndo_get_iflink		= ip_tunnel_get_iflink,
1330	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1331};
1332
1333static int erspan_tunnel_init(struct net_device *dev)
1334{
1335	struct ip_tunnel *tunnel = netdev_priv(dev);
1336
1337	if (tunnel->erspan_ver == 0)
1338		tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
1339	else
1340		tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */
1341
1342	tunnel->parms.iph.protocol = IPPROTO_GRE;
1343	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1344		       erspan_hdr_len(tunnel->erspan_ver);
1345
1346	dev->features		|= GRE_FEATURES;
1347	dev->hw_features	|= GRE_FEATURES;
1348	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
1349	netif_keep_dst(dev);
1350
1351	return ip_tunnel_init(dev);
1352}
1353
1354static const struct net_device_ops erspan_netdev_ops = {
1355	.ndo_init		= erspan_tunnel_init,
1356	.ndo_uninit		= ip_tunnel_uninit,
1357	.ndo_start_xmit		= erspan_xmit,
1358	.ndo_set_mac_address	= eth_mac_addr,
1359	.ndo_validate_addr	= eth_validate_addr,
1360	.ndo_change_mtu		= ip_tunnel_change_mtu,
1361	.ndo_get_stats64	= dev_get_tstats64,
1362	.ndo_get_iflink		= ip_tunnel_get_iflink,
1363	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1364};
1365
1366static void ipgre_tap_setup(struct net_device *dev)
1367{
1368	ether_setup(dev);
1369	dev->max_mtu = 0;
1370	dev->netdev_ops	= &gre_tap_netdev_ops;
1371	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1372	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
1373	ip_tunnel_setup(dev, gre_tap_net_id);
1374}
1375
1376static int
1377ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
1378{
1379	struct ip_tunnel_encap ipencap;
1380
1381	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1382		struct ip_tunnel *t = netdev_priv(dev);
1383		int err = ip_tunnel_encap_setup(t, &ipencap);
1384
1385		if (err < 0)
1386			return err;
1387	}
1388
1389	return 0;
1390}
1391
1392static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1393			 struct nlattr *tb[], struct nlattr *data[],
1394			 struct netlink_ext_ack *extack)
1395{
1396	struct ip_tunnel_parm_kern p;
1397	__u32 fwmark = 0;
1398	int err;
1399
1400	err = ipgre_newlink_encap_setup(dev, data);
1401	if (err)
1402		return err;
1403
1404	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1405	if (err < 0)
1406		return err;
1407	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1408}
1409
1410static int erspan_newlink(struct net *src_net, struct net_device *dev,
1411			  struct nlattr *tb[], struct nlattr *data[],
1412			  struct netlink_ext_ack *extack)
1413{
1414	struct ip_tunnel_parm_kern p;
1415	__u32 fwmark = 0;
1416	int err;
1417
1418	err = ipgre_newlink_encap_setup(dev, data);
1419	if (err)
1420		return err;
1421
1422	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1423	if (err)
1424		return err;
1425	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1426}
1427
1428static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1429			    struct nlattr *data[],
1430			    struct netlink_ext_ack *extack)
1431{
1432	struct ip_tunnel *t = netdev_priv(dev);
1433	struct ip_tunnel_parm_kern p;
1434	__u32 fwmark = t->fwmark;
1435	int err;
1436
1437	err = ipgre_newlink_encap_setup(dev, data);
1438	if (err)
1439		return err;
1440
1441	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1442	if (err < 0)
1443		return err;
1444
1445	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1446	if (err < 0)
1447		return err;
1448
1449	ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
1450	ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);
1451
1452	ipgre_link_update(dev, !tb[IFLA_MTU]);
1453
1454	return 0;
1455}
1456
1457static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
1458			     struct nlattr *data[],
1459			     struct netlink_ext_ack *extack)
1460{
1461	struct ip_tunnel *t = netdev_priv(dev);
1462	struct ip_tunnel_parm_kern p;
1463	__u32 fwmark = t->fwmark;
1464	int err;
1465
1466	err = ipgre_newlink_encap_setup(dev, data);
1467	if (err)
1468		return err;
1469
1470	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1471	if (err < 0)
1472		return err;
1473
1474	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1475	if (err < 0)
1476		return err;
1477
1478	ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
1479	ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);
1480
1481	return 0;
1482}
1483
1484static size_t ipgre_get_size(const struct net_device *dev)
1485{
1486	return
1487		/* IFLA_GRE_LINK */
1488		nla_total_size(4) +
1489		/* IFLA_GRE_IFLAGS */
1490		nla_total_size(2) +
1491		/* IFLA_GRE_OFLAGS */
1492		nla_total_size(2) +
1493		/* IFLA_GRE_IKEY */
1494		nla_total_size(4) +
1495		/* IFLA_GRE_OKEY */
1496		nla_total_size(4) +
1497		/* IFLA_GRE_LOCAL */
1498		nla_total_size(4) +
1499		/* IFLA_GRE_REMOTE */
1500		nla_total_size(4) +
1501		/* IFLA_GRE_TTL */
1502		nla_total_size(1) +
1503		/* IFLA_GRE_TOS */
1504		nla_total_size(1) +
1505		/* IFLA_GRE_PMTUDISC */
1506		nla_total_size(1) +
1507		/* IFLA_GRE_ENCAP_TYPE */
1508		nla_total_size(2) +
1509		/* IFLA_GRE_ENCAP_FLAGS */
1510		nla_total_size(2) +
1511		/* IFLA_GRE_ENCAP_SPORT */
1512		nla_total_size(2) +
1513		/* IFLA_GRE_ENCAP_DPORT */
1514		nla_total_size(2) +
1515		/* IFLA_GRE_COLLECT_METADATA */
1516		nla_total_size(0) +
1517		/* IFLA_GRE_IGNORE_DF */
1518		nla_total_size(1) +
1519		/* IFLA_GRE_FWMARK */
1520		nla_total_size(4) +
1521		/* IFLA_GRE_ERSPAN_INDEX */
1522		nla_total_size(4) +
1523		/* IFLA_GRE_ERSPAN_VER */
1524		nla_total_size(1) +
1525		/* IFLA_GRE_ERSPAN_DIR */
1526		nla_total_size(1) +
1527		/* IFLA_GRE_ERSPAN_HWID */
1528		nla_total_size(2) +
1529		0;
1530}
1531
1532static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1533{
1534	struct ip_tunnel *t = netdev_priv(dev);
1535	struct ip_tunnel_parm_kern *p = &t->parms;
1536	IP_TUNNEL_DECLARE_FLAGS(o_flags);
1537
1538	ip_tunnel_flags_copy(o_flags, p->o_flags);
1539
1540	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1541	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
1542			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1543	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
1544			 gre_tnl_flags_to_gre_flags(o_flags)) ||
1545	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1546	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1547	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1548	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1549	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1550	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1551	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1552		       !!(p->iph.frag_off & htons(IP_DF))) ||
1553	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
1554		goto nla_put_failure;
1555
1556	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1557			t->encap.type) ||
1558	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
1559			 t->encap.sport) ||
1560	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
1561			 t->encap.dport) ||
1562	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
1563			t->encap.flags))
1564		goto nla_put_failure;
1565
1566	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
1567		goto nla_put_failure;
1568
1569	if (t->collect_md) {
1570		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
1571			goto nla_put_failure;
1572	}
1573
1574	return 0;
1575
1576nla_put_failure:
1577	return -EMSGSIZE;
1578}
1579
1580static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1581{
1582	struct ip_tunnel *t = netdev_priv(dev);
1583
1584	if (t->erspan_ver <= 2) {
1585		if (t->erspan_ver != 0 && !t->collect_md)
1586			__set_bit(IP_TUNNEL_KEY_BIT, t->parms.o_flags);
1587
1588		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1589			goto nla_put_failure;
1590
1591		if (t->erspan_ver == 1) {
1592			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1593				goto nla_put_failure;
1594		} else if (t->erspan_ver == 2) {
1595			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1596				goto nla_put_failure;
1597			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1598				goto nla_put_failure;
1599		}
1600	}
1601
1602	return ipgre_fill_info(skb, dev);
1603
1604nla_put_failure:
1605	return -EMSGSIZE;
1606}
1607
1608static void erspan_setup(struct net_device *dev)
1609{
1610	struct ip_tunnel *t = netdev_priv(dev);
1611
1612	ether_setup(dev);
1613	dev->max_mtu = 0;
1614	dev->netdev_ops = &erspan_netdev_ops;
1615	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1616	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1617	ip_tunnel_setup(dev, erspan_net_id);
1618	t->erspan_ver = 1;
1619}
1620
1621static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1622	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1623	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1624	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1625	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1626	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1627	[IFLA_GRE_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
1628	[IFLA_GRE_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
1629	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1630	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1631	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
1632	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
1633	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
1634	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
1635	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
1636	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
1637	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
1638	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
1639	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
1640	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
1641	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
1642	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
1643};
1644
1645static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1646	.kind		= "gre",
1647	.maxtype	= IFLA_GRE_MAX,
1648	.policy		= ipgre_policy,
1649	.priv_size	= sizeof(struct ip_tunnel),
1650	.setup		= ipgre_tunnel_setup,
1651	.validate	= ipgre_tunnel_validate,
1652	.newlink	= ipgre_newlink,
1653	.changelink	= ipgre_changelink,
1654	.dellink	= ip_tunnel_dellink,
1655	.get_size	= ipgre_get_size,
1656	.fill_info	= ipgre_fill_info,
1657	.get_link_net	= ip_tunnel_get_link_net,
1658};
1659
1660static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1661	.kind		= "gretap",
1662	.maxtype	= IFLA_GRE_MAX,
1663	.policy		= ipgre_policy,
1664	.priv_size	= sizeof(struct ip_tunnel),
1665	.setup		= ipgre_tap_setup,
1666	.validate	= ipgre_tap_validate,
1667	.newlink	= ipgre_newlink,
1668	.changelink	= ipgre_changelink,
1669	.dellink	= ip_tunnel_dellink,
1670	.get_size	= ipgre_get_size,
1671	.fill_info	= ipgre_fill_info,
1672	.get_link_net	= ip_tunnel_get_link_net,
1673};
1674
1675static struct rtnl_link_ops erspan_link_ops __read_mostly = {
1676	.kind		= "erspan",
1677	.maxtype	= IFLA_GRE_MAX,
1678	.policy		= ipgre_policy,
1679	.priv_size	= sizeof(struct ip_tunnel),
1680	.setup		= erspan_setup,
1681	.validate	= erspan_validate,
1682	.newlink	= erspan_newlink,
1683	.changelink	= erspan_changelink,
1684	.dellink	= ip_tunnel_dellink,
1685	.get_size	= ipgre_get_size,
1686	.fill_info	= erspan_fill_info,
1687	.get_link_net	= ip_tunnel_get_link_net,
1688};
1689
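     /* Create a flow-based (collect_md) gretap device programmatically,
      * without going through the rtnetlink user API; the MTU is lifted to
      * IP_MAX_MTU so encapsulated packet sizes are not restricted.
      */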
1690struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1691					u8 name_assign_type)
1692{
1693	struct nlattr *tb[IFLA_MAX + 1];
1694	struct net_device *dev;
1695	LIST_HEAD(list_kill);
1696	struct ip_tunnel *t;
1697	int err;
1698
1699	memset(&tb, 0, sizeof(tb));
1700
1701	dev = rtnl_create_link(net, name, name_assign_type,
1702			       &ipgre_tap_ops, tb, NULL);
1703	if (IS_ERR(dev))
1704		return dev;
1705
1706	/* Configure flow based GRE device. */
1707	t = netdev_priv(dev);
1708	t->collect_md = true;
1709
1710	err = ipgre_newlink(net, dev, tb, NULL, NULL);
1711	if (err < 0) {
1712		free_netdev(dev);
1713		return ERR_PTR(err);
1714	}
1715
1716	/* openvswitch users expect packet sizes to be unrestricted,
1717	 * so set the largest MTU we can.
1718	 */
1719	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1720	if (err)
1721		goto out;
1722
1723	err = rtnl_configure_link(dev, NULL, 0, NULL);
1724	if (err < 0)
1725		goto out;
1726
1727	return dev;
1728out:
1729	ip_tunnel_dellink(dev, &list_kill);
1730	unregister_netdevice_many(&list_kill);
1731	return ERR_PTR(err);
1732}
1733EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
1734
1735static int __net_init ipgre_tap_init_net(struct net *net)
1736{
1737	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
1738}
1739
1740static void __net_exit ipgre_tap_exit_batch_rtnl(struct list_head *list_net,
1741						 struct list_head *dev_to_kill)
1742{
1743	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops,
1744			      dev_to_kill);
1745}
1746
1747static struct pernet_operations ipgre_tap_net_ops = {
1748	.init = ipgre_tap_init_net,
1749	.exit_batch_rtnl = ipgre_tap_exit_batch_rtnl,
1750	.id   = &gre_tap_net_id,
1751	.size = sizeof(struct ip_tunnel_net),
1752};
1753
1754static int __net_init erspan_init_net(struct net *net)
1755{
1756	return ip_tunnel_init_net(net, erspan_net_id,
1757				  &erspan_link_ops, "erspan0");
1758}
1759
1760static void __net_exit erspan_exit_batch_rtnl(struct list_head *net_list,
1761					      struct list_head *dev_to_kill)
1762{
1763	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops,
1764			      dev_to_kill);
1765}
1766
1767static struct pernet_operations erspan_net_ops = {
1768	.init = erspan_init_net,
1769	.exit_batch_rtnl = erspan_exit_batch_rtnl,
1770	.id   = &erspan_net_id,
1771	.size = sizeof(struct ip_tunnel_net),
1772};
1773
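     /* Module init: register the per-namespace tunnel tables for gre,
      * gretap and erspan, hook up the GRE protocol handler and the three
      * rtnl_link_ops; on failure everything registered so far is torn
      * down in reverse order.
      */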
1774static int __init ipgre_init(void)
1775{
1776	int err;
1777
1778	pr_info("GRE over IPv4 tunneling driver\n");
1779
1780	err = register_pernet_device(&ipgre_net_ops);
1781	if (err < 0)
1782		return err;
1783
1784	err = register_pernet_device(&ipgre_tap_net_ops);
1785	if (err < 0)
1786		goto pnet_tap_failed;
1787
1788	err = register_pernet_device(&erspan_net_ops);
1789	if (err < 0)
1790		goto pnet_erspan_failed;
1791
1792	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1793	if (err < 0) {
1794		pr_info("%s: can't add protocol\n", __func__);
1795		goto add_proto_failed;
1796	}
1797
1798	err = rtnl_link_register(&ipgre_link_ops);
1799	if (err < 0)
1800		goto rtnl_link_failed;
1801
1802	err = rtnl_link_register(&ipgre_tap_ops);
1803	if (err < 0)
1804		goto tap_ops_failed;
1805
1806	err = rtnl_link_register(&erspan_link_ops);
1807	if (err < 0)
1808		goto erspan_link_failed;
1809
1810	return 0;
1811
1812erspan_link_failed:
1813	rtnl_link_unregister(&ipgre_tap_ops);
1814tap_ops_failed:
1815	rtnl_link_unregister(&ipgre_link_ops);
1816rtnl_link_failed:
1817	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1818add_proto_failed:
1819	unregister_pernet_device(&erspan_net_ops);
1820pnet_erspan_failed:
1821	unregister_pernet_device(&ipgre_tap_net_ops);
1822pnet_tap_failed:
1823	unregister_pernet_device(&ipgre_net_ops);
1824	return err;
1825}
1826
1827static void __exit ipgre_fini(void)
1828{
1829	rtnl_link_unregister(&ipgre_tap_ops);
1830	rtnl_link_unregister(&ipgre_link_ops);
1831	rtnl_link_unregister(&erspan_link_ops);
1832	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1833	unregister_pernet_device(&ipgre_tap_net_ops);
1834	unregister_pernet_device(&ipgre_net_ops);
1835	unregister_pernet_device(&erspan_net_ops);
1836}
1837
1838module_init(ipgre_init);
1839module_exit(ipgre_fini);
1840MODULE_DESCRIPTION("IPv4 GRE tunnels over IP library");
1841MODULE_LICENSE("GPL");
1842MODULE_ALIAS_RTNL_LINK("gre");
1843MODULE_ALIAS_RTNL_LINK("gretap");
1844MODULE_ALIAS_RTNL_LINK("erspan");
1845MODULE_ALIAS_NETDEV("gre0");
1846MODULE_ALIAS_NETDEV("gretap0");
1847MODULE_ALIAS_NETDEV("erspan0");