v4.6
 
   1/*
   2 *	Linux NET3:	GRE over IP protocol decoder.
   3 *
   4 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
   5 *
   6 *	This program is free software; you can redistribute it and/or
   7 *	modify it under the terms of the GNU General Public License
   8 *	as published by the Free Software Foundation; either version
   9 *	2 of the License, or (at your option) any later version.
  10 *
  11 */
  12
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14
  15#include <linux/capability.h>
  16#include <linux/module.h>
  17#include <linux/types.h>
  18#include <linux/kernel.h>
  19#include <linux/slab.h>
  20#include <asm/uaccess.h>
  21#include <linux/skbuff.h>
  22#include <linux/netdevice.h>
  23#include <linux/in.h>
  24#include <linux/tcp.h>
  25#include <linux/udp.h>
  26#include <linux/if_arp.h>
  27#include <linux/if_vlan.h>
  28#include <linux/init.h>
  29#include <linux/in6.h>
  30#include <linux/inetdevice.h>
  31#include <linux/igmp.h>
  32#include <linux/netfilter_ipv4.h>
  33#include <linux/etherdevice.h>
  34#include <linux/if_ether.h>
  35
  36#include <net/sock.h>
  37#include <net/ip.h>
  38#include <net/icmp.h>
  39#include <net/protocol.h>
  40#include <net/ip_tunnels.h>
  41#include <net/arp.h>
  42#include <net/checksum.h>
  43#include <net/dsfield.h>
  44#include <net/inet_ecn.h>
  45#include <net/xfrm.h>
  46#include <net/net_namespace.h>
  47#include <net/netns/generic.h>
  48#include <net/rtnetlink.h>
  49#include <net/gre.h>
  50#include <net/dst_metadata.h>
  51
  52#if IS_ENABLED(CONFIG_IPV6)
  53#include <net/ipv6.h>
  54#include <net/ip6_fib.h>
  55#include <net/ip6_route.h>
  56#endif
  57
  58/*
  59   Problems & solutions
  60   --------------------
  61
  62   1. The most important issue is detecting local dead loops.
  63   They would cause complete host lockup in transmit, which
  64   would be "resolved" by stack overflow or, if queueing is enabled,
  65   with infinite looping in net_bh.
  66
  67   We cannot track such dead loops during route installation,
  68   it is an infeasible task. The most general solution would be
  69   to keep skb->encapsulation counter (sort of local ttl),
  70   and silently drop packet when it expires. It is a good
  71   solution, but it supposes maintaining a new variable in ALL
  72   skb, even if no tunneling is used.
  73
  74   Current solution: xmit_recursion breaks dead loops. This is a percpu
  75   counter, since when we enter the first ndo_xmit(), cpu migration is
  76   forbidden. We force an exit if this counter reaches RECURSION_LIMIT
  77
  78   2. Networking dead loops would not kill routers, but would really
  79   kill network. IP hop limit plays role of "t->recursion" in this case,
  80   if we copy it from packet being encapsulated to upper header.
  81   It is very good solution, but it introduces two problems:
  82
  83   - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
  84     do not work over tunnels.
  85   - traceroute does not work. I planned to relay ICMP from tunnel,
  86     so that this problem would be solved and traceroute output
  87     would be even more informative. This idea appeared to be wrong:
  88     only Linux complies with rfc1812 now (yes, guys, Linux is the only
  89     true router now :-)), all routers (at least, in neighbourhood of mine)
  90     return only 8 bytes of payload. It is the end.
  91
  92   Hence, if we want OSPF to work or traceroute to say something reasonable,
  93   we should search for another solution.
  94
  95   One of them is to parse packet trying to detect inner encapsulation
  96   made by our node. It is difficult or even impossible, especially,
  97   taking into account fragmentation. To be short, ttl is not a solution at all.
  98
  99   Current solution: The solution was UNEXPECTEDLY SIMPLE.
 100   We force DF flag on tunnels with preconfigured hop limit,
 101   that is ALL. :-) Well, it does not remove the problem completely,
 102   but exponential growth of network traffic is changed to linear
 103   (branches that exceed pmtu are pruned) and tunnel mtu
 104   rapidly degrades to value <68, where looping stops.
 105   Yes, it is not good if there exists a router in the loop,
 106   which does not force DF, even when encapsulating packets have DF set.
 107   But it is not our problem! Nobody could accuse us, we made
 108   all that we could make. Even if it is your gated who injected
 109   fatal route to network, even if it were you who configured
 110   fatal static route: you are innocent. :-)
 111
 112   Alexey Kuznetsov.
 113 */
 114
 115static bool log_ecn_error = true;
 116module_param(log_ecn_error, bool, 0644);
 117MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 118
 119static struct rtnl_link_ops ipgre_link_ops __read_mostly;
 120static int ipgre_tunnel_init(struct net_device *dev);
 121
 122static int ipgre_net_id __read_mostly;
 123static int gre_tap_net_id __read_mostly;
 124
 125static int ip_gre_calc_hlen(__be16 o_flags)
 126{
 127	int addend = 4;
 128
 129	if (o_flags & TUNNEL_CSUM)
 130		addend += 4;
 131	if (o_flags & TUNNEL_KEY)
 132		addend += 4;
 133	if (o_flags & TUNNEL_SEQ)
 134		addend += 4;
 135	return addend;
 136}
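/* Worked example: the base GRE header is 4 bytes (flags + protocol),
 * and each of TUNNEL_CSUM, TUNNEL_KEY and TUNNEL_SEQ adds one 4-byte
 * option word, so a tunnel configured with checksum and key but no
 * sequence numbers pushes ip_gre_calc_hlen(TUNNEL_CSUM | TUNNEL_KEY)
 * == 4 + 4 + 4 = 12 bytes of GRE header.
 */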
 137
 138static __be16 gre_flags_to_tnl_flags(__be16 flags)
 139{
 140	__be16 tflags = 0;
 141
 142	if (flags & GRE_CSUM)
 143		tflags |= TUNNEL_CSUM;
 144	if (flags & GRE_ROUTING)
 145		tflags |= TUNNEL_ROUTING;
 146	if (flags & GRE_KEY)
 147		tflags |= TUNNEL_KEY;
 148	if (flags & GRE_SEQ)
 149		tflags |= TUNNEL_SEQ;
 150	if (flags & GRE_STRICT)
 151		tflags |= TUNNEL_STRICT;
 152	if (flags & GRE_REC)
 153		tflags |= TUNNEL_REC;
 154	if (flags & GRE_VERSION)
 155		tflags |= TUNNEL_VERSION;
 156
 157	return tflags;
 158}
 159
 160static __be16 tnl_flags_to_gre_flags(__be16 tflags)
 161{
 162	__be16 flags = 0;
 163
 164	if (tflags & TUNNEL_CSUM)
 165		flags |= GRE_CSUM;
 166	if (tflags & TUNNEL_ROUTING)
 167		flags |= GRE_ROUTING;
 168	if (tflags & TUNNEL_KEY)
 169		flags |= GRE_KEY;
 170	if (tflags & TUNNEL_SEQ)
 171		flags |= GRE_SEQ;
 172	if (tflags & TUNNEL_STRICT)
 173		flags |= GRE_STRICT;
 174	if (tflags & TUNNEL_REC)
 175		flags |= GRE_REC;
 176	if (tflags & TUNNEL_VERSION)
 177		flags |= GRE_VERSION;
 178
 179	return flags;
 180}
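/* The two helpers above translate between the on-wire GRE flag bits
 * (GRE_*, RFC 2784/2890 bit positions in network byte order) and the
 * driver-internal TUNNEL_* representation shared by all ip_tunnel
 * users. The mapping is one-to-one, so for flags built only from the
 * bits handled here,
 * gre_flags_to_tnl_flags(tnl_flags_to_gre_flags(f)) == f.
 */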
 181
 182/* Fills in tpi and returns header length to be pulled. */
 183static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 184			    bool *csum_err)
 185{
 186	const struct gre_base_hdr *greh;
 187	__be32 *options;
 188	int hdr_len;
 189
 190	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
 191		return -EINVAL;
 192
 193	greh = (struct gre_base_hdr *)skb_transport_header(skb);
 194	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
 195		return -EINVAL;
 196
 197	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
 198	hdr_len = ip_gre_calc_hlen(tpi->flags);
 199
 200	if (!pskb_may_pull(skb, hdr_len))
 201		return -EINVAL;
 202
 203	greh = (struct gre_base_hdr *)skb_transport_header(skb);
 204	tpi->proto = greh->protocol;
 205
 206	options = (__be32 *)(greh + 1);
 207	if (greh->flags & GRE_CSUM) {
 208		if (skb_checksum_simple_validate(skb)) {
 209			*csum_err = true;
 210			return -EINVAL;
 211		}
 212
 213		skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
 214					 null_compute_pseudo);
 215		options++;
 216	}
 217
 218	if (greh->flags & GRE_KEY) {
 219		tpi->key = *options;
 220		options++;
 221	} else {
 222		tpi->key = 0;
 223	}
 224	if (unlikely(greh->flags & GRE_SEQ)) {
 225		tpi->seq = *options;
 226		options++;
 227	} else {
 228		tpi->seq = 0;
 229	}
 230	/* WCCP version 1 and 2 protocol decoding.
 231	 * - Change protocol to IP
 232	 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
 233	 */
 234	if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
 235		tpi->proto = htons(ETH_P_IP);
 236		if ((*(u8 *)options & 0xF0) != 0x40) {
 237			hdr_len += 4;
 238			if (!pskb_may_pull(skb, hdr_len))
 239				return -EINVAL;
 240		}
 241	}
 242	return hdr_len;
 243}
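/* On-wire layout consumed above (RFC 2784/2890), with all three
 * optional words present:
 *
 *	bytes  0-3:  flags/version | protocol (EtherType of payload)
 *	bytes  4-7:  checksum | reserved      (present iff GRE_CSUM)
 *	bytes  8-11: key                      (present iff GRE_KEY)
 *	bytes 12-15: sequence number          (present iff GRE_SEQ)
 *
 * which matches the options++ walk: checksum word first, then key,
 * then sequence number.
 */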
 244
 245static void ipgre_err(struct sk_buff *skb, u32 info,
 246		      const struct tnl_ptk_info *tpi)
 247{
 248
 249	/* All the routers (except for Linux) return only
 250	   8 bytes of packet payload. It means, that precise relaying of
 251	   ICMP in the real Internet is absolutely infeasible.
 252
 253	   Moreover, Cisco "wise men" put GRE key to the third word
 254	   in GRE header. It makes impossible maintaining even soft
 255	   state for keyed GRE tunnels with enabled checksum. Tell
 256	   them "thank you".
 257
 258	   Well, I wonder, rfc1812 was written by Cisco employee,
 259	   what the hell these idiots break standards established
 260	   by themselves???
 261	   */
 262	struct net *net = dev_net(skb->dev);
 263	struct ip_tunnel_net *itn;
 264	const struct iphdr *iph;
 265	const int type = icmp_hdr(skb)->type;
 266	const int code = icmp_hdr(skb)->code;
 267	struct ip_tunnel *t;
 268
 269	switch (type) {
 270	default:
 271	case ICMP_PARAMETERPROB:
 272		return;
 273
 274	case ICMP_DEST_UNREACH:
 275		switch (code) {
 276		case ICMP_SR_FAILED:
 277		case ICMP_PORT_UNREACH:
 278			/* Impossible event. */
 279			return;
 280		default:
 281			/* All others are translated to HOST_UNREACH.
 282			   rfc2003 contains "deep thoughts" about NET_UNREACH,
 283			   I believe they are just ether pollution. --ANK
 284			 */
 285			break;
 286		}
 287		break;
 288
 289	case ICMP_TIME_EXCEEDED:
 290		if (code != ICMP_EXC_TTL)
 291			return;
 292		break;
 293
 294	case ICMP_REDIRECT:
 295		break;
 296	}
 297
 298	if (tpi->proto == htons(ETH_P_TEB))
 299		itn = net_generic(net, gre_tap_net_id);
 300	else
 301		itn = net_generic(net, ipgre_net_id);
 302
 303	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
 304	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 305			     iph->daddr, iph->saddr, tpi->key);
 306
 307	if (!t)
 308		return;
 309
 310	if (t->parms.iph.daddr == 0 ||
 311	    ipv4_is_multicast(t->parms.iph.daddr))
 312		return;
 313
 314	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
 315		return;
 316
 317	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
 318		t->err_count++;
 319	else
 320		t->err_count = 1;
 321	t->err_time = jiffies;
 322}
 323
 324static void gre_err(struct sk_buff *skb, u32 info)
 325{
 326	/* All the routers (except for Linux) return only
 327	 * 8 bytes of packet payload. It means, that precise relaying of
 328	 * ICMP in the real Internet is absolutely infeasible.
 329	 *
 330	 * Moreover, Cisco "wise men" put GRE key to the third word
 331	 * in GRE header. It makes impossible maintaining even soft
 332	 * state for keyed
 333	 * GRE tunnels with enabled checksum. Tell them "thank you".
 334	 *
 335	 * Well, I wonder, rfc1812 was written by Cisco employee,
 336	 * what the hell these idiots break standards established
 337	 * by themselves???
 338	 */
 339
 340	const int type = icmp_hdr(skb)->type;
 341	const int code = icmp_hdr(skb)->code;
 342	struct tnl_ptk_info tpi;
 343	bool csum_err = false;
 344
 345	if (parse_gre_header(skb, &tpi, &csum_err) < 0) {
 346		if (!csum_err)		/* ignore csum errors. */
 347			return;
 348	}
 349
 350	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
 351		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
 352				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
 353		return;
 354	}
 355	if (type == ICMP_REDIRECT) {
 356		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
 357			      IPPROTO_GRE, 0);
 358		return;
 359	}
 360
 361	ipgre_err(skb, info, &tpi);
 362}
 363
 364static __be64 key_to_tunnel_id(__be32 key)
 365{
 366#ifdef __BIG_ENDIAN
 367	return (__force __be64)((__force u32)key);
 368#else
 369	return (__force __be64)((__force u64)key << 32);
 370#endif
 371}
 372
 373/* Returns the least-significant 32 bits of a __be64. */
 374static __be32 tunnel_id_to_key(__be64 x)
 375{
 376#ifdef __BIG_ENDIAN
 377	return (__force __be32)x;
 378#else
 379	return (__force __be32)((__force u64)x >> 32);
 380#endif
 381}
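/* Worked example: the GRE key htonl(1) becomes the __be64 tunnel id
 * whose big-endian value is 1; the key always occupies the
 * least-significant 32 bits of the id as seen on the wire. The
 * #ifdefs only compensate for where those bits sit in the host u64:
 * a plain cast is enough on big-endian, while little-endian needs
 * the 32-bit shift so that the byte image stays the same.
 */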
 382
 383static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
 384{
 385	struct net *net = dev_net(skb->dev);
 386	struct metadata_dst *tun_dst = NULL;
 387	struct ip_tunnel_net *itn;
 388	const struct iphdr *iph;
 389	struct ip_tunnel *tunnel;
 390
 391	if (tpi->proto == htons(ETH_P_TEB))
 392		itn = net_generic(net, gre_tap_net_id);
 393	else
 394		itn = net_generic(net, ipgre_net_id);
 395
 396	iph = ip_hdr(skb);
 397	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 398				  iph->saddr, iph->daddr, tpi->key);
 399
 400	if (tunnel) {
 401		if (tunnel->dev->type != ARPHRD_NONE)
 402			skb_pop_mac_header(skb);
 403		else
 404			skb_reset_mac_header(skb);
 405		if (tunnel->collect_md) {
 406			__be16 flags;
 407			__be64 tun_id;
 408
 409			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
 410			tun_id = key_to_tunnel_id(tpi->key);
 411			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
 412			if (!tun_dst)
 413				return PACKET_REJECT;
 414		}
 415
 416		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 417		return PACKET_RCVD;
 418	}
 419	return PACKET_REJECT;
 420}
 421
 422static int gre_rcv(struct sk_buff *skb)
 423{
 424	struct tnl_ptk_info tpi;
 425	bool csum_err = false;
 426	int hdr_len;
 427
 428#ifdef CONFIG_NET_IPGRE_BROADCAST
 429	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
 430		/* Looped back packet, drop it! */
 431		if (rt_is_output_route(skb_rtable(skb)))
 432			goto drop;
 433	}
 434#endif
 435
 436	hdr_len = parse_gre_header(skb, &tpi, &csum_err);
 437	if (hdr_len < 0)
 438		goto drop;
 439	if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false) < 0)
 440		goto drop;
 441
 442	if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
 443		return 0;
 444
 445	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 446drop:
 447	kfree_skb(skb);
 448	return 0;
 449}
 450
 451static __sum16 gre_checksum(struct sk_buff *skb)
 452{
 453	__wsum csum;
 454
 455	if (skb->ip_summed == CHECKSUM_PARTIAL)
 456		csum = lco_csum(skb);
 457	else
 458		csum = skb_checksum(skb, 0, skb->len, 0);
 459	return csum_fold(csum);
 460}
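/* The CHECKSUM_PARTIAL branch uses local checksum offload: when the
 * inner checksum field already holds the pseudo-header sum (to be
 * completed later by hardware), the inner packet is known to sum to
 * its complement, so lco_csum() only walks the outer headers up to
 * the inner checksum start instead of the full payload.
 */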
 461
 462static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
 463			 __be16 proto, __be32 key, __be32 seq)
 464{
 465	struct gre_base_hdr *greh;
 466
 467	skb_push(skb, hdr_len);
 468
 469	skb_reset_transport_header(skb);
 470	greh = (struct gre_base_hdr *)skb->data;
 471	greh->flags = tnl_flags_to_gre_flags(flags);
 472	greh->protocol = proto;
 473
 474	if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
 475		__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
 476
 477		if (flags & TUNNEL_SEQ) {
 478			*ptr = seq;
 479			ptr--;
 480		}
 481		if (flags & TUNNEL_KEY) {
 482			*ptr = key;
 483			ptr--;
 484		}
 485		if (flags & TUNNEL_CSUM &&
 486		    !(skb_shinfo(skb)->gso_type &
 487		      (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
 488			*ptr = 0;
 489			*(__sum16 *)ptr = gre_checksum(skb);
 490		}
 491	}
 492}
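/* Note the options are written back to front: ptr starts at the last
 * option word (hdr_len - 4) and steps toward the base header, so with
 * all three flags set the sequence number is stored last, the key in
 * the middle and the checksum word first, matching the layout parsed
 * by parse_gre_header(). The checksum is left to be filled in per
 * segment for GSO skbs.
 */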
 493
 494static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 495		       const struct iphdr *tnl_params,
 496		       __be16 proto)
 497{
 498	struct ip_tunnel *tunnel = netdev_priv(dev);
 499
 500	if (tunnel->parms.o_flags & TUNNEL_SEQ)
 501		tunnel->o_seqno++;
 502
 503	/* Push GRE header. */
 504	build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
 505		     proto, tunnel->parms.o_key, htonl(tunnel->o_seqno));
 506
 507	skb_set_inner_protocol(skb, proto);
 508	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 509}
 510
 511static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
 512					   bool csum)
 513{
 514	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 515}
 516
 517static struct rtable *gre_get_rt(struct sk_buff *skb,
 518				 struct net_device *dev,
 519				 struct flowi4 *fl,
 520				 const struct ip_tunnel_key *key)
 521{
 522	struct net *net = dev_net(dev);
 523
 524	memset(fl, 0, sizeof(*fl));
 525	fl->daddr = key->u.ipv4.dst;
 526	fl->saddr = key->u.ipv4.src;
 527	fl->flowi4_tos = RT_TOS(key->tos);
 528	fl->flowi4_mark = skb->mark;
 529	fl->flowi4_proto = IPPROTO_GRE;
 530
 531	return ip_route_output_key(net, fl);
 532}
 533
 534static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 535			__be16 proto)
 536{
 537	struct ip_tunnel_info *tun_info;
 538	const struct ip_tunnel_key *key;
 539	struct rtable *rt = NULL;
 540	struct flowi4 fl;
 541	int min_headroom;
 542	int tunnel_hlen;
 543	__be16 df, flags;
 544	bool use_cache;
 545	int err;
 546
 547	tun_info = skb_tunnel_info(skb);
 548	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 549		     ip_tunnel_info_af(tun_info) != AF_INET))
 550		goto err_free_skb;
 551
 552	key = &tun_info->key;
 553	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
 554	if (use_cache)
 555		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr);
 556	if (!rt) {
 557		rt = gre_get_rt(skb, dev, &fl, key);
 558		if (IS_ERR(rt))
 559			goto err_free_skb;
 560		if (use_cache)
 561			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
 562					  fl.saddr);
 563	}
 564
 565	tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);
 566
 567	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
 568			+ tunnel_hlen + sizeof(struct iphdr);
 569	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
 570		int head_delta = SKB_DATA_ALIGN(min_headroom -
 571						skb_headroom(skb) +
 572						16);
 573		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
 574				       0, GFP_ATOMIC);
 575		if (unlikely(err))
 576			goto err_free_rt;
 577	}
 578
 579	/* Push Tunnel header. */
 580	skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
 581	if (IS_ERR(skb)) {
 582		skb = NULL;
 583		goto err_free_rt;
 584	}
 585
 586	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
 587	build_header(skb, tunnel_hlen, flags, proto,
 588		     tunnel_id_to_key(tun_info->key.tun_id), 0);
 589
 590	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?  htons(IP_DF) : 0;
 591
 592	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
 593		      key->tos, key->ttl, df, false);
 594	return;
 595
 596err_free_rt:
 597	ip_rt_put(rt);
 598err_free_skb:
 599	kfree_skb(skb);
 600	dev->stats.tx_dropped++;
 601}
 602
 603static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 604{
 605	struct ip_tunnel_info *info = skb_tunnel_info(skb);
 606	struct rtable *rt;
 607	struct flowi4 fl4;
 608
 609	if (ip_tunnel_info_af(info) != AF_INET)
 610		return -EINVAL;
 611
 612	rt = gre_get_rt(skb, dev, &fl4, &info->key);
 613	if (IS_ERR(rt))
 614		return PTR_ERR(rt);
 615
 616	ip_rt_put(rt);
 617	info->key.u.ipv4.src = fl4.saddr;
 618	return 0;
 619}
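/* ndo_fill_metadata_dst callback, used by callers such as Open
 * vSwitch to pre-populate egress tunnel metadata on a collect_md
 * device. Only the routing side effect is wanted, hence the
 * immediate ip_rt_put(): the caller learns which local source
 * address the stack would choose for this flow.
 */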
 620
 621static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
 622			      struct net_device *dev)
 623{
 624	struct ip_tunnel *tunnel = netdev_priv(dev);
 625	const struct iphdr *tnl_params;
 626
 627	if (tunnel->collect_md) {
 628		gre_fb_xmit(skb, dev, skb->protocol);
 629		return NETDEV_TX_OK;
 630	}
 631
 632	if (dev->header_ops) {
 633		/* Need space for new headers */
 634		if (skb_cow_head(skb, dev->needed_headroom -
 635				      (tunnel->hlen + sizeof(struct iphdr))))
 636			goto free_skb;
 637
 638		tnl_params = (const struct iphdr *)skb->data;
 639
 640		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
 641		 * to gre header.
 642		 */
 643		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
 644		skb_reset_mac_header(skb);
 645	} else {
 646		if (skb_cow_head(skb, dev->needed_headroom))
 647			goto free_skb;
 648
 649		tnl_params = &tunnel->parms.iph;
 650	}
 651
 652	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
 653	if (IS_ERR(skb))
 654		goto out;
 655
 656	__gre_xmit(skb, dev, tnl_params, skb->protocol);
 657	return NETDEV_TX_OK;
 658
 659free_skb:
 660	kfree_skb(skb);
 661out:
 662	dev->stats.tx_dropped++;
 663	return NETDEV_TX_OK;
 664}
 665
 666static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
 667				struct net_device *dev)
 668{
 669	struct ip_tunnel *tunnel = netdev_priv(dev);
 670
 671	if (tunnel->collect_md) {
 672		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
 673		return NETDEV_TX_OK;
 674	}
 675
 676	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
 677	if (IS_ERR(skb))
 678		goto out;
 679
 680	if (skb_cow_head(skb, dev->needed_headroom))
 681		goto free_skb;
 682
 683	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
 684	return NETDEV_TX_OK;
 685
 686free_skb:
 687	kfree_skb(skb);
 688out:
 689	dev->stats.tx_dropped++;
 690	return NETDEV_TX_OK;
 691}
 692
 693static int ipgre_tunnel_ioctl(struct net_device *dev,
 694			      struct ifreq *ifr, int cmd)
 695{
 696	int err;
 697	struct ip_tunnel_parm p;
 698
 699	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
 700		return -EFAULT;
 701	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
 702		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
 703		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
 704		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
 705			return -EINVAL;
 706	}
 707	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
 708	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
 709
 710	err = ip_tunnel_ioctl(dev, &p, cmd);
 711	if (err)
 712		return err;
 713
 714	p.i_flags = tnl_flags_to_gre_flags(p.i_flags);
 715	p.o_flags = tnl_flags_to_gre_flags(p.o_flags);
 716
 717	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
 718		return -EFAULT;
 719	return 0;
 720}
 721
 722/* Nice toy. Unfortunately, useless in real life :-)
 723   It allows to construct virtual multiprotocol broadcast "LAN"
 724   over the Internet, provided multicast routing is tuned.
 725
 726
 727   I have no idea whether this bicycle was invented before me,
 728   so that I had to set ARPHRD_IPGRE to a random value.
 729   I have an impression, that Cisco could make something similar,
 730   but this feature is apparently missing in IOS<=11.2(8).
 731
 732   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
 733   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
 734
 735   ping -t 255 224.66.66.66
 736
 737   If nobody answers, mbone does not work.
 738
 739   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
 740   ip addr add 10.66.66.<somewhat>/24 dev Universe
 741   ifconfig Universe up
 742   ifconfig Universe add fe80::<Your_real_addr>/10
 743   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
 744   ftp 10.66.66.66
 745   ...
 746   ftp fec0:6666:6666::193.233.7.65
 747   ...
 748 */
 749static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
 750			unsigned short type,
 751			const void *daddr, const void *saddr, unsigned int len)
 752{
 753	struct ip_tunnel *t = netdev_priv(dev);
 754	struct iphdr *iph;
 755	struct gre_base_hdr *greh;
 756
 757	iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
 758	greh = (struct gre_base_hdr *)(iph+1);
 759	greh->flags = tnl_flags_to_gre_flags(t->parms.o_flags);
 760	greh->protocol = htons(type);
 761
 762	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
 763
 764	/* Set the source hardware address. */
 765	if (saddr)
 766		memcpy(&iph->saddr, saddr, 4);
 767	if (daddr)
 768		memcpy(&iph->daddr, daddr, 4);
 769	if (iph->daddr)
 770		return t->hlen + sizeof(*iph);
 771
 772	return -(t->hlen + sizeof(*iph));
 773}
 774
 775static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
 776{
 777	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
 778	memcpy(haddr, &iph->saddr, 4);
 779	return 4;
 780}
 781
 782static const struct header_ops ipgre_header_ops = {
 783	.create	= ipgre_header,
 784	.parse	= ipgre_header_parse,
 785};
 786
 787#ifdef CONFIG_NET_IPGRE_BROADCAST
 788static int ipgre_open(struct net_device *dev)
 789{
 790	struct ip_tunnel *t = netdev_priv(dev);
 791
 792	if (ipv4_is_multicast(t->parms.iph.daddr)) {
 793		struct flowi4 fl4;
 794		struct rtable *rt;
 795
 796		rt = ip_route_output_gre(t->net, &fl4,
 797					 t->parms.iph.daddr,
 798					 t->parms.iph.saddr,
 799					 t->parms.o_key,
 800					 RT_TOS(t->parms.iph.tos),
 801					 t->parms.link);
 802		if (IS_ERR(rt))
 803			return -EADDRNOTAVAIL;
 804		dev = rt->dst.dev;
 805		ip_rt_put(rt);
 806		if (!__in_dev_get_rtnl(dev))
 807			return -EADDRNOTAVAIL;
 808		t->mlink = dev->ifindex;
 809		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
 810	}
 811	return 0;
 812}
 813
 814static int ipgre_close(struct net_device *dev)
 815{
 816	struct ip_tunnel *t = netdev_priv(dev);
 817
 818	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
 819		struct in_device *in_dev;
 820		in_dev = inetdev_by_index(t->net, t->mlink);
 821		if (in_dev)
 822			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
 823	}
 824	return 0;
 825}
 826#endif
 827
 828static const struct net_device_ops ipgre_netdev_ops = {
 829	.ndo_init		= ipgre_tunnel_init,
 830	.ndo_uninit		= ip_tunnel_uninit,
 831#ifdef CONFIG_NET_IPGRE_BROADCAST
 832	.ndo_open		= ipgre_open,
 833	.ndo_stop		= ipgre_close,
 834#endif
 835	.ndo_start_xmit		= ipgre_xmit,
 836	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
 837	.ndo_change_mtu		= ip_tunnel_change_mtu,
 838	.ndo_get_stats64	= ip_tunnel_get_stats64,
 839	.ndo_get_iflink		= ip_tunnel_get_iflink,
 840};
 841
 842#define GRE_FEATURES (NETIF_F_SG |		\
 843		      NETIF_F_FRAGLIST |	\
 844		      NETIF_F_HIGHDMA |		\
 845		      NETIF_F_HW_CSUM)
 846
 847static void ipgre_tunnel_setup(struct net_device *dev)
 848{
 849	dev->netdev_ops		= &ipgre_netdev_ops;
 850	dev->type		= ARPHRD_IPGRE;
 851	ip_tunnel_setup(dev, ipgre_net_id);
 852}
 853
 854static void __gre_tunnel_init(struct net_device *dev)
 855{
 856	struct ip_tunnel *tunnel;
 857	int t_hlen;
 858
 859	tunnel = netdev_priv(dev);
 860	tunnel->tun_hlen = ip_gre_calc_hlen(tunnel->parms.o_flags);
 861	tunnel->parms.iph.protocol = IPPROTO_GRE;
 862
 863	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
 864
 865	t_hlen = tunnel->hlen + sizeof(struct iphdr);
 866
 867	dev->needed_headroom	= LL_MAX_HEADER + t_hlen + 4;
 868	dev->mtu		= ETH_DATA_LEN - t_hlen - 4;
 869
 870	dev->features		|= GRE_FEATURES;
 871	dev->hw_features	|= GRE_FEATURES;
 872
 873	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
 874		/* TCP offload with GRE SEQ is not supported, nor
 875		 * can we support 2 levels of outer headers requiring
 876		 * an update.
 877		 */
 878		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
 879		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
 880			dev->features    |= NETIF_F_GSO_SOFTWARE;
 881			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
 882		}
 883
 884		/* Can use a lockless transmit, unless we generate
 885		 * output sequences
 886		 */
 887		dev->features |= NETIF_F_LLTX;
 888	}
 889}
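/* Worked example: a plain gre device with no option flags and no
 * extra encapsulation has tun_hlen = 4, so t_hlen = 4 + 20 = 24 and
 * the initial MTU becomes ETH_DATA_LEN - t_hlen - 4 =
 * 1500 - 24 - 4 = 1472, keeping 4 bytes of slack (one more option
 * word) on top of the computed header length.
 */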
 890
 891static int ipgre_tunnel_init(struct net_device *dev)
 892{
 893	struct ip_tunnel *tunnel = netdev_priv(dev);
 894	struct iphdr *iph = &tunnel->parms.iph;
 895
 896	__gre_tunnel_init(dev);
 897
 898	memcpy(dev->dev_addr, &iph->saddr, 4);
 899	memcpy(dev->broadcast, &iph->daddr, 4);
 900
 901	dev->flags		= IFF_NOARP;
 902	netif_keep_dst(dev);
 903	dev->addr_len		= 4;
 904
 905	if (iph->daddr && !tunnel->collect_md) {
 906#ifdef CONFIG_NET_IPGRE_BROADCAST
 907		if (ipv4_is_multicast(iph->daddr)) {
 908			if (!iph->saddr)
 909				return -EINVAL;
 910			dev->flags = IFF_BROADCAST;
 911			dev->header_ops = &ipgre_header_ops;
 912		}
 913#endif
 914	} else if (!tunnel->collect_md) {
 915		dev->header_ops = &ipgre_header_ops;
 916	}
 917
 918	return ip_tunnel_init(dev);
 919}
 920
 921static const struct gre_protocol ipgre_protocol = {
 922	.handler     = gre_rcv,
 923	.err_handler = gre_err,
 924};
 925
 926static int __net_init ipgre_init_net(struct net *net)
 927{
 928	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
 929}
 930
 931static void __net_exit ipgre_exit_net(struct net *net)
 932{
 933	struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);
 934	ip_tunnel_delete_net(itn, &ipgre_link_ops);
 935}
 936
 937static struct pernet_operations ipgre_net_ops = {
 938	.init = ipgre_init_net,
 939	.exit = ipgre_exit_net,
 940	.id   = &ipgre_net_id,
 941	.size = sizeof(struct ip_tunnel_net),
 942};
 943
 944static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
 945{
 946	__be16 flags;
 947
 948	if (!data)
 949		return 0;
 950
 951	flags = 0;
 952	if (data[IFLA_GRE_IFLAGS])
 953		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
 954	if (data[IFLA_GRE_OFLAGS])
 955		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
 956	if (flags & (GRE_VERSION|GRE_ROUTING))
 957		return -EINVAL;
 958
 959	if (data[IFLA_GRE_COLLECT_METADATA] &&
 960	    data[IFLA_GRE_ENCAP_TYPE] &&
 961	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
 962		return -EINVAL;
 963
 964	return 0;
 965}
 966
 967static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
 968{
 969	__be32 daddr;
 970
 971	if (tb[IFLA_ADDRESS]) {
 972		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
 973			return -EINVAL;
 974		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
 975			return -EADDRNOTAVAIL;
 976	}
 977
 978	if (!data)
 979		goto out;
 980
 981	if (data[IFLA_GRE_REMOTE]) {
 982		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
 983		if (!daddr)
 984			return -EINVAL;
 985	}
 986
 987out:
 988	return ipgre_tunnel_validate(tb, data);
 989}
 990
 991static void ipgre_netlink_parms(struct net_device *dev,
 992				struct nlattr *data[],
 993				struct nlattr *tb[],
 994				struct ip_tunnel_parm *parms)
 995{
 996	memset(parms, 0, sizeof(*parms));
 997
 998	parms->iph.protocol = IPPROTO_GRE;
 999
1000	if (!data)
1001		return;
1002
1003	if (data[IFLA_GRE_LINK])
1004		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1005
1006	if (data[IFLA_GRE_IFLAGS])
1007		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));
1008
1009	if (data[IFLA_GRE_OFLAGS])
1010		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));
1011
1012	if (data[IFLA_GRE_IKEY])
1013		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1014
1015	if (data[IFLA_GRE_OKEY])
1016		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1017
1018	if (data[IFLA_GRE_LOCAL])
1019		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
1020
1021	if (data[IFLA_GRE_REMOTE])
1022		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
1023
1024	if (data[IFLA_GRE_TTL])
1025		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1026
1027	if (data[IFLA_GRE_TOS])
1028		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1029
1030	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
1031		parms->iph.frag_off = htons(IP_DF);
1032
1033	if (data[IFLA_GRE_COLLECT_METADATA]) {
1034		struct ip_tunnel *t = netdev_priv(dev);
1035
1036		t->collect_md = true;
1037		if (dev->type == ARPHRD_IPGRE)
1038			dev->type = ARPHRD_NONE;
1039	}
1040}
1041
1042/* This function returns true when ENCAP attributes are present in the nl msg */
1043static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1044				      struct ip_tunnel_encap *ipencap)
1045{
1046	bool ret = false;
1047
1048	memset(ipencap, 0, sizeof(*ipencap));
1049
1050	if (!data)
1051		return ret;
1052
1053	if (data[IFLA_GRE_ENCAP_TYPE]) {
1054		ret = true;
1055		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1056	}
1057
1058	if (data[IFLA_GRE_ENCAP_FLAGS]) {
1059		ret = true;
1060		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1061	}
1062
1063	if (data[IFLA_GRE_ENCAP_SPORT]) {
1064		ret = true;
1065		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1066	}
1067
1068	if (data[IFLA_GRE_ENCAP_DPORT]) {
1069		ret = true;
1070		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1071	}
1072
1073	return ret;
1074}
1075
1076static int gre_tap_init(struct net_device *dev)
1077{
1078	__gre_tunnel_init(dev);
1079	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1080
1081	return ip_tunnel_init(dev);
1082}
1083
1084static const struct net_device_ops gre_tap_netdev_ops = {
1085	.ndo_init		= gre_tap_init,
1086	.ndo_uninit		= ip_tunnel_uninit,
1087	.ndo_start_xmit		= gre_tap_xmit,
1088	.ndo_set_mac_address 	= eth_mac_addr,
1089	.ndo_validate_addr	= eth_validate_addr,
1090	.ndo_change_mtu		= ip_tunnel_change_mtu,
1091	.ndo_get_stats64	= ip_tunnel_get_stats64,
1092	.ndo_get_iflink		= ip_tunnel_get_iflink,
1093	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1094};
1095
1096static void ipgre_tap_setup(struct net_device *dev)
1097{
1098	ether_setup(dev);
1099	dev->netdev_ops	= &gre_tap_netdev_ops;
1100	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1101	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
1102	ip_tunnel_setup(dev, gre_tap_net_id);
1103}
1104
1105static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1106			 struct nlattr *tb[], struct nlattr *data[])
1107{
1108	struct ip_tunnel_parm p;
1109	struct ip_tunnel_encap ipencap;
1110
1111	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1112		struct ip_tunnel *t = netdev_priv(dev);
1113		int err = ip_tunnel_encap_setup(t, &ipencap);
1114
1115		if (err < 0)
1116			return err;
1117	}
1118
1119	ipgre_netlink_parms(dev, data, tb, &p);
1120	return ip_tunnel_newlink(dev, tb, &p);
1121}
1122
1123static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1124			    struct nlattr *data[])
1125{
1126	struct ip_tunnel_parm p;
1127	struct ip_tunnel_encap ipencap;
1128
1129	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1130		struct ip_tunnel *t = netdev_priv(dev);
1131		int err = ip_tunnel_encap_setup(t, &ipencap);
1132
1133		if (err < 0)
1134			return err;
1135	}
1136
1137	ipgre_netlink_parms(dev, data, tb, &p);
1138	return ip_tunnel_changelink(dev, tb, &p);
1139}
1140
1141static size_t ipgre_get_size(const struct net_device *dev)
1142{
1143	return
1144		/* IFLA_GRE_LINK */
1145		nla_total_size(4) +
1146		/* IFLA_GRE_IFLAGS */
1147		nla_total_size(2) +
1148		/* IFLA_GRE_OFLAGS */
1149		nla_total_size(2) +
1150		/* IFLA_GRE_IKEY */
1151		nla_total_size(4) +
1152		/* IFLA_GRE_OKEY */
1153		nla_total_size(4) +
1154		/* IFLA_GRE_LOCAL */
1155		nla_total_size(4) +
1156		/* IFLA_GRE_REMOTE */
1157		nla_total_size(4) +
1158		/* IFLA_GRE_TTL */
1159		nla_total_size(1) +
1160		/* IFLA_GRE_TOS */
1161		nla_total_size(1) +
1162		/* IFLA_GRE_PMTUDISC */
1163		nla_total_size(1) +
1164		/* IFLA_GRE_ENCAP_TYPE */
1165		nla_total_size(2) +
1166		/* IFLA_GRE_ENCAP_FLAGS */
1167		nla_total_size(2) +
1168		/* IFLA_GRE_ENCAP_SPORT */
1169		nla_total_size(2) +
1170		/* IFLA_GRE_ENCAP_DPORT */
1171		nla_total_size(2) +
1172		/* IFLA_GRE_COLLECT_METADATA */
1173		nla_total_size(0) +
1174		0;
1175}
1176
1177static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1178{
1179	struct ip_tunnel *t = netdev_priv(dev);
1180	struct ip_tunnel_parm *p = &t->parms;
1181
1182	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1183	    nla_put_be16(skb, IFLA_GRE_IFLAGS, tnl_flags_to_gre_flags(p->i_flags)) ||
1184	    nla_put_be16(skb, IFLA_GRE_OFLAGS, tnl_flags_to_gre_flags(p->o_flags)) ||
1185	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1186	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1187	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1188	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1189	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1190	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1191	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1192		       !!(p->iph.frag_off & htons(IP_DF))))
1193		goto nla_put_failure;
1194
1195	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1196			t->encap.type) ||
1197	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
1198			 t->encap.sport) ||
1199	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
1200			 t->encap.dport) ||
1201	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
1202			t->encap.flags))
1203		goto nla_put_failure;
1204
1205	if (t->collect_md) {
1206		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
1207			goto nla_put_failure;
1208	}
1209
1210	return 0;
1211
1212nla_put_failure:
1213	return -EMSGSIZE;
1214}
1215
1216static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1217	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1218	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1219	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1220	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1221	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1222	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
1223	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
1224	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1225	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1226	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
1227	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
1228	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
1229	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
1230	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
1231	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
1232};
1233
1234static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1235	.kind		= "gre",
1236	.maxtype	= IFLA_GRE_MAX,
1237	.policy		= ipgre_policy,
1238	.priv_size	= sizeof(struct ip_tunnel),
1239	.setup		= ipgre_tunnel_setup,
1240	.validate	= ipgre_tunnel_validate,
1241	.newlink	= ipgre_newlink,
1242	.changelink	= ipgre_changelink,
1243	.dellink	= ip_tunnel_dellink,
1244	.get_size	= ipgre_get_size,
1245	.fill_info	= ipgre_fill_info,
1246	.get_link_net	= ip_tunnel_get_link_net,
1247};
1248
1249static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1250	.kind		= "gretap",
1251	.maxtype	= IFLA_GRE_MAX,
1252	.policy		= ipgre_policy,
1253	.priv_size	= sizeof(struct ip_tunnel),
1254	.setup		= ipgre_tap_setup,
1255	.validate	= ipgre_tap_validate,
1256	.newlink	= ipgre_newlink,
1257	.changelink	= ipgre_changelink,
1258	.dellink	= ip_tunnel_dellink,
1259	.get_size	= ipgre_get_size,
1260	.fill_info	= ipgre_fill_info,
1261	.get_link_net	= ip_tunnel_get_link_net,
1262};
1263
1264struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1265					u8 name_assign_type)
1266{
1267	struct nlattr *tb[IFLA_MAX + 1];
1268	struct net_device *dev;
1269	struct ip_tunnel *t;
1270	int err;
1271
1272	memset(&tb, 0, sizeof(tb));
1273
1274	dev = rtnl_create_link(net, name, name_assign_type,
1275			       &ipgre_tap_ops, tb);
1276	if (IS_ERR(dev))
1277		return dev;
1278
1279	/* Configure flow based GRE device. */
1280	t = netdev_priv(dev);
1281	t->collect_md = true;
1282
1283	err = ipgre_newlink(net, dev, tb, NULL);
1284	if (err < 0)
1285		goto out;
1286
1287	/* openvswitch users expect packet sizes to be unrestricted,
1288	 * so set the largest MTU we can.
1289	 */
1290	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1291	if (err)
1292		goto out;
1293
1294	return dev;
1295out:
1296	free_netdev(dev);
1297	return ERR_PTR(err);
1298}
1299EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
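/* A minimal usage sketch with an illustrative device name (assuming
 * the caller holds the RTNL lock, as rtnl_create_link() requires):
 *
 *	struct net_device *dev;
 *
 *	dev = gretap_fb_dev_create(net, "gretap_fb", NET_NAME_USER);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *
 * The returned device is in collect_md (flow based) mode and still
 * down; bringing it up is left to the caller. openvswitch is the
 * in-tree user.
 */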
1300
1301static int __net_init ipgre_tap_init_net(struct net *net)
1302{
1303	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
1304}
1305
1306static void __net_exit ipgre_tap_exit_net(struct net *net)
1307{
1308	struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);
1309	ip_tunnel_delete_net(itn, &ipgre_tap_ops);
1310}
1311
1312static struct pernet_operations ipgre_tap_net_ops = {
1313	.init = ipgre_tap_init_net,
1314	.exit = ipgre_tap_exit_net,
1315	.id   = &gre_tap_net_id,
1316	.size = sizeof(struct ip_tunnel_net),
1317};
1318
1319static int __init ipgre_init(void)
1320{
1321	int err;
1322
1323	pr_info("GRE over IPv4 tunneling driver\n");
1324
1325	err = register_pernet_device(&ipgre_net_ops);
1326	if (err < 0)
1327		return err;
1328
1329	err = register_pernet_device(&ipgre_tap_net_ops);
1330	if (err < 0)
1331		goto pnet_tap_faied;
1332
1333	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1334	if (err < 0) {
1335		pr_info("%s: can't add protocol\n", __func__);
1336		goto add_proto_failed;
1337	}
1338
1339	err = rtnl_link_register(&ipgre_link_ops);
1340	if (err < 0)
1341		goto rtnl_link_failed;
1342
1343	err = rtnl_link_register(&ipgre_tap_ops);
1344	if (err < 0)
1345		goto tap_ops_failed;
1346
1347	return 0;
1348
1349tap_ops_failed:
1350	rtnl_link_unregister(&ipgre_link_ops);
1351rtnl_link_failed:
1352	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1353add_proto_failed:
1354	unregister_pernet_device(&ipgre_tap_net_ops);
1355pnet_tap_faied:
1356	unregister_pernet_device(&ipgre_net_ops);
1357	return err;
1358}
1359
1360static void __exit ipgre_fini(void)
1361{
1362	rtnl_link_unregister(&ipgre_tap_ops);
1363	rtnl_link_unregister(&ipgre_link_ops);
1364	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1365	unregister_pernet_device(&ipgre_tap_net_ops);
1366	unregister_pernet_device(&ipgre_net_ops);
1367}
1368
1369module_init(ipgre_init);
1370module_exit(ipgre_fini);
1371MODULE_LICENSE("GPL");
1372MODULE_ALIAS_RTNL_LINK("gre");
1373MODULE_ALIAS_RTNL_LINK("gretap");
1374MODULE_ALIAS_NETDEV("gre0");
1375MODULE_ALIAS_NETDEV("gretap0");
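/* From userspace the link types registered above are driven through
 * iproute2, for example (addresses are placeholders):
 *
 *	ip link add gre1 type gre remote 192.0.2.2 local 192.0.2.1 ttl 64
 *	ip link add tap1 type gretap remote 192.0.2.2 local 192.0.2.1
 *	ip link add gre2 type gretap external		# collect_md mode
 *
 * The MODULE_ALIAS_* entries let such requests autoload this module.
 */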
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	Linux NET3:	GRE over IP protocol decoder.
   4 *
   5 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
   6 */
   7
   8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   9
  10#include <linux/capability.h>
  11#include <linux/module.h>
  12#include <linux/types.h>
  13#include <linux/kernel.h>
  14#include <linux/slab.h>
  15#include <linux/uaccess.h>
  16#include <linux/skbuff.h>
  17#include <linux/netdevice.h>
  18#include <linux/in.h>
  19#include <linux/tcp.h>
  20#include <linux/udp.h>
  21#include <linux/if_arp.h>
  22#include <linux/if_vlan.h>
  23#include <linux/init.h>
  24#include <linux/in6.h>
  25#include <linux/inetdevice.h>
  26#include <linux/igmp.h>
  27#include <linux/netfilter_ipv4.h>
  28#include <linux/etherdevice.h>
  29#include <linux/if_ether.h>
  30
  31#include <net/sock.h>
  32#include <net/ip.h>
  33#include <net/icmp.h>
  34#include <net/protocol.h>
  35#include <net/ip_tunnels.h>
  36#include <net/arp.h>
  37#include <net/checksum.h>
  38#include <net/dsfield.h>
  39#include <net/inet_ecn.h>
  40#include <net/xfrm.h>
  41#include <net/net_namespace.h>
  42#include <net/netns/generic.h>
  43#include <net/rtnetlink.h>
  44#include <net/gre.h>
  45#include <net/dst_metadata.h>
  46#include <net/erspan.h>
  47
  48/*
  49   Problems & solutions
  50   --------------------
  51
  52   1. The most important issue is detecting local dead loops.
  53   They would cause complete host lockup in transmit, which
  54   would be "resolved" by stack overflow or, if queueing is enabled,
  55   with infinite looping in net_bh.
  56
  57   We cannot track such dead loops during route installation,
  58   it is an infeasible task. The most general solution would be
  59   to keep skb->encapsulation counter (sort of local ttl),
  60   and silently drop packet when it expires. It is a good
  61   solution, but it supposes maintaining a new variable in ALL
  62   skb, even if no tunneling is used.
  63
  64   Current solution: xmit_recursion breaks dead loops. This is a percpu
  65   counter, since when we enter the first ndo_xmit(), cpu migration is
  66   forbidden. We force an exit if this counter reaches RECURSION_LIMIT
  67
  68   2. Networking dead loops would not kill routers, but would really
  69   kill network. IP hop limit plays role of "t->recursion" in this case,
  70   if we copy it from packet being encapsulated to upper header.
  71   It is very good solution, but it introduces two problems:
  72
  73   - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
  74     do not work over tunnels.
  75   - traceroute does not work. I planned to relay ICMP from tunnel,
  76     so that this problem would be solved and traceroute output
  77     would be even more informative. This idea appeared to be wrong:
  78     only Linux complies with rfc1812 now (yes, guys, Linux is the only
  79     true router now :-)), all routers (at least, in neighbourhood of mine)
  80     return only 8 bytes of payload. It is the end.
  81
  82   Hence, if we want OSPF to work or traceroute to say something reasonable,
  83   we should search for another solution.
  84
  85   One of them is to parse packet trying to detect inner encapsulation
  86   made by our node. It is difficult or even impossible, especially,
  87   taking into account fragmentation. To be short, ttl is not a solution at all.
  88
  89   Current solution: The solution was UNEXPECTEDLY SIMPLE.
  90   We force DF flag on tunnels with preconfigured hop limit,
  91   that is ALL. :-) Well, it does not remove the problem completely,
  92   but exponential growth of network traffic is changed to linear
  93   (branches that exceed pmtu are pruned) and tunnel mtu
  94   rapidly degrades to value <68, where looping stops.
  95   Yes, it is not good if there exists a router in the loop,
  96   which does not force DF, even when encapsulating packets have DF set.
  97   But it is not our problem! Nobody could accuse us, we made
  98   all that we could make. Even if it is your gated who injected
  99   fatal route to network, even if it were you who configured
 100   fatal static route: you are innocent. :-)
 101
 102   Alexey Kuznetsov.
 103 */
 104
 105static bool log_ecn_error = true;
 106module_param(log_ecn_error, bool, 0644);
 107MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 108
 109static struct rtnl_link_ops ipgre_link_ops __read_mostly;
 110static const struct header_ops ipgre_header_ops;
 111
 112static int ipgre_tunnel_init(struct net_device *dev);
 113static void erspan_build_header(struct sk_buff *skb,
 114				u32 id, u32 index,
 115				bool truncate, bool is_ipv4);
 116
 117static unsigned int ipgre_net_id __read_mostly;
 118static unsigned int gre_tap_net_id __read_mostly;
 119static unsigned int erspan_net_id __read_mostly;
 120
 121static int ipgre_err(struct sk_buff *skb, u32 info,
 122		     const struct tnl_ptk_info *tpi)
 123{
 124
 125	/* All the routers (except for Linux) return only
 126	   8 bytes of packet payload. It means, that precise relaying of
 127	   ICMP in the real Internet is absolutely infeasible.
 128
 129	   Moreover, Cisco "wise men" put GRE key to the third word
 130	   in GRE header. It makes impossible maintaining even soft
 131	   state for keyed GRE tunnels with enabled checksum. Tell
 132	   them "thank you".
 133
 134	   Well, I wonder, rfc1812 was written by Cisco employee,
 135	   what the hell these idiots break standards established
 136	   by themselves???
 137	   */
 138	struct net *net = dev_net(skb->dev);
 139	struct ip_tunnel_net *itn;
 140	const struct iphdr *iph;
 141	const int type = icmp_hdr(skb)->type;
 142	const int code = icmp_hdr(skb)->code;
 143	unsigned int data_len = 0;
 144	struct ip_tunnel *t;
 145
 146	if (tpi->proto == htons(ETH_P_TEB))
 147		itn = net_generic(net, gre_tap_net_id);
 148	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
 149		 tpi->proto == htons(ETH_P_ERSPAN2))
 150		itn = net_generic(net, erspan_net_id);
 151	else
 152		itn = net_generic(net, ipgre_net_id);
 153
 154	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
 155	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 156			     iph->daddr, iph->saddr, tpi->key);
 157
 158	if (!t)
 159		return -ENOENT;
 160
 161	switch (type) {
 162	default:
 163	case ICMP_PARAMETERPROB:
 164		return 0;
 165
 166	case ICMP_DEST_UNREACH:
 167		switch (code) {
 168		case ICMP_SR_FAILED:
 169		case ICMP_PORT_UNREACH:
 170			/* Impossible event. */
 171			return 0;
 172		default:
 173			/* All others are translated to HOST_UNREACH.
 174			   rfc2003 contains "deep thoughts" about NET_UNREACH,
 175			   I believe they are just ether pollution. --ANK
 176			 */
 177			break;
 178		}
 179		break;
 180
 181	case ICMP_TIME_EXCEEDED:
 182		if (code != ICMP_EXC_TTL)
 183			return 0;
 184		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
 185		break;
 186
 187	case ICMP_REDIRECT:
 188		break;
 189	}
 190
 191#if IS_ENABLED(CONFIG_IPV6)
 192	if (tpi->proto == htons(ETH_P_IPV6) &&
 193	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
 194					type, data_len))
 195		return 0;
 196#endif
 197
 198	if (t->parms.iph.daddr == 0 ||
 199	    ipv4_is_multicast(t->parms.iph.daddr))
 200		return 0;
 201
 202	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
 203		return 0;
 204
 205	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
 206		t->err_count++;
 207	else
 208		t->err_count = 1;
 209	t->err_time = jiffies;
 210
 211	return 0;
 212}
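/* RFC 4884 note for the ICMP_TIME_EXCEEDED branch above: the length
 * of the embedded original datagram is carried in an otherwise
 * reserved header byte and counted in 32-bit words, hence the "* 4"
 * when computing data_len; a value of 17, for instance, means 68
 * bytes of inner packet are available for
 * ip6_err_gen_icmpv6_unreach() to inspect.
 */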
 213
 214static void gre_err(struct sk_buff *skb, u32 info)
 215{
 216	/* All the routers (except for Linux) return only
 217	 * 8 bytes of packet payload. It means, that precise relaying of
 218	 * ICMP in the real Internet is absolutely infeasible.
 219	 *
 220	 * Moreover, Cisco "wise men" put GRE key to the third word
 221	 * in GRE header. It makes impossible maintaining even soft
 222	 * state for keyed
 223	 * GRE tunnels with enabled checksum. Tell them "thank you".
 224	 *
 225	 * Well, I wonder, rfc1812 was written by Cisco employee,
 226	 * what the hell these idiots break standards established
 227	 * by themselves???
 228	 */
 229
 230	const struct iphdr *iph = (struct iphdr *)skb->data;
 231	const int type = icmp_hdr(skb)->type;
 232	const int code = icmp_hdr(skb)->code;
 233	struct tnl_ptk_info tpi;
 234
 235	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
 236			     iph->ihl * 4) < 0)
 237		return;
 238
 239	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
 240		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
 241				 skb->dev->ifindex, IPPROTO_GRE);
 242		return;
 243	}
 244	if (type == ICMP_REDIRECT) {
 245		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
 246			      IPPROTO_GRE);
 247		return;
 248	}
 249
 250	ipgre_err(skb, info, &tpi);
 251}
 252
 253static bool is_erspan_type1(int gre_hdr_len)
 254{
 255	/* Both ERSPAN type I (version 0) and type II (version 1) use
 256	 * protocol 0x88BE, but the type I has only 4-byte GRE header,
 257	 * while type II has 8-byte.
 258	 */
 259	return gre_hdr_len == 4;
 260}
 261
 262static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 263		      int gre_hdr_len)
 264{
 265	struct net *net = dev_net(skb->dev);
 266	struct metadata_dst *tun_dst = NULL;
 267	struct erspan_base_hdr *ershdr;
 268	struct ip_tunnel_net *itn;
 269	struct ip_tunnel *tunnel;
 270	const struct iphdr *iph;
 271	struct erspan_md2 *md2;
 272	int ver;
 273	int len;
 274
 275	itn = net_generic(net, erspan_net_id);
 276	iph = ip_hdr(skb);
 277	if (is_erspan_type1(gre_hdr_len)) {
 278		ver = 0;
 279		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
 280					  tpi->flags | TUNNEL_NO_KEY,
 281					  iph->saddr, iph->daddr, 0);
 282	} else {
 283		if (unlikely(!pskb_may_pull(skb,
 284					    gre_hdr_len + sizeof(*ershdr))))
 285			return PACKET_REJECT;
 286
 287		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
 288		ver = ershdr->ver;
 289		iph = ip_hdr(skb);
 290		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
 291					  tpi->flags | TUNNEL_KEY,
 292					  iph->saddr, iph->daddr, tpi->key);
 293	}
 294
 295	if (tunnel) {
 296		if (is_erspan_type1(gre_hdr_len))
 297			len = gre_hdr_len;
 298		else
 299			len = gre_hdr_len + erspan_hdr_len(ver);
 300
 301		if (unlikely(!pskb_may_pull(skb, len)))
 302			return PACKET_REJECT;
 303
 304		if (__iptunnel_pull_header(skb,
 305					   len,
 306					   htons(ETH_P_TEB),
 307					   false, false) < 0)
 308			goto drop;
 309
 310		if (tunnel->collect_md) {
 311			struct erspan_metadata *pkt_md, *md;
 312			struct ip_tunnel_info *info;
 313			unsigned char *gh;
 314			__be64 tun_id;
 315			__be16 flags;
 316
 317			tpi->flags |= TUNNEL_KEY;
 318			flags = tpi->flags;
 319			tun_id = key32_to_tunnel_id(tpi->key);
 320
 321			tun_dst = ip_tun_rx_dst(skb, flags,
 322						tun_id, sizeof(*md));
 323			if (!tun_dst)
 324				return PACKET_REJECT;
 325
 326			/* skb can be uncloned in __iptunnel_pull_header, so
 327			 * old pkt_md is no longer valid and we need to reset
 328			 * it
 329			 */
 330			gh = skb_network_header(skb) +
 331			     skb_network_header_len(skb);
 332			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
 333							    sizeof(*ershdr));
 334			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
 335			md->version = ver;
 336			md2 = &md->u.md2;
 337			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
 338						       ERSPAN_V2_MDSIZE);
 339
 340			info = &tun_dst->u.tun_info;
 341			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
 342			info->options_len = sizeof(*md);
 343		}
 344
 345		skb_reset_mac_header(skb);
 346		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 347		return PACKET_RCVD;
 348	}
 349	return PACKET_REJECT;
 350
 351drop:
 352	kfree_skb(skb);
 353	return PACKET_RCVD;
 354}
 355
 356static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 357		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
 358{
 359	struct metadata_dst *tun_dst = NULL;
 360	const struct iphdr *iph;
 361	struct ip_tunnel *tunnel;
 362
 363	iph = ip_hdr(skb);
 364	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 365				  iph->saddr, iph->daddr, tpi->key);
 366
 367	if (tunnel) {
 368		const struct iphdr *tnl_params;
 369
 370		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
 371					   raw_proto, false) < 0)
 372			goto drop;
 373
 374		/* Special case for ipgre_header_parse(), which expects the
 375		 * mac_header to point to the outer IP header.
 376		 */
 377		if (tunnel->dev->header_ops == &ipgre_header_ops)
 378			skb_pop_mac_header(skb);
 379		else
 380			skb_reset_mac_header(skb);
 381
 382		tnl_params = &tunnel->parms.iph;
 383		if (tunnel->collect_md || tnl_params->daddr == 0) {
 384			__be16 flags;
 385			__be64 tun_id;
 386
 387			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
 388			tun_id = key32_to_tunnel_id(tpi->key);
 389			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
 390			if (!tun_dst)
 391				return PACKET_REJECT;
 392		}
 393
 394		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 395		return PACKET_RCVD;
 396	}
 397	return PACKET_NEXT;
 398
 399drop:
 400	kfree_skb(skb);
 401	return PACKET_RCVD;
 402}
 403
 404static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 405		     int hdr_len)
 406{
 407	struct net *net = dev_net(skb->dev);
 408	struct ip_tunnel_net *itn;
 409	int res;
 410
 411	if (tpi->proto == htons(ETH_P_TEB))
 412		itn = net_generic(net, gre_tap_net_id);
 413	else
 414		itn = net_generic(net, ipgre_net_id);
 415
 416	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
 417	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
 418		/* ipgre tunnels in collect metadata mode should receive
 419		 * also ETH_P_TEB traffic.
 420		 */
 421		itn = net_generic(net, ipgre_net_id);
 422		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
 423	}
 424	return res;
 425}
 426
 427static int gre_rcv(struct sk_buff *skb)
 428{
 429	struct tnl_ptk_info tpi;
 430	bool csum_err = false;
 431	int hdr_len;
 432
 433#ifdef CONFIG_NET_IPGRE_BROADCAST
 434	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
 435		/* Looped back packet, drop it! */
 436		if (rt_is_output_route(skb_rtable(skb)))
 437			goto drop;
 438	}
 439#endif
 440
 441	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
 442	if (hdr_len < 0)
 443		goto drop;
 444
 445	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
 446		     tpi.proto == htons(ETH_P_ERSPAN2))) {
 447		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 448			return 0;
 449		goto out;
 450	}
 451
 452	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 453		return 0;
 454
 455out:
 456	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 457drop:
 458	kfree_skb(skb);
 459	return 0;
 460}
 461
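/* Push the GRE header for a configured (non-metadata) tunnel and hand
 * the skb to ip_tunnel_xmit() for the outer IP header. A sequence
 * number is only consumed when TUNNEL_SEQ is set on the tunnel.
 */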
 462static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 463		       const struct iphdr *tnl_params,
 464		       __be16 proto)
 465{
 466	struct ip_tunnel *tunnel = netdev_priv(dev);
 467	__be16 flags = tunnel->parms.o_flags;
 468
 469	/* Push GRE header. */
 470	gre_build_header(skb, tunnel->tun_hlen,
 471			 flags, proto, tunnel->parms.o_key,
 472			 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
 473
 474	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 475}
 476
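/* Select the GSO type matching the configured checksum mode before the
 * generic tunnel offload fixups run.
 */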
 477static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 478{
 479	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 480}
 481
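/* Flow-based (collect_md) transmit path: all tunnel parameters come
 * from the per-skb tunnel metadata rather than from the netdevice
 * configuration. Such a device can be created with, for example, the
 * following illustrative iproute2 command:
 *
 *	ip link add dev gretap1 type gretap external
 */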
 482static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 483			__be16 proto)
 484{
 485	struct ip_tunnel *tunnel = netdev_priv(dev);
 486	struct ip_tunnel_info *tun_info;
 487	const struct ip_tunnel_key *key;
 488	int tunnel_hlen;
 489	__be16 flags;
 490
 491	tun_info = skb_tunnel_info(skb);
 492	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 493		     ip_tunnel_info_af(tun_info) != AF_INET))
 494		goto err_free_skb;
 495
 496	key = &tun_info->key;
 497	tunnel_hlen = gre_calc_hlen(key->tun_flags);
 498
 499	if (skb_cow_head(skb, dev->needed_headroom))
 500		goto err_free_skb;
 501
 502	/* Push Tunnel header. */
 503	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
 504		goto err_free_skb;
 505
 506	flags = tun_info->key.tun_flags &
 507		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
 508	gre_build_header(skb, tunnel_hlen, flags, proto,
 509			 tunnel_id_to_key32(tun_info->key.tun_id),
 510			 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
 511
 512	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
 513
 514	return;
 515
 516err_free_skb:
 517	kfree_skb(skb);
 518	DEV_STATS_INC(dev, tx_dropped);
 519}
 520
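/* collect_md transmit path for ERSPAN: the session parameters (version,
 * index/dir/hwid) are taken from the TUNNEL_ERSPAN_OPT metadata carried
 * with the skb, and oversized frames are truncated and flagged in the
 * ERSPAN header.
 */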
 521static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 522{
 523	struct ip_tunnel *tunnel = netdev_priv(dev);
 524	struct ip_tunnel_info *tun_info;
 525	const struct ip_tunnel_key *key;
 526	struct erspan_metadata *md;
 527	bool truncate = false;
 528	__be16 proto;
 529	int tunnel_hlen;
 530	int version;
 531	int nhoff;
 532
 533	tun_info = skb_tunnel_info(skb);
 534	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 535		     ip_tunnel_info_af(tun_info) != AF_INET))
 536		goto err_free_skb;
 537
 538	key = &tun_info->key;
 539	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
 540		goto err_free_skb;
 541	if (tun_info->options_len < sizeof(*md))
 542		goto err_free_skb;
 543	md = ip_tunnel_info_opts(tun_info);
 544
 545	/* ERSPAN uses a fixed 8-byte GRE header. */
 546	version = md->version;
 547	tunnel_hlen = 8 + erspan_hdr_len(version);
 548
 549	if (skb_cow_head(skb, dev->needed_headroom))
 550		goto err_free_skb;
 551
 552	if (gre_handle_offloads(skb, false))
 553		goto err_free_skb;
 554
 555	if (skb->len > dev->mtu + dev->hard_header_len) {
 556		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
 557			goto err_free_skb;
 558		truncate = true;
 559	}
 560
 561	nhoff = skb_network_offset(skb);
 562	if (skb->protocol == htons(ETH_P_IP) &&
 563	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
 564		truncate = true;
 565
 566	if (skb->protocol == htons(ETH_P_IPV6)) {
 567		int thoff;
 568
 569		if (skb_transport_header_was_set(skb))
 570			thoff = skb_transport_offset(skb);
 571		else
 572			thoff = nhoff + sizeof(struct ipv6hdr);
 573		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
 574			truncate = true;
 575	}
 576
 577	if (version == 1) {
 578		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
 579				    ntohl(md->u.index), truncate, true);
 580		proto = htons(ETH_P_ERSPAN);
 581	} else if (version == 2) {
 582		erspan_build_header_v2(skb,
 583				       ntohl(tunnel_id_to_key32(key->tun_id)),
 584				       md->u.md2.dir,
 585				       get_hwid(&md->u.md2),
 586				       truncate, true);
 587		proto = htons(ETH_P_ERSPAN2);
 588	} else {
 589		goto err_free_skb;
 590	}
 591
 592	gre_build_header(skb, 8, TUNNEL_SEQ,
 593			 proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));
 594
 595	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
 596
 597	return;
 598
 599err_free_skb:
 600	kfree_skb(skb);
 601	DEV_STATS_INC(dev, tx_dropped);
 602}
 603
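/* ndo_fill_metadata_dst: resolve the route the packet would take and
 * record the chosen source address in the tunnel metadata, so callers
 * such as openvswitch can learn the complete tunnel key.
 */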
 604static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 605{
 606	struct ip_tunnel_info *info = skb_tunnel_info(skb);
 607	const struct ip_tunnel_key *key;
 608	struct rtable *rt;
 609	struct flowi4 fl4;
 610
 611	if (ip_tunnel_info_af(info) != AF_INET)
 612		return -EINVAL;
 613
 614	key = &info->key;
 615	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
 616			    tunnel_id_to_key32(key->tun_id),
 617			    key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
 618			    skb->mark, skb_get_hash(skb), key->flow_flags);
 619	rt = ip_route_output_key(dev_net(dev), &fl4);
 620	if (IS_ERR(rt))
 621		return PTR_ERR(rt);
 622
 623	ip_rt_put(rt);
 624	info->key.u.ipv4.src = fl4.saddr;
 625	return 0;
 626}
 627
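/* Transmit for native GRE devices. When header_ops are in use, the
 * outer IP header was already built by ipgre_header() and is taken from
 * the skb itself; otherwise the header template from the tunnel
 * configuration is used.
 */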
 628static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
 629			      struct net_device *dev)
 630{
 631	struct ip_tunnel *tunnel = netdev_priv(dev);
 632	const struct iphdr *tnl_params;
 633
 634	if (!pskb_inet_may_pull(skb))
 635		goto free_skb;
 636
 637	if (tunnel->collect_md) {
 638		gre_fb_xmit(skb, dev, skb->protocol);
 639		return NETDEV_TX_OK;
 640	}
 641
 642	if (dev->header_ops) {
 643		int pull_len = tunnel->hlen + sizeof(struct iphdr);
 644
 645		if (skb_cow_head(skb, 0))
 646			goto free_skb;
 647
 648		tnl_params = (const struct iphdr *)skb->data;
 649
 650		if (!pskb_network_may_pull(skb, pull_len))
 651			goto free_skb;
 652
 653		/* ip_tunnel_xmit() needs skb->data pointing to gre header. */
 654		skb_pull(skb, pull_len);
 655		skb_reset_mac_header(skb);
 656
 657		if (skb->ip_summed == CHECKSUM_PARTIAL &&
 658		    skb_checksum_start(skb) < skb->data)
 659			goto free_skb;
 660	} else {
 661		if (skb_cow_head(skb, dev->needed_headroom))
 662			goto free_skb;
 663
 664		tnl_params = &tunnel->parms.iph;
 665	}
 666
 667	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
 668		goto free_skb;
 669
 670	__gre_xmit(skb, dev, tnl_params, skb->protocol);
 671	return NETDEV_TX_OK;
 672
 673free_skb:
 674	kfree_skb(skb);
 675	DEV_STATS_INC(dev, tx_dropped);
 676	return NETDEV_TX_OK;
 677}
 678
 679static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 680			       struct net_device *dev)
 681{
 682	struct ip_tunnel *tunnel = netdev_priv(dev);
 683	bool truncate = false;
 684	__be16 proto;
 685
 686	if (!pskb_inet_may_pull(skb))
 687		goto free_skb;
 688
 689	if (tunnel->collect_md) {
 690		erspan_fb_xmit(skb, dev);
 691		return NETDEV_TX_OK;
 692	}
 693
 694	if (gre_handle_offloads(skb, false))
 695		goto free_skb;
 696
 697	if (skb_cow_head(skb, dev->needed_headroom))
 698		goto free_skb;
 699
 700	if (skb->len > dev->mtu + dev->hard_header_len) {
 701		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
 702			goto free_skb;
 703		truncate = true;
 704	}
 705
 706	/* Push ERSPAN header */
 707	if (tunnel->erspan_ver == 0) {
 708		proto = htons(ETH_P_ERSPAN);
 709		tunnel->parms.o_flags &= ~TUNNEL_SEQ;
 710	} else if (tunnel->erspan_ver == 1) {
 711		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
 712				    tunnel->index,
 713				    truncate, true);
 714		proto = htons(ETH_P_ERSPAN);
 715	} else if (tunnel->erspan_ver == 2) {
 716		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
 717				       tunnel->dir, tunnel->hwid,
 718				       truncate, true);
 719		proto = htons(ETH_P_ERSPAN2);
 720	} else {
 721		goto free_skb;
 722	}
 723
 724	tunnel->parms.o_flags &= ~TUNNEL_KEY;
 725	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
 726	return NETDEV_TX_OK;
 727
 728free_skb:
 729	kfree_skb(skb);
 730	DEV_STATS_INC(dev, tx_dropped);
 731	return NETDEV_TX_OK;
 732}
 733
 734static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
 735				struct net_device *dev)
 736{
 737	struct ip_tunnel *tunnel = netdev_priv(dev);
 738
 739	if (!pskb_inet_may_pull(skb))
 740		goto free_skb;
 741
 742	if (tunnel->collect_md) {
 743		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
 744		return NETDEV_TX_OK;
 745	}
 746
 747	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
 748		goto free_skb;
 749
 750	if (skb_cow_head(skb, dev->needed_headroom))
 751		goto free_skb;
 752
 753	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
 754	return NETDEV_TX_OK;
 755
 756free_skb:
 757	kfree_skb(skb);
 758	DEV_STATS_INC(dev, tx_dropped);
 759	return NETDEV_TX_OK;
 760}
 761
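/* Recompute header length, headroom and (optionally) MTU after the
 * tunnel's output flags have changed, and re-evaluate whether software
 * GSO can still be offered (it cannot with TUNNEL_SEQ, or with
 * TUNNEL_CSUM on top of an encapsulation).
 */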
 762static void ipgre_link_update(struct net_device *dev, bool set_mtu)
 763{
 764	struct ip_tunnel *tunnel = netdev_priv(dev);
 765	__be16 flags;
 766	int len;
 767
 768	len = tunnel->tun_hlen;
 769	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
 770	len = tunnel->tun_hlen - len;
 771	tunnel->hlen = tunnel->hlen + len;
 772
 773	if (dev->header_ops)
 774		dev->hard_header_len += len;
 775	else
 776		dev->needed_headroom += len;
 777
 778	if (set_mtu)
 779		dev->mtu = max_t(int, dev->mtu - len, 68);
 780
 781	flags = tunnel->parms.o_flags;
 782
 783	if (flags & TUNNEL_SEQ ||
 784	    (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
 785		dev->features &= ~NETIF_F_GSO_SOFTWARE;
 786		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
 787	} else {
 788		dev->features |= NETIF_F_GSO_SOFTWARE;
 789		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
 790	}
 791}
 792
 793static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p,
 794			    int cmd)
 795{
 796	int err;
 797
 798	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
 799		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
 800		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
 801		    ((p->i_flags | p->o_flags) & (GRE_VERSION | GRE_ROUTING)))
 802			return -EINVAL;
 803	}
 804
 805	p->i_flags = gre_flags_to_tnl_flags(p->i_flags);
 806	p->o_flags = gre_flags_to_tnl_flags(p->o_flags);
 807
 808	err = ip_tunnel_ctl(dev, p, cmd);
 809	if (err)
 810		return err;
 811
 812	if (cmd == SIOCCHGTUNNEL) {
 813		struct ip_tunnel *t = netdev_priv(dev);
 814
 815		t->parms.i_flags = p->i_flags;
 816		t->parms.o_flags = p->o_flags;
 817
 818		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
 819			ipgre_link_update(dev, true);
 820	}
 821
 822	p->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
 823	p->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
 824	return 0;
 825}
 826
 827/* Nice toy. Unfortunately, useless in real life :-)
 828   It allows one to construct a virtual multiprotocol broadcast "LAN"
 829   over the Internet, provided multicast routing is tuned.
 830
 831
 832   I have no idea whether this bicycle was invented before me,
 833   so I had to set ARPHRD_IPGRE to a random value.
 834   My impression is that Cisco could make something similar,
 835   but the feature is apparently missing in IOS<=11.2(8).
 836
 837   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
 838   with broadcast 224.66.66.66. If you have access to the mbone, play with me :-)
 839
 840   ping -t 255 224.66.66.66
 841
 842   If nobody answers, mbone does not work.
 843
 844   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
 845   ip addr add 10.66.66.<somewhat>/24 dev Universe
 846   ifconfig Universe up
 847   ifconfig Universe add fe80::<Your_real_addr>/10
 848   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
 849   ftp 10.66.66.66
 850   ...
 851   ftp fec0:6666:6666::193.233.7.65
 852   ...
 853 */
 854static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
 855			unsigned short type,
 856			const void *daddr, const void *saddr, unsigned int len)
 857{
 858	struct ip_tunnel *t = netdev_priv(dev);
 859	struct iphdr *iph;
 860	struct gre_base_hdr *greh;
 861
 862	iph = skb_push(skb, t->hlen + sizeof(*iph));
 863	greh = (struct gre_base_hdr *)(iph+1);
 864	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
 865	greh->protocol = htons(type);
 866
 867	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
 868
 869	/* Set the source hardware address. */
 870	if (saddr)
 871		memcpy(&iph->saddr, saddr, 4);
 872	if (daddr)
 873		memcpy(&iph->daddr, daddr, 4);
 874	if (iph->daddr)
 875		return t->hlen + sizeof(*iph);
 876
 877	return -(t->hlen + sizeof(*iph));
 878}
 879
 880static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
 881{
 882	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
 883	memcpy(haddr, &iph->saddr, 4);
 884	return 4;
 885}
 886
 887static const struct header_ops ipgre_header_ops = {
 888	.create	= ipgre_header,
 889	.parse	= ipgre_header_parse,
 890};
 891
 892#ifdef CONFIG_NET_IPGRE_BROADCAST
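/* Broadcast/multicast GRE: join the multicast group on the underlying
 * device when the tunnel is brought up, and leave it again on close.
 */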
 893static int ipgre_open(struct net_device *dev)
 894{
 895	struct ip_tunnel *t = netdev_priv(dev);
 896
 897	if (ipv4_is_multicast(t->parms.iph.daddr)) {
 898		struct flowi4 fl4;
 899		struct rtable *rt;
 900
 901		rt = ip_route_output_gre(t->net, &fl4,
 902					 t->parms.iph.daddr,
 903					 t->parms.iph.saddr,
 904					 t->parms.o_key,
 905					 RT_TOS(t->parms.iph.tos),
 906					 t->parms.link);
 907		if (IS_ERR(rt))
 908			return -EADDRNOTAVAIL;
 909		dev = rt->dst.dev;
 910		ip_rt_put(rt);
 911		if (!__in_dev_get_rtnl(dev))
 912			return -EADDRNOTAVAIL;
 913		t->mlink = dev->ifindex;
 914		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
 915	}
 916	return 0;
 917}
 918
 919static int ipgre_close(struct net_device *dev)
 920{
 921	struct ip_tunnel *t = netdev_priv(dev);
 922
 923	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
 924		struct in_device *in_dev;
 925		in_dev = inetdev_by_index(t->net, t->mlink);
 926		if (in_dev)
 927			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
 928	}
 929	return 0;
 930}
 931#endif
 932
 933static const struct net_device_ops ipgre_netdev_ops = {
 934	.ndo_init		= ipgre_tunnel_init,
 935	.ndo_uninit		= ip_tunnel_uninit,
 936#ifdef CONFIG_NET_IPGRE_BROADCAST
 937	.ndo_open		= ipgre_open,
 938	.ndo_stop		= ipgre_close,
 939#endif
 940	.ndo_start_xmit		= ipgre_xmit,
 941	.ndo_siocdevprivate	= ip_tunnel_siocdevprivate,
 942	.ndo_change_mtu		= ip_tunnel_change_mtu,
 943	.ndo_get_stats64	= dev_get_tstats64,
 944	.ndo_get_iflink		= ip_tunnel_get_iflink,
 945	.ndo_tunnel_ctl		= ipgre_tunnel_ctl,
 946};
 947
 948#define GRE_FEATURES (NETIF_F_SG |		\
 949		      NETIF_F_FRAGLIST |	\
 950		      NETIF_F_HIGHDMA |		\
 951		      NETIF_F_HW_CSUM)
 952
 953static void ipgre_tunnel_setup(struct net_device *dev)
 954{
 955	dev->netdev_ops		= &ipgre_netdev_ops;
 956	dev->type		= ARPHRD_IPGRE;
 957	ip_tunnel_setup(dev, ipgre_net_id);
 958}
 959
 960static void __gre_tunnel_init(struct net_device *dev)
 961{
 962	struct ip_tunnel *tunnel;
 963	__be16 flags;
 964
 965	tunnel = netdev_priv(dev);
 966	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
 967	tunnel->parms.iph.protocol = IPPROTO_GRE;
 968
 969	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
 970	dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
 971
 972	dev->features		|= GRE_FEATURES | NETIF_F_LLTX;
 973	dev->hw_features	|= GRE_FEATURES;
 974
 975	flags = tunnel->parms.o_flags;
 976
 977	/* TCP offload with GRE SEQ is not supported, nor can we support 2
 978	 * levels of outer headers requiring an update.
 979	 */
 980	if (flags & TUNNEL_SEQ)
 981		return;
 982	if (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)
 983		return;
 984
 985	dev->features |= NETIF_F_GSO_SOFTWARE;
 986	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
 987}
 988
 989static int ipgre_tunnel_init(struct net_device *dev)
 990{
 991	struct ip_tunnel *tunnel = netdev_priv(dev);
 992	struct iphdr *iph = &tunnel->parms.iph;
 993
 994	__gre_tunnel_init(dev);
 995
 996	__dev_addr_set(dev, &iph->saddr, 4);
 997	memcpy(dev->broadcast, &iph->daddr, 4);
 998
 999	dev->flags		= IFF_NOARP;
1000	netif_keep_dst(dev);
1001	dev->addr_len		= 4;
1002
1003	if (iph->daddr && !tunnel->collect_md) {
1004#ifdef CONFIG_NET_IPGRE_BROADCAST
1005		if (ipv4_is_multicast(iph->daddr)) {
1006			if (!iph->saddr)
1007				return -EINVAL;
1008			dev->flags = IFF_BROADCAST;
1009			dev->header_ops = &ipgre_header_ops;
1010			dev->hard_header_len = tunnel->hlen + sizeof(*iph);
1011			dev->needed_headroom = 0;
1012		}
1013#endif
1014	} else if (!tunnel->collect_md) {
1015		dev->header_ops = &ipgre_header_ops;
1016		dev->hard_header_len = tunnel->hlen + sizeof(*iph);
1017		dev->needed_headroom = 0;
1018	}
1019
1020	return ip_tunnel_init(dev);
1021}
1022
1023static const struct gre_protocol ipgre_protocol = {
1024	.handler     = gre_rcv,
1025	.err_handler = gre_err,
1026};
1027
1028static int __net_init ipgre_init_net(struct net *net)
1029{
1030	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
1031}
1032
1033static void __net_exit ipgre_exit_batch_rtnl(struct list_head *list_net,
1034					     struct list_head *dev_to_kill)
1035{
1036	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops,
1037			      dev_to_kill);
1038}
1039
1040static struct pernet_operations ipgre_net_ops = {
1041	.init = ipgre_init_net,
1042	.exit_batch_rtnl = ipgre_exit_batch_rtnl,
1043	.id   = &ipgre_net_id,
1044	.size = sizeof(struct ip_tunnel_net),
1045};
1046
1047static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1048				 struct netlink_ext_ack *extack)
1049{
1050	__be16 flags;
1051
1052	if (!data)
1053		return 0;
1054
1055	flags = 0;
1056	if (data[IFLA_GRE_IFLAGS])
1057		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1058	if (data[IFLA_GRE_OFLAGS])
1059		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1060	if (flags & (GRE_VERSION|GRE_ROUTING))
1061		return -EINVAL;
1062
1063	if (data[IFLA_GRE_COLLECT_METADATA] &&
1064	    data[IFLA_GRE_ENCAP_TYPE] &&
1065	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
1066		return -EINVAL;
1067
1068	return 0;
1069}
1070
1071static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1072			      struct netlink_ext_ack *extack)
1073{
1074	__be32 daddr;
1075
1076	if (tb[IFLA_ADDRESS]) {
1077		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1078			return -EINVAL;
1079		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1080			return -EADDRNOTAVAIL;
1081	}
1082
1083	if (!data)
1084		goto out;
1085
1086	if (data[IFLA_GRE_REMOTE]) {
1087		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1088		if (!daddr)
1089			return -EINVAL;
1090	}
1091
1092out:
1093	return ipgre_tunnel_validate(tb, data, extack);
1094}
1095
1096static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
1097			   struct netlink_ext_ack *extack)
1098{
1099	__be16 flags = 0;
1100	int ret;
1101
1102	if (!data)
1103		return 0;
1104
1105	ret = ipgre_tap_validate(tb, data, extack);
1106	if (ret)
1107		return ret;
1108
1109	if (data[IFLA_GRE_ERSPAN_VER] &&
1110	    nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
1111		return 0;
1112
1113	/* ERSPAN type II/III should only have the GRE sequence and key flags */
1114	if (data[IFLA_GRE_OFLAGS])
1115		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1116	if (data[IFLA_GRE_IFLAGS])
1117		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1118	if (!data[IFLA_GRE_COLLECT_METADATA] &&
1119	    flags != (GRE_SEQ | GRE_KEY))
1120		return -EINVAL;
1121
1122	/* The ERSPAN session ID is only 10 bits wide. Since we reuse the
1123	 * 32-bit key field as the ID, check its range.
1124	 */
1125	if (data[IFLA_GRE_IKEY] &&
1126	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1127		return -EINVAL;
1128
1129	if (data[IFLA_GRE_OKEY] &&
1130	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1131		return -EINVAL;
1132
1133	return 0;
1134}
1135
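/* Translate IFLA_GRE_* netlink attributes into the ip_tunnel_parm used
 * by the generic ip_tunnel code.
 */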
1136static int ipgre_netlink_parms(struct net_device *dev,
1137				struct nlattr *data[],
1138				struct nlattr *tb[],
1139				struct ip_tunnel_parm *parms,
1140				__u32 *fwmark)
1141{
1142	struct ip_tunnel *t = netdev_priv(dev);
1143
1144	memset(parms, 0, sizeof(*parms));
1145
1146	parms->iph.protocol = IPPROTO_GRE;
1147
1148	if (!data)
1149		return 0;
1150
1151	if (data[IFLA_GRE_LINK])
1152		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1153
1154	if (data[IFLA_GRE_IFLAGS])
1155		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));
1156
1157	if (data[IFLA_GRE_OFLAGS])
1158		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));
1159
1160	if (data[IFLA_GRE_IKEY])
1161		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1162
1163	if (data[IFLA_GRE_OKEY])
1164		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1165
1166	if (data[IFLA_GRE_LOCAL])
1167		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
1168
1169	if (data[IFLA_GRE_REMOTE])
1170		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
1171
1172	if (data[IFLA_GRE_TTL])
1173		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1174
1175	if (data[IFLA_GRE_TOS])
1176		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1177
1178	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
1179		if (t->ignore_df)
1180			return -EINVAL;
1181		parms->iph.frag_off = htons(IP_DF);
1182	}
1183
1184	if (data[IFLA_GRE_COLLECT_METADATA]) {
1185		t->collect_md = true;
1186		if (dev->type == ARPHRD_IPGRE)
1187			dev->type = ARPHRD_NONE;
1188	}
1189
1190	if (data[IFLA_GRE_IGNORE_DF]) {
1191		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
1192		  && (parms->iph.frag_off & htons(IP_DF)))
1193			return -EINVAL;
1194		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
1195	}
1196
1197	if (data[IFLA_GRE_FWMARK])
1198		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1199
1200	return 0;
1201}
1202
1203static int erspan_netlink_parms(struct net_device *dev,
1204				struct nlattr *data[],
1205				struct nlattr *tb[],
1206				struct ip_tunnel_parm *parms,
1207				__u32 *fwmark)
1208{
1209	struct ip_tunnel *t = netdev_priv(dev);
1210	int err;
1211
1212	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
1213	if (err)
1214		return err;
1215	if (!data)
1216		return 0;
1217
1218	if (data[IFLA_GRE_ERSPAN_VER]) {
1219		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1220
1221		if (t->erspan_ver > 2)
1222			return -EINVAL;
1223	}
1224
1225	if (t->erspan_ver == 1) {
1226		if (data[IFLA_GRE_ERSPAN_INDEX]) {
1227			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1228			if (t->index & ~INDEX_MASK)
1229				return -EINVAL;
1230		}
1231	} else if (t->erspan_ver == 2) {
1232		if (data[IFLA_GRE_ERSPAN_DIR]) {
1233			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1234			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
1235				return -EINVAL;
1236		}
1237		if (data[IFLA_GRE_ERSPAN_HWID]) {
1238			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1239			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
1240				return -EINVAL;
1241		}
1242	}
1243
1244	return 0;
1245}
1246
1247/* Returns true when ENCAP attributes are present in the netlink message */
1248static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1249				      struct ip_tunnel_encap *ipencap)
1250{
1251	bool ret = false;
1252
1253	memset(ipencap, 0, sizeof(*ipencap));
1254
1255	if (!data)
1256		return ret;
1257
1258	if (data[IFLA_GRE_ENCAP_TYPE]) {
1259		ret = true;
1260		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1261	}
1262
1263	if (data[IFLA_GRE_ENCAP_FLAGS]) {
1264		ret = true;
1265		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1266	}
1267
1268	if (data[IFLA_GRE_ENCAP_SPORT]) {
1269		ret = true;
1270		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1271	}
1272
1273	if (data[IFLA_GRE_ENCAP_DPORT]) {
1274		ret = true;
1275		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1276	}
1277
1278	return ret;
1279}
1280
1281static int gre_tap_init(struct net_device *dev)
1282{
1283	__gre_tunnel_init(dev);
1284	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1285	netif_keep_dst(dev);
1286
1287	return ip_tunnel_init(dev);
1288}
1289
1290static const struct net_device_ops gre_tap_netdev_ops = {
1291	.ndo_init		= gre_tap_init,
1292	.ndo_uninit		= ip_tunnel_uninit,
1293	.ndo_start_xmit		= gre_tap_xmit,
1294	.ndo_set_mac_address 	= eth_mac_addr,
1295	.ndo_validate_addr	= eth_validate_addr,
1296	.ndo_change_mtu		= ip_tunnel_change_mtu,
1297	.ndo_get_stats64	= dev_get_tstats64,
1298	.ndo_get_iflink		= ip_tunnel_get_iflink,
1299	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1300};
1301
1302static int erspan_tunnel_init(struct net_device *dev)
1303{
1304	struct ip_tunnel *tunnel = netdev_priv(dev);
1305
1306	if (tunnel->erspan_ver == 0)
1307		tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
1308	else
1309		tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */
1310
1311	tunnel->parms.iph.protocol = IPPROTO_GRE;
1312	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1313		       erspan_hdr_len(tunnel->erspan_ver);
1314
1315	dev->features		|= GRE_FEATURES;
1316	dev->hw_features	|= GRE_FEATURES;
1317	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
1318	netif_keep_dst(dev);
1319
1320	return ip_tunnel_init(dev);
1321}
1322
1323static const struct net_device_ops erspan_netdev_ops = {
1324	.ndo_init		= erspan_tunnel_init,
1325	.ndo_uninit		= ip_tunnel_uninit,
1326	.ndo_start_xmit		= erspan_xmit,
1327	.ndo_set_mac_address	= eth_mac_addr,
1328	.ndo_validate_addr	= eth_validate_addr,
1329	.ndo_change_mtu		= ip_tunnel_change_mtu,
1330	.ndo_get_stats64	= dev_get_tstats64,
1331	.ndo_get_iflink		= ip_tunnel_get_iflink,
1332	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1333};
1334
1335static void ipgre_tap_setup(struct net_device *dev)
1336{
1337	ether_setup(dev);
1338	dev->max_mtu = 0;
1339	dev->netdev_ops	= &gre_tap_netdev_ops;
1340	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1341	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
1342	ip_tunnel_setup(dev, gre_tap_net_id);
1343}
1344
1345static int
1346ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
1347{
1348	struct ip_tunnel_encap ipencap;
1349
1350	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1351		struct ip_tunnel *t = netdev_priv(dev);
1352		int err = ip_tunnel_encap_setup(t, &ipencap);
1353
1354		if (err < 0)
1355			return err;
1356	}
1357
1358	return 0;
1359}
1360
1361static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1362			 struct nlattr *tb[], struct nlattr *data[],
1363			 struct netlink_ext_ack *extack)
1364{
1365	struct ip_tunnel_parm p;
1366	__u32 fwmark = 0;
1367	int err;
1368
1369	err = ipgre_newlink_encap_setup(dev, data);
1370	if (err)
1371		return err;
1372
1373	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1374	if (err < 0)
1375		return err;
1376	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1377}
1378
1379static int erspan_newlink(struct net *src_net, struct net_device *dev,
1380			  struct nlattr *tb[], struct nlattr *data[],
1381			  struct netlink_ext_ack *extack)
1382{
1383	struct ip_tunnel_parm p;
1384	__u32 fwmark = 0;
1385	int err;
1386
1387	err = ipgre_newlink_encap_setup(dev, data);
1388	if (err)
1389		return err;
1390
1391	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1392	if (err)
1393		return err;
1394	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1395}
1396
1397static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1398			    struct nlattr *data[],
1399			    struct netlink_ext_ack *extack)
1400{
1401	struct ip_tunnel *t = netdev_priv(dev);
1402	__u32 fwmark = t->fwmark;
1403	struct ip_tunnel_parm p;
1404	int err;
1405
1406	err = ipgre_newlink_encap_setup(dev, data);
1407	if (err)
1408		return err;
1409
1410	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1411	if (err < 0)
1412		return err;
1413
1414	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1415	if (err < 0)
1416		return err;
1417
1418	t->parms.i_flags = p.i_flags;
1419	t->parms.o_flags = p.o_flags;
1420
1421	ipgre_link_update(dev, !tb[IFLA_MTU]);
1422
1423	return 0;
1424}
1425
1426static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
1427			     struct nlattr *data[],
1428			     struct netlink_ext_ack *extack)
1429{
1430	struct ip_tunnel *t = netdev_priv(dev);
1431	__u32 fwmark = t->fwmark;
1432	struct ip_tunnel_parm p;
1433	int err;
1434
1435	err = ipgre_newlink_encap_setup(dev, data);
1436	if (err)
1437		return err;
1438
1439	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1440	if (err < 0)
1441		return err;
1442
1443	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1444	if (err < 0)
1445		return err;
1446
1447	t->parms.i_flags = p.i_flags;
1448	t->parms.o_flags = p.o_flags;
1449
1450	return 0;
1451}
1452
1453static size_t ipgre_get_size(const struct net_device *dev)
1454{
1455	return
1456		/* IFLA_GRE_LINK */
1457		nla_total_size(4) +
1458		/* IFLA_GRE_IFLAGS */
1459		nla_total_size(2) +
1460		/* IFLA_GRE_OFLAGS */
1461		nla_total_size(2) +
1462		/* IFLA_GRE_IKEY */
1463		nla_total_size(4) +
1464		/* IFLA_GRE_OKEY */
1465		nla_total_size(4) +
1466		/* IFLA_GRE_LOCAL */
1467		nla_total_size(4) +
1468		/* IFLA_GRE_REMOTE */
1469		nla_total_size(4) +
1470		/* IFLA_GRE_TTL */
1471		nla_total_size(1) +
1472		/* IFLA_GRE_TOS */
1473		nla_total_size(1) +
1474		/* IFLA_GRE_PMTUDISC */
1475		nla_total_size(1) +
1476		/* IFLA_GRE_ENCAP_TYPE */
1477		nla_total_size(2) +
1478		/* IFLA_GRE_ENCAP_FLAGS */
1479		nla_total_size(2) +
1480		/* IFLA_GRE_ENCAP_SPORT */
1481		nla_total_size(2) +
1482		/* IFLA_GRE_ENCAP_DPORT */
1483		nla_total_size(2) +
1484		/* IFLA_GRE_COLLECT_METADATA */
1485		nla_total_size(0) +
1486		/* IFLA_GRE_IGNORE_DF */
1487		nla_total_size(1) +
1488		/* IFLA_GRE_FWMARK */
1489		nla_total_size(4) +
1490		/* IFLA_GRE_ERSPAN_INDEX */
1491		nla_total_size(4) +
1492		/* IFLA_GRE_ERSPAN_VER */
1493		nla_total_size(1) +
1494		/* IFLA_GRE_ERSPAN_DIR */
1495		nla_total_size(1) +
1496		/* IFLA_GRE_ERSPAN_HWID */
1497		nla_total_size(2) +
1498		0;
1499}
1500
1501static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1502{
1503	struct ip_tunnel *t = netdev_priv(dev);
1504	struct ip_tunnel_parm *p = &t->parms;
1505	__be16 o_flags = p->o_flags;
1506
1507	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1508	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
1509			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1510	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
1511			 gre_tnl_flags_to_gre_flags(o_flags)) ||
1512	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1513	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1514	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1515	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1516	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1517	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1518	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1519		       !!(p->iph.frag_off & htons(IP_DF))) ||
1520	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
1521		goto nla_put_failure;
1522
1523	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1524			t->encap.type) ||
1525	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
1526			 t->encap.sport) ||
1527	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
1528			 t->encap.dport) ||
1529	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
1530			t->encap.flags))
1531		goto nla_put_failure;
1532
1533	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
1534		goto nla_put_failure;
1535
1536	if (t->collect_md) {
1537		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
1538			goto nla_put_failure;
1539	}
1540
1541	return 0;
1542
1543nla_put_failure:
1544	return -EMSGSIZE;
1545}
1546
1547static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1548{
1549	struct ip_tunnel *t = netdev_priv(dev);
1550
1551	if (t->erspan_ver <= 2) {
1552		if (t->erspan_ver != 0 && !t->collect_md)
1553			t->parms.o_flags |= TUNNEL_KEY;
1554
1555		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1556			goto nla_put_failure;
1557
1558		if (t->erspan_ver == 1) {
1559			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1560				goto nla_put_failure;
1561		} else if (t->erspan_ver == 2) {
1562			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1563				goto nla_put_failure;
1564			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1565				goto nla_put_failure;
1566		}
1567	}
1568
1569	return ipgre_fill_info(skb, dev);
1570
1571nla_put_failure:
1572	return -EMSGSIZE;
1573}
1574
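/* An ERSPAN device defaults to version 1 below. Illustrative iproute2
 * usage (addresses and IDs are examples only):
 *
 *	ip link add dev erspan1 type erspan local 10.0.0.1 remote 10.0.0.2 \
 *		seq key 100 erspan_ver 1 erspan 123
 */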
1575static void erspan_setup(struct net_device *dev)
1576{
1577	struct ip_tunnel *t = netdev_priv(dev);
1578
1579	ether_setup(dev);
1580	dev->max_mtu = 0;
1581	dev->netdev_ops = &erspan_netdev_ops;
1582	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1583	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1584	ip_tunnel_setup(dev, erspan_net_id);
1585	t->erspan_ver = 1;
1586}
1587
1588static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1589	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1590	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1591	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1592	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1593	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1594	[IFLA_GRE_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
1595	[IFLA_GRE_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
1596	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1597	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1598	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
1599	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
1600	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
1601	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
1602	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
1603	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
1604	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
1605	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
1606	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
1607	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
1608	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
1609	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
1610};
1611
1612static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1613	.kind		= "gre",
1614	.maxtype	= IFLA_GRE_MAX,
1615	.policy		= ipgre_policy,
1616	.priv_size	= sizeof(struct ip_tunnel),
1617	.setup		= ipgre_tunnel_setup,
1618	.validate	= ipgre_tunnel_validate,
1619	.newlink	= ipgre_newlink,
1620	.changelink	= ipgre_changelink,
1621	.dellink	= ip_tunnel_dellink,
1622	.get_size	= ipgre_get_size,
1623	.fill_info	= ipgre_fill_info,
1624	.get_link_net	= ip_tunnel_get_link_net,
1625};
1626
1627static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1628	.kind		= "gretap",
1629	.maxtype	= IFLA_GRE_MAX,
1630	.policy		= ipgre_policy,
1631	.priv_size	= sizeof(struct ip_tunnel),
1632	.setup		= ipgre_tap_setup,
1633	.validate	= ipgre_tap_validate,
1634	.newlink	= ipgre_newlink,
1635	.changelink	= ipgre_changelink,
1636	.dellink	= ip_tunnel_dellink,
1637	.get_size	= ipgre_get_size,
1638	.fill_info	= ipgre_fill_info,
1639	.get_link_net	= ip_tunnel_get_link_net,
1640};
1641
1642static struct rtnl_link_ops erspan_link_ops __read_mostly = {
1643	.kind		= "erspan",
1644	.maxtype	= IFLA_GRE_MAX,
1645	.policy		= ipgre_policy,
1646	.priv_size	= sizeof(struct ip_tunnel),
1647	.setup		= erspan_setup,
1648	.validate	= erspan_validate,
1649	.newlink	= erspan_newlink,
1650	.changelink	= erspan_changelink,
1651	.dellink	= ip_tunnel_dellink,
1652	.get_size	= ipgre_get_size,
1653	.fill_info	= erspan_fill_info,
1654	.get_link_net	= ip_tunnel_get_link_net,
1655};
1656
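/* Create a flow-based gretap device (collect_md set) on behalf of
 * another subsystem; exported for openvswitch's GRE vport.
 */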
1657struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1658					u8 name_assign_type)
1659{
1660	struct nlattr *tb[IFLA_MAX + 1];
1661	struct net_device *dev;
1662	LIST_HEAD(list_kill);
1663	struct ip_tunnel *t;
1664	int err;
1665
1666	memset(&tb, 0, sizeof(tb));
1667
1668	dev = rtnl_create_link(net, name, name_assign_type,
1669			       &ipgre_tap_ops, tb, NULL);
1670	if (IS_ERR(dev))
1671		return dev;
1672
1673	/* Configure flow based GRE device. */
1674	t = netdev_priv(dev);
1675	t->collect_md = true;
1676
1677	err = ipgre_newlink(net, dev, tb, NULL, NULL);
1678	if (err < 0) {
1679		free_netdev(dev);
1680		return ERR_PTR(err);
1681	}
1682
1683	/* openvswitch users expect packet sizes to be unrestricted,
1684	 * so set the largest MTU we can.
1685	 */
1686	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1687	if (err)
1688		goto out;
1689
1690	err = rtnl_configure_link(dev, NULL, 0, NULL);
1691	if (err < 0)
1692		goto out;
1693
1694	return dev;
1695out:
1696	ip_tunnel_dellink(dev, &list_kill);
1697	unregister_netdevice_many(&list_kill);
1698	return ERR_PTR(err);
1699}
1700EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
1701
1702static int __net_init ipgre_tap_init_net(struct net *net)
1703{
1704	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
1705}
1706
1707static void __net_exit ipgre_tap_exit_batch_rtnl(struct list_head *list_net,
1708						 struct list_head *dev_to_kill)
1709{
1710	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops,
1711			      dev_to_kill);
1712}
1713
1714static struct pernet_operations ipgre_tap_net_ops = {
1715	.init = ipgre_tap_init_net,
1716	.exit_batch_rtnl = ipgre_tap_exit_batch_rtnl,
1717	.id   = &gre_tap_net_id,
1718	.size = sizeof(struct ip_tunnel_net),
1719};
1720
1721static int __net_init erspan_init_net(struct net *net)
1722{
1723	return ip_tunnel_init_net(net, erspan_net_id,
1724				  &erspan_link_ops, "erspan0");
1725}
1726
1727static void __net_exit erspan_exit_batch_rtnl(struct list_head *net_list,
1728					      struct list_head *dev_to_kill)
1729{
1730	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops,
1731			      dev_to_kill);
1732}
1733
1734static struct pernet_operations erspan_net_ops = {
1735	.init = erspan_init_net,
1736	.exit_batch_rtnl = erspan_exit_batch_rtnl,
1737	.id   = &erspan_net_id,
1738	.size = sizeof(struct ip_tunnel_net),
1739};
1740
1741static int __init ipgre_init(void)
1742{
1743	int err;
1744
1745	pr_info("GRE over IPv4 tunneling driver\n");
1746
1747	err = register_pernet_device(&ipgre_net_ops);
1748	if (err < 0)
1749		return err;
1750
1751	err = register_pernet_device(&ipgre_tap_net_ops);
1752	if (err < 0)
1753		goto pnet_tap_failed;
1754
1755	err = register_pernet_device(&erspan_net_ops);
1756	if (err < 0)
1757		goto pnet_erspan_failed;
1758
1759	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1760	if (err < 0) {
1761		pr_info("%s: can't add protocol\n", __func__);
1762		goto add_proto_failed;
1763	}
1764
1765	err = rtnl_link_register(&ipgre_link_ops);
1766	if (err < 0)
1767		goto rtnl_link_failed;
1768
1769	err = rtnl_link_register(&ipgre_tap_ops);
1770	if (err < 0)
1771		goto tap_ops_failed;
1772
1773	err = rtnl_link_register(&erspan_link_ops);
1774	if (err < 0)
1775		goto erspan_link_failed;
1776
1777	return 0;
1778
1779erspan_link_failed:
1780	rtnl_link_unregister(&ipgre_tap_ops);
1781tap_ops_failed:
1782	rtnl_link_unregister(&ipgre_link_ops);
1783rtnl_link_failed:
1784	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1785add_proto_failed:
1786	unregister_pernet_device(&erspan_net_ops);
1787pnet_erspan_failed:
1788	unregister_pernet_device(&ipgre_tap_net_ops);
1789pnet_tap_failed:
1790	unregister_pernet_device(&ipgre_net_ops);
1791	return err;
1792}
1793
1794static void __exit ipgre_fini(void)
1795{
1796	rtnl_link_unregister(&ipgre_tap_ops);
1797	rtnl_link_unregister(&ipgre_link_ops);
1798	rtnl_link_unregister(&erspan_link_ops);
1799	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1800	unregister_pernet_device(&ipgre_tap_net_ops);
1801	unregister_pernet_device(&ipgre_net_ops);
1802	unregister_pernet_device(&erspan_net_ops);
1803}
1804
1805module_init(ipgre_init);
1806module_exit(ipgre_fini);
1807MODULE_DESCRIPTION("IPv4 GRE tunnels over IP library");
1808MODULE_LICENSE("GPL");
1809MODULE_ALIAS_RTNL_LINK("gre");
1810MODULE_ALIAS_RTNL_LINK("gretap");
1811MODULE_ALIAS_RTNL_LINK("erspan");
1812MODULE_ALIAS_NETDEV("gre0");
1813MODULE_ALIAS_NETDEV("gretap0");
1814MODULE_ALIAS_NETDEV("erspan0");