   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	Linux NET3:	GRE over IP protocol decoder.
   4 *
   5 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
   6 */
   7
   8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   9
  10#include <linux/capability.h>
  11#include <linux/module.h>
  12#include <linux/types.h>
  13#include <linux/kernel.h>
  14#include <linux/slab.h>
  15#include <linux/uaccess.h>
  16#include <linux/skbuff.h>
  17#include <linux/netdevice.h>
  18#include <linux/in.h>
  19#include <linux/tcp.h>
  20#include <linux/udp.h>
  21#include <linux/if_arp.h>
  22#include <linux/if_vlan.h>
  23#include <linux/init.h>
  24#include <linux/in6.h>
  25#include <linux/inetdevice.h>
  26#include <linux/igmp.h>
  27#include <linux/netfilter_ipv4.h>
  28#include <linux/etherdevice.h>
  29#include <linux/if_ether.h>
  30
  31#include <net/sock.h>
  32#include <net/ip.h>
  33#include <net/icmp.h>
  34#include <net/protocol.h>
  35#include <net/ip_tunnels.h>
  36#include <net/arp.h>
  37#include <net/checksum.h>
  38#include <net/dsfield.h>
  39#include <net/inet_ecn.h>
  40#include <net/xfrm.h>
  41#include <net/net_namespace.h>
  42#include <net/netns/generic.h>
  43#include <net/rtnetlink.h>
  44#include <net/gre.h>
  45#include <net/dst_metadata.h>
  46#include <net/erspan.h>
  47#include <net/inet_dscp.h>
  48
  49/*
  50   Problems & solutions
  51   --------------------
  52
  53   1. The most important issue is detecting local dead loops.
  54   They would cause complete host lockup in transmit, which
  55   would be "resolved" by stack overflow or, if queueing is enabled,
  56   with infinite looping in net_bh.
  57
  58   We cannot track such dead loops during route installation;
  59   it is an infeasible task. The most general solution would be
  60   to keep an skb->encapsulation counter (a sort of local ttl),
  61   and silently drop the packet when it expires. It is a good
  62   solution, but it supposes maintaining a new variable in ALL
  63   skbs, even if no tunneling is used.
  64
  65   Current solution: xmit_recursion breaks dead loops. This is a percpu
  66   counter, since when we enter the first ndo_xmit(), cpu migration is
  67   forbidden. We force an exit if this counter reaches RECURSION_LIMIT
  68
  69   2. Networking dead loops would not kill routers, but would really
  70   kill the network. The IP hop limit plays the role of "t->recursion" in this
  71   case, if we copy it from the packet being encapsulated to the upper header.
  72   It is a very good solution, but it introduces two problems:
  73
  74   - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
  75     do not work over tunnels.
  76   - traceroute does not work. I planned to relay ICMP from tunnel,
  77     so that this problem would be solved and traceroute output
  78     would be even more informative. This idea appeared to be wrong:
  79     only Linux complies with rfc1812 now (yes, guys, Linux is the only
  80     true router now :-)), all routers (at least, in neighbourhood of mine)
  81     return only 8 bytes of payload. It is the end.
  82
  83   Hence, if we want OSPF to work or traceroute to say something reasonable,
  84   we should search for another solution.
  85
  86   One of them is to parse the packet, trying to detect inner encapsulation
  87   made by our node. It is difficult or even impossible, especially
  88   taking into account fragmentation. To be short, ttl is not a solution at all.
  89
  90   Current solution: The solution was UNEXPECTEDLY SIMPLE.
  91   We force DF flag on tunnels with preconfigured hop limit,
  92   that is ALL. :-) Well, it does not remove the problem completely,
  93   but exponential growth of network traffic is changed to linear
  94   (branches that exceed pmtu are pruned) and tunnel mtu
  95   rapidly degrades to value <68, where looping stops.
  96   Yes, it is not good if there exists a router in the loop,
  97   which does not force DF, even when encapsulating packets have DF set.
  98   But it is not our problem! Nobody could accuse us, we made
  99   all that we could make. Even if it was your gated that injected
 100   a fatal route into the network, even if it was you who configured
 101   a fatal static route: you are innocent. :-)
 102
 103   Alexey Kuznetsov.
 104 */
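/* Illustration (not part of the driver): with a preconfigured TTL the DF bit
 * is forced on the outer header, so a misconfigured loop collapses the tunnel
 * MTU instead of multiplying traffic. Roughly, with iproute2:
 *
 *   ip tunnel add t1 mode gre remote 192.0.2.1 local 198.51.100.2 ttl 64
 *   ip link set t1 up mtu 1476
 *
 * Each extra encapsulation costs at least 24 bytes of outer IP + GRE header,
 * so the usable MTU shrinks on every turn around the loop and soon drops
 * below 68, at which point transmission stops, as described above.
 */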
 105
 106static bool log_ecn_error = true;
 107module_param(log_ecn_error, bool, 0644);
 108MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 109
 110static struct rtnl_link_ops ipgre_link_ops __read_mostly;
 111static const struct header_ops ipgre_header_ops;
 112
 113static int ipgre_tunnel_init(struct net_device *dev);
 114static void erspan_build_header(struct sk_buff *skb,
 115				u32 id, u32 index,
 116				bool truncate, bool is_ipv4);
 117
 118static unsigned int ipgre_net_id __read_mostly;
 119static unsigned int gre_tap_net_id __read_mostly;
 120static unsigned int erspan_net_id __read_mostly;
 121
 122static int ipgre_err(struct sk_buff *skb, u32 info,
 123		     const struct tnl_ptk_info *tpi)
 124{
 125
 126	/* All the routers (except for Linux) return only
 127	   8 bytes of packet payload. It means that precise relaying of
 128	   ICMP in the real Internet is absolutely infeasible.
 129
 130	   Moreover, Cisco "wise men" put the GRE key in the third word
 131	   of the GRE header. It makes it impossible to maintain even soft
 132	   state for keyed GRE tunnels with checksum enabled. Tell
 133	   them "thank you".
 134
 135	   Well, I wonder: rfc1812 was written by a Cisco employee, so
 136	   why the hell do these idiots break standards established
 137	   by themselves???
 138	   */
 139	struct net *net = dev_net(skb->dev);
 140	struct ip_tunnel_net *itn;
 141	const struct iphdr *iph;
 142	const int type = icmp_hdr(skb)->type;
 143	const int code = icmp_hdr(skb)->code;
 144	unsigned int data_len = 0;
 145	struct ip_tunnel *t;
 146
 147	if (tpi->proto == htons(ETH_P_TEB))
 148		itn = net_generic(net, gre_tap_net_id);
 149	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
 150		 tpi->proto == htons(ETH_P_ERSPAN2))
 151		itn = net_generic(net, erspan_net_id);
 152	else
 153		itn = net_generic(net, ipgre_net_id);
 154
 155	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
 156	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 157			     iph->daddr, iph->saddr, tpi->key);
 158
 159	if (!t)
 160		return -ENOENT;
 161
 162	switch (type) {
 163	default:
 164	case ICMP_PARAMETERPROB:
 165		return 0;
 166
 167	case ICMP_DEST_UNREACH:
 168		switch (code) {
 169		case ICMP_SR_FAILED:
 170		case ICMP_PORT_UNREACH:
 171			/* Impossible event. */
 172			return 0;
 173		default:
 174			/* All others are translated to HOST_UNREACH.
 175			   rfc2003 contains "deep thoughts" about NET_UNREACH,
 176			   I believe they are just ether pollution. --ANK
 177			 */
 178			break;
 179		}
 180		break;
 181
 182	case ICMP_TIME_EXCEEDED:
 183		if (code != ICMP_EXC_TTL)
 184			return 0;
 185		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
 186		break;
 187
 188	case ICMP_REDIRECT:
 189		break;
 190	}
 191
 192#if IS_ENABLED(CONFIG_IPV6)
 193	if (tpi->proto == htons(ETH_P_IPV6) &&
 194	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
 195					type, data_len))
 196		return 0;
 197#endif
 198
 199	if (t->parms.iph.daddr == 0 ||
 200	    ipv4_is_multicast(t->parms.iph.daddr))
 201		return 0;
 202
 203	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
 204		return 0;
 205
 206	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
 207		t->err_count++;
 208	else
 209		t->err_count = 1;
 210	t->err_time = jiffies;
 211
 212	return 0;
 213}
 214
 215static void gre_err(struct sk_buff *skb, u32 info)
 216{
 217	/* All the routers (except for Linux) return only
 218	 * 8 bytes of packet payload. It means that precise relaying of
 219	 * ICMP in the real Internet is absolutely infeasible.
 220	 *
 221	 * Moreover, Cisco "wise men" put the GRE key in the third word
 222	 * of the GRE header. It makes it impossible to maintain even soft
 223	 * state for keyed
 224	 * GRE tunnels with checksum enabled. Tell them "thank you".
 225	 *
 226	 * Well, I wonder: rfc1812 was written by a Cisco employee, so
 227	 * why the hell do these idiots break standards established
 228	 * by themselves???
 229	 */
 230
 231	const struct iphdr *iph = (struct iphdr *)skb->data;
 232	const int type = icmp_hdr(skb)->type;
 233	const int code = icmp_hdr(skb)->code;
 234	struct tnl_ptk_info tpi;
 235
 236	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
 237			     iph->ihl * 4) < 0)
 238		return;
 239
 240	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
 241		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
 242				 skb->dev->ifindex, IPPROTO_GRE);
 243		return;
 244	}
 245	if (type == ICMP_REDIRECT) {
 246		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
 247			      IPPROTO_GRE);
 248		return;
 249	}
 250
 251	ipgre_err(skb, info, &tpi);
 252}
 253
 254static bool is_erspan_type1(int gre_hdr_len)
 255{
 256	/* Both ERSPAN type I (version 0) and type II (version 1) use
 257	 * protocol 0x88BE, but type I has only a 4-byte GRE header,
 258	 * while type II has an 8-byte one.
 259	 */
 260	return gre_hdr_len == 4;
 261}
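/* For reference: a GRE header is 4 bytes of base header plus 4 bytes for each
 * optional field (checksum, key, sequence number). The 8-byte header of
 * ERSPAN type II is the base header plus the sequence number field, as built
 * on the transmit side below; type I adds no optional fields, hence 4 bytes.
 */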
 262
 263static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 264		      int gre_hdr_len)
 265{
 266	struct net *net = dev_net(skb->dev);
 267	struct metadata_dst *tun_dst = NULL;
 268	struct erspan_base_hdr *ershdr;
 269	IP_TUNNEL_DECLARE_FLAGS(flags);
 270	struct ip_tunnel_net *itn;
 271	struct ip_tunnel *tunnel;
 272	const struct iphdr *iph;
 273	struct erspan_md2 *md2;
 274	int ver;
 275	int len;
 276
 277	ip_tunnel_flags_copy(flags, tpi->flags);
 278
 279	itn = net_generic(net, erspan_net_id);
 280	iph = ip_hdr(skb);
 281	if (is_erspan_type1(gre_hdr_len)) {
 282		ver = 0;
 283		__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
 284		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
 285					  iph->saddr, iph->daddr, 0);
 286	} else {
 287		if (unlikely(!pskb_may_pull(skb,
 288					    gre_hdr_len + sizeof(*ershdr))))
 289			return PACKET_REJECT;
 290
 291		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
 292		ver = ershdr->ver;
 293		iph = ip_hdr(skb);
 294		__set_bit(IP_TUNNEL_KEY_BIT, flags);
 295		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
 296					  iph->saddr, iph->daddr, tpi->key);
 297	}
 298
 299	if (tunnel) {
 300		if (is_erspan_type1(gre_hdr_len))
 301			len = gre_hdr_len;
 302		else
 303			len = gre_hdr_len + erspan_hdr_len(ver);
 304
 305		if (unlikely(!pskb_may_pull(skb, len)))
 306			return PACKET_REJECT;
 307
 308		if (__iptunnel_pull_header(skb,
 309					   len,
 310					   htons(ETH_P_TEB),
 311					   false, false) < 0)
 312			goto drop;
 313
 314		if (tunnel->collect_md) {
 315			struct erspan_metadata *pkt_md, *md;
 316			struct ip_tunnel_info *info;
 317			unsigned char *gh;
 318			__be64 tun_id;
 319
 320			__set_bit(IP_TUNNEL_KEY_BIT, tpi->flags);
 321			ip_tunnel_flags_copy(flags, tpi->flags);
 322			tun_id = key32_to_tunnel_id(tpi->key);
 323
 324			tun_dst = ip_tun_rx_dst(skb, flags,
 325						tun_id, sizeof(*md));
 326			if (!tun_dst)
 327				return PACKET_REJECT;
 328
 329			/* skb can be uncloned in __iptunnel_pull_header, so
 330			 * old pkt_md is no longer valid and we need to reset
 331			 * it
 332			 */
 333			gh = skb_network_header(skb) +
 334			     skb_network_header_len(skb);
 335			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
 336							    sizeof(*ershdr));
 337			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
 338			md->version = ver;
 339			md2 = &md->u.md2;
 340			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
 341						       ERSPAN_V2_MDSIZE);
 342
 343			info = &tun_dst->u.tun_info;
 344			__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
 345				  info->key.tun_flags);
 346			info->options_len = sizeof(*md);
 347		}
 348
 349		skb_reset_mac_header(skb);
 350		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 351		return PACKET_RCVD;
 352	}
 353	return PACKET_REJECT;
 354
 355drop:
 356	kfree_skb(skb);
 357	return PACKET_RCVD;
 358}
 359
 360static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 361		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
 362{
 363	struct metadata_dst *tun_dst = NULL;
 364	const struct iphdr *iph;
 365	struct ip_tunnel *tunnel;
 366
 367	iph = ip_hdr(skb);
 368	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 369				  iph->saddr, iph->daddr, tpi->key);
 370
 371	if (tunnel) {
 372		const struct iphdr *tnl_params;
 373
 374		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
 375					   raw_proto, false) < 0)
 376			goto drop;
 377
 378		/* Special case for ipgre_header_parse(), which expects the
 379		 * mac_header to point to the outer IP header.
 380		 */
 381		if (tunnel->dev->header_ops == &ipgre_header_ops)
 382			skb_pop_mac_header(skb);
 383		else
 384			skb_reset_mac_header(skb);
 385
 386		tnl_params = &tunnel->parms.iph;
 387		if (tunnel->collect_md || tnl_params->daddr == 0) {
 388			IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 389			__be64 tun_id;
 390
 391			__set_bit(IP_TUNNEL_CSUM_BIT, flags);
 392			__set_bit(IP_TUNNEL_KEY_BIT, flags);
 393			ip_tunnel_flags_and(flags, tpi->flags, flags);
 394
 395			tun_id = key32_to_tunnel_id(tpi->key);
 396			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
 397			if (!tun_dst)
 398				return PACKET_REJECT;
 399		}
 400
 401		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 402		return PACKET_RCVD;
 403	}
 404	return PACKET_NEXT;
 405
 406drop:
 407	kfree_skb(skb);
 408	return PACKET_RCVD;
 409}
 410
 411static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 412		     int hdr_len)
 413{
 414	struct net *net = dev_net(skb->dev);
 415	struct ip_tunnel_net *itn;
 416	int res;
 417
 418	if (tpi->proto == htons(ETH_P_TEB))
 419		itn = net_generic(net, gre_tap_net_id);
 420	else
 421		itn = net_generic(net, ipgre_net_id);
 422
 423	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
 424	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
 425		/* ipgre tunnels in collect metadata mode should also
 426		 * receive ETH_P_TEB traffic.
 427		 */
 428		itn = net_generic(net, ipgre_net_id);
 429		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
 430	}
 431	return res;
 432}
 433
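/* Entry point registered with the GRE demultiplexer (GREPROTO_CISCO): parse
 * the GRE header, hand ERSPAN protocols to erspan_rcv() and everything else
 * to ipgre_rcv(); packets that no tunnel claims get an ICMP port unreachable
 * and are dropped.
 */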
 434static int gre_rcv(struct sk_buff *skb)
 435{
 436	struct tnl_ptk_info tpi;
 437	bool csum_err = false;
 438	int hdr_len;
 439
 440#ifdef CONFIG_NET_IPGRE_BROADCAST
 441	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
 442		/* Looped back packet, drop it! */
 443		if (rt_is_output_route(skb_rtable(skb)))
 444			goto drop;
 445	}
 446#endif
 447
 448	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
 449	if (hdr_len < 0)
 450		goto drop;
 451
 452	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
 453		     tpi.proto == htons(ETH_P_ERSPAN2))) {
 454		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 455			return 0;
 456		goto out;
 457	}
 458
 459	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 460		return 0;
 461
 462out:
 463	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 464drop:
 465	kfree_skb(skb);
 466	return 0;
 467}
 468
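/* Common transmit path for configured (non collect_md) tunnels: build the GRE
 * header from the tunnel's output flags/key and let ip_tunnel_xmit() add the
 * outer IPv4 header described by @tnl_params.
 */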
 469static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 470		       const struct iphdr *tnl_params,
 471		       __be16 proto)
 472{
 473	struct ip_tunnel *tunnel = netdev_priv(dev);
 474	IP_TUNNEL_DECLARE_FLAGS(flags);
 475
 476	ip_tunnel_flags_copy(flags, tunnel->parms.o_flags);
 477
 478	/* Push GRE header. */
 479	gre_build_header(skb, tunnel->tun_hlen,
 480			 flags, proto, tunnel->parms.o_key,
 481			 test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
 482			 htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
 483
 484	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 485}
 486
 487static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 488{
 489	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 490}
 491
 492static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 493			__be16 proto)
 494{
 495	struct ip_tunnel *tunnel = netdev_priv(dev);
 496	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 497	struct ip_tunnel_info *tun_info;
 498	const struct ip_tunnel_key *key;
 499	int tunnel_hlen;
 500
 501	tun_info = skb_tunnel_info(skb);
 502	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 503		     ip_tunnel_info_af(tun_info) != AF_INET))
 504		goto err_free_skb;
 505
 506	key = &tun_info->key;
 507	tunnel_hlen = gre_calc_hlen(key->tun_flags);
 508
 509	if (skb_cow_head(skb, dev->needed_headroom))
 510		goto err_free_skb;
 511
 512	/* Push Tunnel header. */
 513	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
 514					      tunnel->parms.o_flags)))
 515		goto err_free_skb;
 516
 517	__set_bit(IP_TUNNEL_CSUM_BIT, flags);
 518	__set_bit(IP_TUNNEL_KEY_BIT, flags);
 519	__set_bit(IP_TUNNEL_SEQ_BIT, flags);
 520	ip_tunnel_flags_and(flags, tun_info->key.tun_flags, flags);
 521
 522	gre_build_header(skb, tunnel_hlen, flags, proto,
 523			 tunnel_id_to_key32(tun_info->key.tun_id),
 524			 test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
 525			 htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
 526
 527	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
 528
 529	return;
 530
 531err_free_skb:
 532	kfree_skb(skb);
 533	DEV_STATS_INC(dev, tx_dropped);
 534}
 535
 536static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 537{
 538	struct ip_tunnel *tunnel = netdev_priv(dev);
 539	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 540	struct ip_tunnel_info *tun_info;
 541	const struct ip_tunnel_key *key;
 542	struct erspan_metadata *md;
 543	bool truncate = false;
 544	__be16 proto;
 545	int tunnel_hlen;
 546	int version;
 547	int nhoff;
 548
 549	tun_info = skb_tunnel_info(skb);
 550	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 551		     ip_tunnel_info_af(tun_info) != AF_INET))
 552		goto err_free_skb;
 553
 554	key = &tun_info->key;
 555	if (!test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_info->key.tun_flags))
 556		goto err_free_skb;
 557	if (tun_info->options_len < sizeof(*md))
 558		goto err_free_skb;
 559	md = ip_tunnel_info_opts(tun_info);
 560
 561	/* ERSPAN has a fixed 8-byte GRE header */
 562	version = md->version;
 563	tunnel_hlen = 8 + erspan_hdr_len(version);
 564
 565	if (skb_cow_head(skb, dev->needed_headroom))
 566		goto err_free_skb;
 567
 568	if (gre_handle_offloads(skb, false))
 569		goto err_free_skb;
 570
 571	if (skb->len > dev->mtu + dev->hard_header_len) {
 572		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
 573			goto err_free_skb;
 574		truncate = true;
 575	}
 576
 577	nhoff = skb_network_offset(skb);
 578	if (skb->protocol == htons(ETH_P_IP) &&
 579	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
 580		truncate = true;
 581
 582	if (skb->protocol == htons(ETH_P_IPV6)) {
 583		int thoff;
 584
 585		if (skb_transport_header_was_set(skb))
 586			thoff = skb_transport_offset(skb);
 587		else
 588			thoff = nhoff + sizeof(struct ipv6hdr);
 589		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
 590			truncate = true;
 591	}
 592
 593	if (version == 1) {
 594		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
 595				    ntohl(md->u.index), truncate, true);
 596		proto = htons(ETH_P_ERSPAN);
 597	} else if (version == 2) {
 598		erspan_build_header_v2(skb,
 599				       ntohl(tunnel_id_to_key32(key->tun_id)),
 600				       md->u.md2.dir,
 601				       get_hwid(&md->u.md2),
 602				       truncate, true);
 603		proto = htons(ETH_P_ERSPAN2);
 604	} else {
 605		goto err_free_skb;
 606	}
 607
 608	__set_bit(IP_TUNNEL_SEQ_BIT, flags);
 609	gre_build_header(skb, 8, flags, proto, 0,
 610			 htonl(atomic_fetch_inc(&tunnel->o_seqno)));
 611
 612	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
 613
 614	return;
 615
 616err_free_skb:
 617	kfree_skb(skb);
 618	DEV_STATS_INC(dev, tx_dropped);
 619}
 620
 621static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 622{
 623	struct ip_tunnel_info *info = skb_tunnel_info(skb);
 624	const struct ip_tunnel_key *key;
 625	struct rtable *rt;
 626	struct flowi4 fl4;
 627
 628	if (ip_tunnel_info_af(info) != AF_INET)
 629		return -EINVAL;
 630
 631	key = &info->key;
 632	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
 633			    tunnel_id_to_key32(key->tun_id),
 634			    key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
 635			    skb->mark, skb_get_hash(skb), key->flow_flags);
 636	rt = ip_route_output_key(dev_net(dev), &fl4);
 637	if (IS_ERR(rt))
 638		return PTR_ERR(rt);
 639
 640	ip_rt_put(rt);
 641	info->key.u.ipv4.src = fl4.saddr;
 642	return 0;
 643}
 644
 645static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
 646			      struct net_device *dev)
 647{
 648	struct ip_tunnel *tunnel = netdev_priv(dev);
 649	const struct iphdr *tnl_params;
 650
 651	if (!pskb_inet_may_pull(skb))
 652		goto free_skb;
 653
 654	if (tunnel->collect_md) {
 655		gre_fb_xmit(skb, dev, skb->protocol);
 656		return NETDEV_TX_OK;
 657	}
 658
 659	if (dev->header_ops) {
 660		int pull_len = tunnel->hlen + sizeof(struct iphdr);
 661
 662		if (skb_cow_head(skb, 0))
 663			goto free_skb;
 664
 665		if (!pskb_may_pull(skb, pull_len))
 666			goto free_skb;
 667
 668		tnl_params = (const struct iphdr *)skb->data;
 669
 670		/* ip_tunnel_xmit() needs skb->data pointing to gre header. */
 671		skb_pull(skb, pull_len);
 672		skb_reset_mac_header(skb);
 673
 674		if (skb->ip_summed == CHECKSUM_PARTIAL &&
 675		    skb_checksum_start(skb) < skb->data)
 676			goto free_skb;
 677	} else {
 678		if (skb_cow_head(skb, dev->needed_headroom))
 679			goto free_skb;
 680
 681		tnl_params = &tunnel->parms.iph;
 682	}
 683
 684	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
 685					      tunnel->parms.o_flags)))
 686		goto free_skb;
 687
 688	__gre_xmit(skb, dev, tnl_params, skb->protocol);
 689	return NETDEV_TX_OK;
 690
 691free_skb:
 692	kfree_skb(skb);
 693	DEV_STATS_INC(dev, tx_dropped);
 694	return NETDEV_TX_OK;
 695}
 696
 697static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 698			       struct net_device *dev)
 699{
 700	struct ip_tunnel *tunnel = netdev_priv(dev);
 701	bool truncate = false;
 702	__be16 proto;
 703
 704	if (!pskb_inet_may_pull(skb))
 705		goto free_skb;
 706
 707	if (tunnel->collect_md) {
 708		erspan_fb_xmit(skb, dev);
 709		return NETDEV_TX_OK;
 710	}
 711
 712	if (gre_handle_offloads(skb, false))
 713		goto free_skb;
 714
 715	if (skb_cow_head(skb, dev->needed_headroom))
 716		goto free_skb;
 717
 718	if (skb->len > dev->mtu + dev->hard_header_len) {
 719		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
 720			goto free_skb;
 721		truncate = true;
 722	}
 723
 724	/* Push ERSPAN header */
 725	if (tunnel->erspan_ver == 0) {
 726		proto = htons(ETH_P_ERSPAN);
 727		__clear_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags);
 728	} else if (tunnel->erspan_ver == 1) {
 729		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
 730				    tunnel->index,
 731				    truncate, true);
 732		proto = htons(ETH_P_ERSPAN);
 733	} else if (tunnel->erspan_ver == 2) {
 734		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
 735				       tunnel->dir, tunnel->hwid,
 736				       truncate, true);
 737		proto = htons(ETH_P_ERSPAN2);
 738	} else {
 739		goto free_skb;
 740	}
 741
 742	__clear_bit(IP_TUNNEL_KEY_BIT, tunnel->parms.o_flags);
 743	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
 744	return NETDEV_TX_OK;
 745
 746free_skb:
 747	kfree_skb(skb);
 748	DEV_STATS_INC(dev, tx_dropped);
 749	return NETDEV_TX_OK;
 750}
 751
 752static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
 753				struct net_device *dev)
 754{
 755	struct ip_tunnel *tunnel = netdev_priv(dev);
 756
 757	if (!pskb_inet_may_pull(skb))
 758		goto free_skb;
 759
 760	if (tunnel->collect_md) {
 761		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
 762		return NETDEV_TX_OK;
 763	}
 764
 765	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
 766					      tunnel->parms.o_flags)))
 767		goto free_skb;
 768
 769	if (skb_cow_head(skb, dev->needed_headroom))
 770		goto free_skb;
 771
 772	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
 773	return NETDEV_TX_OK;
 774
 775free_skb:
 776	kfree_skb(skb);
 777	DEV_STATS_INC(dev, tx_dropped);
 778	return NETDEV_TX_OK;
 779}
 780
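/* Recompute the tunnel header length after the output flags changed (e.g. via
 * changelink or SIOCCHGTUNNEL), adjust headroom/MTU accordingly, and drop
 * software GSO when sequence numbers (or checksum combined with an extra
 * encapsulation) make it unusable.
 */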
 781static void ipgre_link_update(struct net_device *dev, bool set_mtu)
 782{
 783	struct ip_tunnel *tunnel = netdev_priv(dev);
 784	int len;
 785
 786	len = tunnel->tun_hlen;
 787	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
 788	len = tunnel->tun_hlen - len;
 789	tunnel->hlen = tunnel->hlen + len;
 790
 791	if (dev->header_ops)
 792		dev->hard_header_len += len;
 793	else
 794		dev->needed_headroom += len;
 795
 796	if (set_mtu)
 797		WRITE_ONCE(dev->mtu, max_t(int, dev->mtu - len, 68));
 798
 799	if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags) ||
 800	    (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
 801	     tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
 802		dev->features &= ~NETIF_F_GSO_SOFTWARE;
 803		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
 804	} else {
 805		dev->features |= NETIF_F_GSO_SOFTWARE;
 806		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
 807	}
 808}
 809
 810static int ipgre_tunnel_ctl(struct net_device *dev,
 811			    struct ip_tunnel_parm_kern *p,
 812			    int cmd)
 813{
 814	__be16 i_flags, o_flags;
 815	int err;
 816
 817	if (!ip_tunnel_flags_is_be16_compat(p->i_flags) ||
 818	    !ip_tunnel_flags_is_be16_compat(p->o_flags))
 819		return -EOVERFLOW;
 820
 821	i_flags = ip_tunnel_flags_to_be16(p->i_flags);
 822	o_flags = ip_tunnel_flags_to_be16(p->o_flags);
 823
 824	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
 825		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
 826		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
 827		    ((i_flags | o_flags) & (GRE_VERSION | GRE_ROUTING)))
 828			return -EINVAL;
 829	}
 830
 831	gre_flags_to_tnl_flags(p->i_flags, i_flags);
 832	gre_flags_to_tnl_flags(p->o_flags, o_flags);
 833
 834	err = ip_tunnel_ctl(dev, p, cmd);
 835	if (err)
 836		return err;
 837
 838	if (cmd == SIOCCHGTUNNEL) {
 839		struct ip_tunnel *t = netdev_priv(dev);
 840
 841		ip_tunnel_flags_copy(t->parms.i_flags, p->i_flags);
 842		ip_tunnel_flags_copy(t->parms.o_flags, p->o_flags);
 843
 844		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
 845			ipgre_link_update(dev, true);
 846	}
 847
 848	i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
 849	ip_tunnel_flags_from_be16(p->i_flags, i_flags);
 850	o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
 851	ip_tunnel_flags_from_be16(p->o_flags, o_flags);
 852
 853	return 0;
 854}
 855
 856/* Nice toy. Unfortunately, useless in real life :-)
 857   It allows one to construct a virtual multiprotocol broadcast "LAN"
 858   over the Internet, provided multicast routing is tuned.
 859
 860
 861   I have no idea whether this bicycle was invented before me,
 862   so that I had to set ARPHRD_IPGRE to a random value.
 863   I have an impression that Cisco could have made something similar,
 864   but this feature is apparently missing in IOS<=11.2(8).
 865
 866   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
 867   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
 868
 869   ping -t 255 224.66.66.66
 870
 871   If nobody answers, mbone does not work.
 872
 873   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
 874   ip addr add 10.66.66.<somewhat>/24 dev Universe
 875   ifconfig Universe up
 876   ifconfig Universe add fe80::<Your_real_addr>/10
 877   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
 878   ftp 10.66.66.66
 879   ...
 880   ftp fec0:6666:6666::193.233.7.65
 881   ...
 882 */
 883static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
 884			unsigned short type,
 885			const void *daddr, const void *saddr, unsigned int len)
 886{
 887	struct ip_tunnel *t = netdev_priv(dev);
 888	struct iphdr *iph;
 889	struct gre_base_hdr *greh;
 890
 891	iph = skb_push(skb, t->hlen + sizeof(*iph));
 892	greh = (struct gre_base_hdr *)(iph+1);
 893	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
 894	greh->protocol = htons(type);
 895
 896	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
 897
 898	/* Set the source hardware address. */
 899	if (saddr)
 900		memcpy(&iph->saddr, saddr, 4);
 901	if (daddr)
 902		memcpy(&iph->daddr, daddr, 4);
 903	if (iph->daddr)
 904		return t->hlen + sizeof(*iph);
 905
 906	return -(t->hlen + sizeof(*iph));
 907}
 908
 909static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
 910{
 911	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
 912	memcpy(haddr, &iph->saddr, 4);
 913	return 4;
 914}
 915
 916static const struct header_ops ipgre_header_ops = {
 917	.create	= ipgre_header,
 918	.parse	= ipgre_header_parse,
 919};
 920
 921#ifdef CONFIG_NET_IPGRE_BROADCAST
 922static int ipgre_open(struct net_device *dev)
 923{
 924	struct ip_tunnel *t = netdev_priv(dev);
 925
 926	if (ipv4_is_multicast(t->parms.iph.daddr)) {
 927		struct flowi4 fl4;
 928		struct rtable *rt;
 929
 930		rt = ip_route_output_gre(t->net, &fl4,
 931					 t->parms.iph.daddr,
 932					 t->parms.iph.saddr,
 933					 t->parms.o_key,
 934					 t->parms.iph.tos & INET_DSCP_MASK,
 935					 t->parms.link);
 936		if (IS_ERR(rt))
 937			return -EADDRNOTAVAIL;
 938		dev = rt->dst.dev;
 939		ip_rt_put(rt);
 940		if (!__in_dev_get_rtnl(dev))
 941			return -EADDRNOTAVAIL;
 942		t->mlink = dev->ifindex;
 943		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
 944	}
 945	return 0;
 946}
 947
 948static int ipgre_close(struct net_device *dev)
 949{
 950	struct ip_tunnel *t = netdev_priv(dev);
 951
 952	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
 953		struct in_device *in_dev;
 954		in_dev = inetdev_by_index(t->net, t->mlink);
 955		if (in_dev)
 956			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
 957	}
 958	return 0;
 959}
 960#endif
 961
 962static const struct net_device_ops ipgre_netdev_ops = {
 963	.ndo_init		= ipgre_tunnel_init,
 964	.ndo_uninit		= ip_tunnel_uninit,
 965#ifdef CONFIG_NET_IPGRE_BROADCAST
 966	.ndo_open		= ipgre_open,
 967	.ndo_stop		= ipgre_close,
 968#endif
 969	.ndo_start_xmit		= ipgre_xmit,
 970	.ndo_siocdevprivate	= ip_tunnel_siocdevprivate,
 971	.ndo_change_mtu		= ip_tunnel_change_mtu,
 972	.ndo_get_stats64	= dev_get_tstats64,
 973	.ndo_get_iflink		= ip_tunnel_get_iflink,
 974	.ndo_tunnel_ctl		= ipgre_tunnel_ctl,
 975};
 976
 977#define GRE_FEATURES (NETIF_F_SG |		\
 978		      NETIF_F_FRAGLIST |	\
 979		      NETIF_F_HIGHDMA |		\
 980		      NETIF_F_HW_CSUM)
 981
 982static void ipgre_tunnel_setup(struct net_device *dev)
 983{
 984	dev->netdev_ops		= &ipgre_netdev_ops;
 985	dev->type		= ARPHRD_IPGRE;
 986	ip_tunnel_setup(dev, ipgre_net_id);
 987}
 988
 989static void __gre_tunnel_init(struct net_device *dev)
 990{
 991	struct ip_tunnel *tunnel;
 992
 993	tunnel = netdev_priv(dev);
 994	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
 995	tunnel->parms.iph.protocol = IPPROTO_GRE;
 996
 997	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
 998	dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
 999
1000	dev->features		|= GRE_FEATURES;
1001	dev->hw_features	|= GRE_FEATURES;
1002
1003	/* TCP offload with GRE SEQ is not supported, nor can we support 2
1004	 * levels of outer headers requiring an update.
1005	 */
1006	if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags))
1007		return;
1008	if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
1009	    tunnel->encap.type != TUNNEL_ENCAP_NONE)
1010		return;
1011
1012	dev->features |= NETIF_F_GSO_SOFTWARE;
1013	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1014
1015	dev->lltx = true;
1016}
1017
1018static int ipgre_tunnel_init(struct net_device *dev)
1019{
1020	struct ip_tunnel *tunnel = netdev_priv(dev);
1021	struct iphdr *iph = &tunnel->parms.iph;
1022
1023	__gre_tunnel_init(dev);
1024
1025	__dev_addr_set(dev, &iph->saddr, 4);
1026	memcpy(dev->broadcast, &iph->daddr, 4);
1027
1028	dev->flags		= IFF_NOARP;
1029	netif_keep_dst(dev);
1030	dev->addr_len		= 4;
1031
1032	if (iph->daddr && !tunnel->collect_md) {
1033#ifdef CONFIG_NET_IPGRE_BROADCAST
1034		if (ipv4_is_multicast(iph->daddr)) {
1035			if (!iph->saddr)
1036				return -EINVAL;
1037			dev->flags = IFF_BROADCAST;
1038			dev->header_ops = &ipgre_header_ops;
1039			dev->hard_header_len = tunnel->hlen + sizeof(*iph);
1040			dev->needed_headroom = 0;
1041		}
1042#endif
1043	} else if (!tunnel->collect_md) {
1044		dev->header_ops = &ipgre_header_ops;
1045		dev->hard_header_len = tunnel->hlen + sizeof(*iph);
1046		dev->needed_headroom = 0;
1047	}
1048
1049	return ip_tunnel_init(dev);
1050}
1051
1052static const struct gre_protocol ipgre_protocol = {
1053	.handler     = gre_rcv,
1054	.err_handler = gre_err,
1055};
1056
1057static int __net_init ipgre_init_net(struct net *net)
1058{
1059	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
1060}
1061
1062static void __net_exit ipgre_exit_batch_rtnl(struct list_head *list_net,
1063					     struct list_head *dev_to_kill)
1064{
1065	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops,
1066			      dev_to_kill);
1067}
1068
1069static struct pernet_operations ipgre_net_ops = {
1070	.init = ipgre_init_net,
1071	.exit_batch_rtnl = ipgre_exit_batch_rtnl,
1072	.id   = &ipgre_net_id,
1073	.size = sizeof(struct ip_tunnel_net),
1074};
1075
1076static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1077				 struct netlink_ext_ack *extack)
1078{
1079	__be16 flags;
1080
1081	if (!data)
1082		return 0;
1083
1084	flags = 0;
1085	if (data[IFLA_GRE_IFLAGS])
1086		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1087	if (data[IFLA_GRE_OFLAGS])
1088		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1089	if (flags & (GRE_VERSION|GRE_ROUTING))
1090		return -EINVAL;
1091
1092	if (data[IFLA_GRE_COLLECT_METADATA] &&
1093	    data[IFLA_GRE_ENCAP_TYPE] &&
1094	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
1095		return -EINVAL;
1096
1097	return 0;
1098}
1099
1100static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1101			      struct netlink_ext_ack *extack)
1102{
1103	__be32 daddr;
1104
1105	if (tb[IFLA_ADDRESS]) {
1106		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1107			return -EINVAL;
1108		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1109			return -EADDRNOTAVAIL;
1110	}
1111
1112	if (!data)
1113		goto out;
1114
1115	if (data[IFLA_GRE_REMOTE]) {
1116		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1117		if (!daddr)
1118			return -EINVAL;
1119	}
1120
1121out:
1122	return ipgre_tunnel_validate(tb, data, extack);
1123}
1124
1125static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
1126			   struct netlink_ext_ack *extack)
1127{
1128	__be16 flags = 0;
1129	int ret;
1130
1131	if (!data)
1132		return 0;
1133
1134	ret = ipgre_tap_validate(tb, data, extack);
1135	if (ret)
1136		return ret;
1137
1138	if (data[IFLA_GRE_ERSPAN_VER] &&
1139	    nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
1140		return 0;
1141
1142	/* ERSPAN type II/III should only have GRE sequence and key flag */
1143	if (data[IFLA_GRE_OFLAGS])
1144		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1145	if (data[IFLA_GRE_IFLAGS])
1146		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1147	if (!data[IFLA_GRE_COLLECT_METADATA] &&
1148	    flags != (GRE_SEQ | GRE_KEY))
1149		return -EINVAL;
1150
1151	/* The ERSPAN Session ID is only 10 bits wide. Since we reuse the
1152	 * 32-bit key field as the ID, check its range.
1153	 */
1154	if (data[IFLA_GRE_IKEY] &&
1155	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1156		return -EINVAL;
1157
1158	if (data[IFLA_GRE_OKEY] &&
1159	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1160		return -EINVAL;
1161
1162	return 0;
1163}
1164
1165static int ipgre_netlink_parms(struct net_device *dev,
1166				struct nlattr *data[],
1167				struct nlattr *tb[],
1168				struct ip_tunnel_parm_kern *parms,
1169				__u32 *fwmark)
1170{
1171	struct ip_tunnel *t = netdev_priv(dev);
1172
1173	memset(parms, 0, sizeof(*parms));
1174
1175	parms->iph.protocol = IPPROTO_GRE;
1176
1177	if (!data)
1178		return 0;
1179
1180	if (data[IFLA_GRE_LINK])
1181		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1182
1183	if (data[IFLA_GRE_IFLAGS])
1184		gre_flags_to_tnl_flags(parms->i_flags,
1185				       nla_get_be16(data[IFLA_GRE_IFLAGS]));
1186
1187	if (data[IFLA_GRE_OFLAGS])
1188		gre_flags_to_tnl_flags(parms->o_flags,
1189				       nla_get_be16(data[IFLA_GRE_OFLAGS]));
1190
1191	if (data[IFLA_GRE_IKEY])
1192		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1193
1194	if (data[IFLA_GRE_OKEY])
1195		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1196
1197	if (data[IFLA_GRE_LOCAL])
1198		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
1199
1200	if (data[IFLA_GRE_REMOTE])
1201		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
1202
1203	if (data[IFLA_GRE_TTL])
1204		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1205
1206	if (data[IFLA_GRE_TOS])
1207		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1208
1209	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
1210		if (t->ignore_df)
1211			return -EINVAL;
1212		parms->iph.frag_off = htons(IP_DF);
1213	}
1214
1215	if (data[IFLA_GRE_COLLECT_METADATA]) {
1216		t->collect_md = true;
1217		if (dev->type == ARPHRD_IPGRE)
1218			dev->type = ARPHRD_NONE;
1219	}
1220
1221	if (data[IFLA_GRE_IGNORE_DF]) {
1222		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
1223		  && (parms->iph.frag_off & htons(IP_DF)))
1224			return -EINVAL;
1225		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
1226	}
1227
1228	if (data[IFLA_GRE_FWMARK])
1229		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1230
1231	return 0;
1232}
1233
1234static int erspan_netlink_parms(struct net_device *dev,
1235				struct nlattr *data[],
1236				struct nlattr *tb[],
1237				struct ip_tunnel_parm_kern *parms,
1238				__u32 *fwmark)
1239{
1240	struct ip_tunnel *t = netdev_priv(dev);
1241	int err;
1242
1243	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
1244	if (err)
1245		return err;
1246	if (!data)
1247		return 0;
1248
1249	if (data[IFLA_GRE_ERSPAN_VER]) {
1250		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1251
1252		if (t->erspan_ver > 2)
1253			return -EINVAL;
1254	}
1255
1256	if (t->erspan_ver == 1) {
1257		if (data[IFLA_GRE_ERSPAN_INDEX]) {
1258			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1259			if (t->index & ~INDEX_MASK)
1260				return -EINVAL;
1261		}
1262	} else if (t->erspan_ver == 2) {
1263		if (data[IFLA_GRE_ERSPAN_DIR]) {
1264			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1265			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
1266				return -EINVAL;
1267		}
1268		if (data[IFLA_GRE_ERSPAN_HWID]) {
1269			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1270			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
1271				return -EINVAL;
1272		}
1273	}
1274
1275	return 0;
1276}
1277
1278/* This function returns true when ENCAP attributes are present in the nl msg */
1279static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1280				      struct ip_tunnel_encap *ipencap)
1281{
1282	bool ret = false;
1283
1284	memset(ipencap, 0, sizeof(*ipencap));
1285
1286	if (!data)
1287		return ret;
1288
1289	if (data[IFLA_GRE_ENCAP_TYPE]) {
1290		ret = true;
1291		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1292	}
1293
1294	if (data[IFLA_GRE_ENCAP_FLAGS]) {
1295		ret = true;
1296		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1297	}
1298
1299	if (data[IFLA_GRE_ENCAP_SPORT]) {
1300		ret = true;
1301		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1302	}
1303
1304	if (data[IFLA_GRE_ENCAP_DPORT]) {
1305		ret = true;
1306		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1307	}
1308
1309	return ret;
1310}
1311
1312static int gre_tap_init(struct net_device *dev)
1313{
1314	__gre_tunnel_init(dev);
1315	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1316	netif_keep_dst(dev);
1317
1318	return ip_tunnel_init(dev);
1319}
1320
1321static const struct net_device_ops gre_tap_netdev_ops = {
1322	.ndo_init		= gre_tap_init,
1323	.ndo_uninit		= ip_tunnel_uninit,
1324	.ndo_start_xmit		= gre_tap_xmit,
1325	.ndo_set_mac_address 	= eth_mac_addr,
1326	.ndo_validate_addr	= eth_validate_addr,
1327	.ndo_change_mtu		= ip_tunnel_change_mtu,
1328	.ndo_get_stats64	= dev_get_tstats64,
1329	.ndo_get_iflink		= ip_tunnel_get_iflink,
1330	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1331};
1332
1333static int erspan_tunnel_init(struct net_device *dev)
1334{
1335	struct ip_tunnel *tunnel = netdev_priv(dev);
1336
1337	if (tunnel->erspan_ver == 0)
1338		tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
1339	else
1340		tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */
1341
1342	tunnel->parms.iph.protocol = IPPROTO_GRE;
1343	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1344		       erspan_hdr_len(tunnel->erspan_ver);
1345
1346	dev->features		|= GRE_FEATURES;
1347	dev->hw_features	|= GRE_FEATURES;
1348	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
1349	netif_keep_dst(dev);
1350
1351	return ip_tunnel_init(dev);
1352}
1353
1354static const struct net_device_ops erspan_netdev_ops = {
1355	.ndo_init		= erspan_tunnel_init,
1356	.ndo_uninit		= ip_tunnel_uninit,
1357	.ndo_start_xmit		= erspan_xmit,
1358	.ndo_set_mac_address	= eth_mac_addr,
1359	.ndo_validate_addr	= eth_validate_addr,
1360	.ndo_change_mtu		= ip_tunnel_change_mtu,
1361	.ndo_get_stats64	= dev_get_tstats64,
1362	.ndo_get_iflink		= ip_tunnel_get_iflink,
1363	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1364};
1365
1366static void ipgre_tap_setup(struct net_device *dev)
1367{
1368	ether_setup(dev);
1369	dev->max_mtu = 0;
1370	dev->netdev_ops	= &gre_tap_netdev_ops;
1371	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1372	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
1373	ip_tunnel_setup(dev, gre_tap_net_id);
1374}
1375
1376static int
1377ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
1378{
1379	struct ip_tunnel_encap ipencap;
1380
1381	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1382		struct ip_tunnel *t = netdev_priv(dev);
1383		int err = ip_tunnel_encap_setup(t, &ipencap);
1384
1385		if (err < 0)
1386			return err;
1387	}
1388
1389	return 0;
1390}
1391
1392static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1393			 struct nlattr *tb[], struct nlattr *data[],
1394			 struct netlink_ext_ack *extack)
1395{
1396	struct ip_tunnel_parm_kern p;
1397	__u32 fwmark = 0;
1398	int err;
1399
1400	err = ipgre_newlink_encap_setup(dev, data);
1401	if (err)
1402		return err;
1403
1404	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1405	if (err < 0)
1406		return err;
1407	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1408}
1409
1410static int erspan_newlink(struct net *src_net, struct net_device *dev,
1411			  struct nlattr *tb[], struct nlattr *data[],
1412			  struct netlink_ext_ack *extack)
1413{
1414	struct ip_tunnel_parm_kern p;
1415	__u32 fwmark = 0;
1416	int err;
1417
1418	err = ipgre_newlink_encap_setup(dev, data);
1419	if (err)
1420		return err;
1421
1422	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1423	if (err)
1424		return err;
1425	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1426}
1427
1428static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1429			    struct nlattr *data[],
1430			    struct netlink_ext_ack *extack)
1431{
1432	struct ip_tunnel *t = netdev_priv(dev);
1433	struct ip_tunnel_parm_kern p;
1434	__u32 fwmark = t->fwmark;
1435	int err;
1436
1437	err = ipgre_newlink_encap_setup(dev, data);
1438	if (err)
1439		return err;
1440
1441	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1442	if (err < 0)
1443		return err;
1444
1445	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1446	if (err < 0)
1447		return err;
1448
1449	ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
1450	ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);
1451
1452	ipgre_link_update(dev, !tb[IFLA_MTU]);
1453
1454	return 0;
1455}
1456
1457static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
1458			     struct nlattr *data[],
1459			     struct netlink_ext_ack *extack)
1460{
1461	struct ip_tunnel *t = netdev_priv(dev);
1462	struct ip_tunnel_parm_kern p;
1463	__u32 fwmark = t->fwmark;
1464	int err;
1465
1466	err = ipgre_newlink_encap_setup(dev, data);
1467	if (err)
1468		return err;
1469
1470	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1471	if (err < 0)
1472		return err;
1473
1474	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1475	if (err < 0)
1476		return err;
1477
1478	ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
1479	ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);
1480
1481	return 0;
1482}
1483
1484static size_t ipgre_get_size(const struct net_device *dev)
1485{
1486	return
1487		/* IFLA_GRE_LINK */
1488		nla_total_size(4) +
1489		/* IFLA_GRE_IFLAGS */
1490		nla_total_size(2) +
1491		/* IFLA_GRE_OFLAGS */
1492		nla_total_size(2) +
1493		/* IFLA_GRE_IKEY */
1494		nla_total_size(4) +
1495		/* IFLA_GRE_OKEY */
1496		nla_total_size(4) +
1497		/* IFLA_GRE_LOCAL */
1498		nla_total_size(4) +
1499		/* IFLA_GRE_REMOTE */
1500		nla_total_size(4) +
1501		/* IFLA_GRE_TTL */
1502		nla_total_size(1) +
1503		/* IFLA_GRE_TOS */
1504		nla_total_size(1) +
1505		/* IFLA_GRE_PMTUDISC */
1506		nla_total_size(1) +
1507		/* IFLA_GRE_ENCAP_TYPE */
1508		nla_total_size(2) +
1509		/* IFLA_GRE_ENCAP_FLAGS */
1510		nla_total_size(2) +
1511		/* IFLA_GRE_ENCAP_SPORT */
1512		nla_total_size(2) +
1513		/* IFLA_GRE_ENCAP_DPORT */
1514		nla_total_size(2) +
1515		/* IFLA_GRE_COLLECT_METADATA */
1516		nla_total_size(0) +
1517		/* IFLA_GRE_IGNORE_DF */
1518		nla_total_size(1) +
1519		/* IFLA_GRE_FWMARK */
1520		nla_total_size(4) +
1521		/* IFLA_GRE_ERSPAN_INDEX */
1522		nla_total_size(4) +
1523		/* IFLA_GRE_ERSPAN_VER */
1524		nla_total_size(1) +
1525		/* IFLA_GRE_ERSPAN_DIR */
1526		nla_total_size(1) +
1527		/* IFLA_GRE_ERSPAN_HWID */
1528		nla_total_size(2) +
1529		0;
1530}
1531
1532static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1533{
1534	struct ip_tunnel *t = netdev_priv(dev);
1535	struct ip_tunnel_parm_kern *p = &t->parms;
1536	IP_TUNNEL_DECLARE_FLAGS(o_flags);
1537
1538	ip_tunnel_flags_copy(o_flags, p->o_flags);
1539
1540	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1541	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
1542			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1543	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
1544			 gre_tnl_flags_to_gre_flags(o_flags)) ||
1545	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1546	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1547	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1548	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1549	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1550	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1551	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1552		       !!(p->iph.frag_off & htons(IP_DF))) ||
1553	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
1554		goto nla_put_failure;
1555
1556	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1557			t->encap.type) ||
1558	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
1559			 t->encap.sport) ||
1560	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
1561			 t->encap.dport) ||
1562	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
1563			t->encap.flags))
1564		goto nla_put_failure;
1565
1566	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
1567		goto nla_put_failure;
1568
1569	if (t->collect_md) {
1570		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
1571			goto nla_put_failure;
1572	}
1573
1574	return 0;
1575
1576nla_put_failure:
1577	return -EMSGSIZE;
1578}
1579
1580static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1581{
1582	struct ip_tunnel *t = netdev_priv(dev);
1583
1584	if (t->erspan_ver <= 2) {
1585		if (t->erspan_ver != 0 && !t->collect_md)
1586			__set_bit(IP_TUNNEL_KEY_BIT, t->parms.o_flags);
1587
1588		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1589			goto nla_put_failure;
1590
1591		if (t->erspan_ver == 1) {
1592			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1593				goto nla_put_failure;
1594		} else if (t->erspan_ver == 2) {
1595			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1596				goto nla_put_failure;
1597			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1598				goto nla_put_failure;
1599		}
1600	}
1601
1602	return ipgre_fill_info(skb, dev);
1603
1604nla_put_failure:
1605	return -EMSGSIZE;
1606}
1607
1608static void erspan_setup(struct net_device *dev)
1609{
1610	struct ip_tunnel *t = netdev_priv(dev);
1611
1612	ether_setup(dev);
1613	dev->max_mtu = 0;
1614	dev->netdev_ops = &erspan_netdev_ops;
1615	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1616	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1617	ip_tunnel_setup(dev, erspan_net_id);
1618	t->erspan_ver = 1;
1619}
1620
1621static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1622	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1623	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1624	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1625	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1626	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1627	[IFLA_GRE_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
1628	[IFLA_GRE_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
1629	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1630	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1631	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
1632	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
1633	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
1634	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
1635	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
1636	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
1637	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
1638	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
1639	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
1640	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
1641	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
1642	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
1643};
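/* The attributes above map onto iproute2's "type gre", "type gretap" and
 * "type erspan" links. Illustrative only, e.g.:
 *
 *   ip link add gre1 type gre local 198.51.100.2 remote 192.0.2.1 ttl 64 key 42
 *   ip link add er1 type erspan local 198.51.100.2 remote 192.0.2.1 \
 *          seq key 100 erspan_ver 1 erspan 123
 *   ip link add tap1 type gretap external    # collect_md / IFLA_GRE_COLLECT_METADATA
 */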
1644
1645static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1646	.kind		= "gre",
1647	.maxtype	= IFLA_GRE_MAX,
1648	.policy		= ipgre_policy,
1649	.priv_size	= sizeof(struct ip_tunnel),
1650	.setup		= ipgre_tunnel_setup,
1651	.validate	= ipgre_tunnel_validate,
1652	.newlink	= ipgre_newlink,
1653	.changelink	= ipgre_changelink,
1654	.dellink	= ip_tunnel_dellink,
1655	.get_size	= ipgre_get_size,
1656	.fill_info	= ipgre_fill_info,
1657	.get_link_net	= ip_tunnel_get_link_net,
1658};
1659
1660static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1661	.kind		= "gretap",
1662	.maxtype	= IFLA_GRE_MAX,
1663	.policy		= ipgre_policy,
1664	.priv_size	= sizeof(struct ip_tunnel),
1665	.setup		= ipgre_tap_setup,
1666	.validate	= ipgre_tap_validate,
1667	.newlink	= ipgre_newlink,
1668	.changelink	= ipgre_changelink,
1669	.dellink	= ip_tunnel_dellink,
1670	.get_size	= ipgre_get_size,
1671	.fill_info	= ipgre_fill_info,
1672	.get_link_net	= ip_tunnel_get_link_net,
1673};
1674
1675static struct rtnl_link_ops erspan_link_ops __read_mostly = {
1676	.kind		= "erspan",
1677	.maxtype	= IFLA_GRE_MAX,
1678	.policy		= ipgre_policy,
1679	.priv_size	= sizeof(struct ip_tunnel),
1680	.setup		= erspan_setup,
1681	.validate	= erspan_validate,
1682	.newlink	= erspan_newlink,
1683	.changelink	= erspan_changelink,
1684	.dellink	= ip_tunnel_dellink,
1685	.get_size	= ipgre_get_size,
1686	.fill_info	= erspan_fill_info,
1687	.get_link_net	= ip_tunnel_get_link_net,
1688};
1689
1690struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1691					u8 name_assign_type)
1692{
1693	struct nlattr *tb[IFLA_MAX + 1];
1694	struct net_device *dev;
1695	LIST_HEAD(list_kill);
1696	struct ip_tunnel *t;
1697	int err;
1698
1699	memset(&tb, 0, sizeof(tb));
1700
1701	dev = rtnl_create_link(net, name, name_assign_type,
1702			       &ipgre_tap_ops, tb, NULL);
1703	if (IS_ERR(dev))
1704		return dev;
1705
1706	/* Configure flow based GRE device. */
1707	t = netdev_priv(dev);
1708	t->collect_md = true;
1709
1710	err = ipgre_newlink(net, dev, tb, NULL, NULL);
1711	if (err < 0) {
1712		free_netdev(dev);
1713		return ERR_PTR(err);
1714	}
1715
1716	/* openvswitch users expect packet sizes to be unrestricted,
1717	 * so set the largest MTU we can.
1718	 */
1719	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1720	if (err)
1721		goto out;
1722
1723	err = rtnl_configure_link(dev, NULL, 0, NULL);
1724	if (err < 0)
1725		goto out;
1726
1727	return dev;
1728out:
1729	ip_tunnel_dellink(dev, &list_kill);
1730	unregister_netdevice_many(&list_kill);
1731	return ERR_PTR(err);
1732}
1733EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
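/* Sketch of how a kernel caller (openvswitch is the in-tree user) might use
 * the helper above; error handling trimmed and names illustrative:
 *
 *	struct net_device *dev;
 *
 *	rtnl_lock();
 *	dev = gretap_fb_dev_create(net, "gre_sys", NET_NAME_USER);
 *	if (IS_ERR(dev))
 *		err = PTR_ERR(dev);
 *	rtnl_unlock();
 */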
1734
1735static int __net_init ipgre_tap_init_net(struct net *net)
1736{
1737	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
1738}
1739
1740static void __net_exit ipgre_tap_exit_batch_rtnl(struct list_head *list_net,
1741						 struct list_head *dev_to_kill)
1742{
1743	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops,
1744			      dev_to_kill);
1745}
1746
1747static struct pernet_operations ipgre_tap_net_ops = {
1748	.init = ipgre_tap_init_net,
1749	.exit_batch_rtnl = ipgre_tap_exit_batch_rtnl,
1750	.id   = &gre_tap_net_id,
1751	.size = sizeof(struct ip_tunnel_net),
1752};
1753
1754static int __net_init erspan_init_net(struct net *net)
1755{
1756	return ip_tunnel_init_net(net, erspan_net_id,
1757				  &erspan_link_ops, "erspan0");
1758}
1759
1760static void __net_exit erspan_exit_batch_rtnl(struct list_head *net_list,
1761					      struct list_head *dev_to_kill)
1762{
1763	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops,
1764			      dev_to_kill);
1765}
1766
1767static struct pernet_operations erspan_net_ops = {
1768	.init = erspan_init_net,
1769	.exit_batch_rtnl = erspan_exit_batch_rtnl,
1770	.id   = &erspan_net_id,
1771	.size = sizeof(struct ip_tunnel_net),
1772};
1773
1774static int __init ipgre_init(void)
1775{
1776	int err;
1777
1778	pr_info("GRE over IPv4 tunneling driver\n");
1779
1780	err = register_pernet_device(&ipgre_net_ops);
1781	if (err < 0)
1782		return err;
1783
1784	err = register_pernet_device(&ipgre_tap_net_ops);
1785	if (err < 0)
1786		goto pnet_tap_failed;
1787
1788	err = register_pernet_device(&erspan_net_ops);
1789	if (err < 0)
1790		goto pnet_erspan_failed;
1791
1792	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1793	if (err < 0) {
1794		pr_info("%s: can't add protocol\n", __func__);
1795		goto add_proto_failed;
1796	}
1797
1798	err = rtnl_link_register(&ipgre_link_ops);
1799	if (err < 0)
1800		goto rtnl_link_failed;
1801
1802	err = rtnl_link_register(&ipgre_tap_ops);
1803	if (err < 0)
1804		goto tap_ops_failed;
1805
1806	err = rtnl_link_register(&erspan_link_ops);
1807	if (err < 0)
1808		goto erspan_link_failed;
1809
1810	return 0;
1811
1812erspan_link_failed:
1813	rtnl_link_unregister(&ipgre_tap_ops);
1814tap_ops_failed:
1815	rtnl_link_unregister(&ipgre_link_ops);
1816rtnl_link_failed:
1817	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1818add_proto_failed:
1819	unregister_pernet_device(&erspan_net_ops);
1820pnet_erspan_failed:
1821	unregister_pernet_device(&ipgre_tap_net_ops);
1822pnet_tap_failed:
1823	unregister_pernet_device(&ipgre_net_ops);
1824	return err;
1825}
1826
1827static void __exit ipgre_fini(void)
1828{
1829	rtnl_link_unregister(&ipgre_tap_ops);
1830	rtnl_link_unregister(&ipgre_link_ops);
1831	rtnl_link_unregister(&erspan_link_ops);
1832	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1833	unregister_pernet_device(&ipgre_tap_net_ops);
1834	unregister_pernet_device(&ipgre_net_ops);
1835	unregister_pernet_device(&erspan_net_ops);
1836}
1837
1838module_init(ipgre_init);
1839module_exit(ipgre_fini);
1840MODULE_DESCRIPTION("IPv4 GRE tunnels over IP library");
1841MODULE_LICENSE("GPL");
1842MODULE_ALIAS_RTNL_LINK("gre");
1843MODULE_ALIAS_RTNL_LINK("gretap");
1844MODULE_ALIAS_RTNL_LINK("erspan");
1845MODULE_ALIAS_NETDEV("gre0");
1846MODULE_ALIAS_NETDEV("gretap0");
1847MODULE_ALIAS_NETDEV("erspan0");
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	Linux NET3:	GRE over IP protocol decoder.
   4 *
   5 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
   6 */
   7
   8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   9
  10#include <linux/capability.h>
  11#include <linux/module.h>
  12#include <linux/types.h>
  13#include <linux/kernel.h>
  14#include <linux/slab.h>
  15#include <linux/uaccess.h>
  16#include <linux/skbuff.h>
  17#include <linux/netdevice.h>
  18#include <linux/in.h>
  19#include <linux/tcp.h>
  20#include <linux/udp.h>
  21#include <linux/if_arp.h>
  22#include <linux/if_vlan.h>
  23#include <linux/init.h>
  24#include <linux/in6.h>
  25#include <linux/inetdevice.h>
  26#include <linux/igmp.h>
  27#include <linux/netfilter_ipv4.h>
  28#include <linux/etherdevice.h>
  29#include <linux/if_ether.h>
  30
  31#include <net/sock.h>
  32#include <net/ip.h>
  33#include <net/icmp.h>
  34#include <net/protocol.h>
  35#include <net/ip_tunnels.h>
  36#include <net/arp.h>
  37#include <net/checksum.h>
  38#include <net/dsfield.h>
  39#include <net/inet_ecn.h>
  40#include <net/xfrm.h>
  41#include <net/net_namespace.h>
  42#include <net/netns/generic.h>
  43#include <net/rtnetlink.h>
  44#include <net/gre.h>
  45#include <net/dst_metadata.h>
  46#include <net/erspan.h>
 
  47
  48/*
  49   Problems & solutions
  50   --------------------
  51
  52   1. The most important issue is detecting local dead loops.
  53   They would cause complete host lockup in transmit, which
  54   would be "resolved" by stack overflow or, if queueing is enabled,
  55   with infinite looping in net_bh.
  56
  57   We cannot track such dead loops during route installation,
  58   it is infeasible task. The most general solutions would be
  59   to keep skb->encapsulation counter (sort of local ttl),
  60   and silently drop packet when it expires. It is a good
  61   solution, but it supposes maintaining new variable in ALL
  62   skb, even if no tunneling is used.
  63
  64   Current solution: xmit_recursion breaks dead loops. This is a percpu
  65   counter, since when we enter the first ndo_xmit(), cpu migration is
  66   forbidden. We force an exit if this counter reaches RECURSION_LIMIT
  67
  68   2. Networking dead loops would not kill routers, but would really
  69   kill network. IP hop limit plays role of "t->recursion" in this case,
  70   if we copy it from packet being encapsulated to upper header.
  71   It is very good solution, but it introduces two problems:
  72
  73   - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
  74     do not work over tunnels.
  75   - traceroute does not work. I planned to relay ICMP from tunnel,
  76     so that this problem would be solved and traceroute output
  77     would even more informative. This idea appeared to be wrong:
  78     only Linux complies to rfc1812 now (yes, guys, Linux is the only
  79     true router now :-)), all routers (at least, in neighbourhood of mine)
  80     return only 8 bytes of payload. It is the end.
  81
  82   Hence, if we want that OSPF worked or traceroute said something reasonable,
  83   we should search for another solution.
  84
  85   One of them is to parse packet trying to detect inner encapsulation
  86   made by our node. It is difficult or even impossible, especially,
  87   taking into account fragmentation. TO be short, ttl is not solution at all.
  88
  89   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   90   We force the DF flag on tunnels with a preconfigured hop limit,
   91   that is ALL. :-) Well, it does not remove the problem completely,
   92   but exponential growth of network traffic is changed to linear
   93   (branches that exceed the pmtu are pruned) and the tunnel mtu
   94   rapidly degrades to a value <68, where looping stops.
   95   Yes, it is not good if there is a router in the loop
   96   which does not force DF, even when encapsulating packets have DF set.
   97   But that is not our problem! Nobody could accuse us; we did
   98   all that we could. Even if it was your gated that injected
   99   the fatal route into the network, even if it was you who configured
  100   the fatal static route: you are innocent. :-)
 101
 102   Alexey Kuznetsov.
 103 */
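     /* To put numbers on "rapidly degrades": a plain IPv4 GRE tunnel adds
      * roughly 24 bytes per nesting level (20-byte outer IPv4 header plus a
      * 4-byte GRE base header, more with key/csum/seq).  With DF forced,
      * path MTU discovery shrinks the usable tunnel MTU by that amount at
      * every level, so starting from a 1500-byte path it takes on the order
      * of (1500 - 68) / 24, i.e. about 60 self-encapsulations, before the
      * tunnel MTU drops below the 68-byte IPv4 minimum and the looping
      * branch dies out.
      */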
 104
 105static bool log_ecn_error = true;
 106module_param(log_ecn_error, bool, 0644);
 107MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 108
 109static struct rtnl_link_ops ipgre_link_ops __read_mostly;
 
 
 110static int ipgre_tunnel_init(struct net_device *dev);
 111static void erspan_build_header(struct sk_buff *skb,
 112				u32 id, u32 index,
 113				bool truncate, bool is_ipv4);
 114
 115static unsigned int ipgre_net_id __read_mostly;
 116static unsigned int gre_tap_net_id __read_mostly;
 117static unsigned int erspan_net_id __read_mostly;
 118
 119static int ipgre_err(struct sk_buff *skb, u32 info,
 120		     const struct tnl_ptk_info *tpi)
 121{
 122
  123	/* All the routers (except for Linux) return only
  124	   8 bytes of packet payload. This means that precise relaying
  125	   of ICMP in the real Internet is absolutely infeasible.
  126
  127	   Moreover, Cisco "wise men" put the GRE key in the third word
  128	   of the GRE header. That makes it impossible to maintain even
  129	   soft state for keyed GRE tunnels with checksumming enabled.
  130	   Tell them "thank you".
  131
  132	   Well, I wonder: RFC 1812 was written by a Cisco employee,
  133	   so why the hell do these idiots break the standards they
  134	   established themselves?
  135	   */
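     	/* For reference, the GRE header (RFC 2784/2890) is laid out as:
     	 * word 0 = flags + protocol, then, in this order and only when the
     	 * corresponding flag is set: checksum + reserved, key, sequence
     	 * number.  So on a tunnel with both checksumming and a key, the key
     	 * sits in bytes 8-11 of the GRE header - exactly the part that is
     	 * lost when a router echoes only 8 bytes of payload in its ICMP
     	 * error.
     	 */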
 136	struct net *net = dev_net(skb->dev);
 137	struct ip_tunnel_net *itn;
 138	const struct iphdr *iph;
 139	const int type = icmp_hdr(skb)->type;
 140	const int code = icmp_hdr(skb)->code;
 141	unsigned int data_len = 0;
 142	struct ip_tunnel *t;
 143
 144	if (tpi->proto == htons(ETH_P_TEB))
 145		itn = net_generic(net, gre_tap_net_id);
 146	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
 147		 tpi->proto == htons(ETH_P_ERSPAN2))
 148		itn = net_generic(net, erspan_net_id);
 149	else
 150		itn = net_generic(net, ipgre_net_id);
 151
 152	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
 153	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 154			     iph->daddr, iph->saddr, tpi->key);
 155
 156	if (!t)
 157		return -ENOENT;
 158
 159	switch (type) {
 160	default:
 161	case ICMP_PARAMETERPROB:
 162		return 0;
 163
 164	case ICMP_DEST_UNREACH:
 165		switch (code) {
 166		case ICMP_SR_FAILED:
 167		case ICMP_PORT_UNREACH:
 168			/* Impossible event. */
 169			return 0;
 170		default:
 171			/* All others are translated to HOST_UNREACH.
 172			   rfc2003 contains "deep thoughts" about NET_UNREACH,
 173			   I believe they are just ether pollution. --ANK
 174			 */
 175			break;
 176		}
 177		break;
 178
 179	case ICMP_TIME_EXCEEDED:
 180		if (code != ICMP_EXC_TTL)
 181			return 0;
 182		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
 183		break;
 184
 185	case ICMP_REDIRECT:
 186		break;
 187	}
 188
 189#if IS_ENABLED(CONFIG_IPV6)
  190	if (tpi->proto == htons(ETH_P_IPV6) &&
  191	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
  192					type, data_len))
  193		return 0;
 194#endif
 195
 196	if (t->parms.iph.daddr == 0 ||
 197	    ipv4_is_multicast(t->parms.iph.daddr))
 198		return 0;
 199
 200	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
 201		return 0;
 202
 203	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
 204		t->err_count++;
 205	else
 206		t->err_count = 1;
 207	t->err_time = jiffies;
 208
 209	return 0;
 210}
 211
 212static void gre_err(struct sk_buff *skb, u32 info)
 213{
  214	/* All the routers (except for Linux) return only
  215	 * 8 bytes of packet payload. This means that precise relaying
  216	 * of ICMP in the real Internet is absolutely infeasible.
  217	 *
  218	 * Moreover, Cisco "wise men" put the GRE key in the third word
  219	 * of the GRE header. That makes it impossible to maintain even
  220	 * soft state for keyed GRE tunnels with checksumming enabled.
  221	 * Tell them "thank you".
  222	 *
  223	 * Well, I wonder: RFC 1812 was written by a Cisco employee,
  224	 * so why the hell do these idiots break the standards they
  225	 * established themselves?
  226	 */
 227
 228	const struct iphdr *iph = (struct iphdr *)skb->data;
 229	const int type = icmp_hdr(skb)->type;
 230	const int code = icmp_hdr(skb)->code;
 231	struct tnl_ptk_info tpi;
 232
 233	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
 234			     iph->ihl * 4) < 0)
 235		return;
 236
 237	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
 238		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
 239				 skb->dev->ifindex, IPPROTO_GRE);
 240		return;
 241	}
 242	if (type == ICMP_REDIRECT) {
 243		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
 244			      IPPROTO_GRE);
 245		return;
 246	}
 247
 248	ipgre_err(skb, info, &tpi);
 249}
 250
 251static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 252		      int gre_hdr_len)
 253{
 254	struct net *net = dev_net(skb->dev);
 255	struct metadata_dst *tun_dst = NULL;
 256	struct erspan_base_hdr *ershdr;
 
 257	struct ip_tunnel_net *itn;
 258	struct ip_tunnel *tunnel;
 259	const struct iphdr *iph;
 260	struct erspan_md2 *md2;
 261	int ver;
 262	int len;
 263
 
 
 264	itn = net_generic(net, erspan_net_id);
 265
 266	iph = ip_hdr(skb);
 267	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
 268	ver = ershdr->ver;
 269
 270	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
 271				  tpi->flags | TUNNEL_KEY,
 272				  iph->saddr, iph->daddr, tpi->key);
 273
 274	if (tunnel) {
 275		len = gre_hdr_len + erspan_hdr_len(ver);
 276		if (unlikely(!pskb_may_pull(skb, len)))
 277			return PACKET_REJECT;
 278
 279		if (__iptunnel_pull_header(skb,
 280					   len,
 281					   htons(ETH_P_TEB),
 282					   false, false) < 0)
 283			goto drop;
 284
 285		if (tunnel->collect_md) {
 286			struct erspan_metadata *pkt_md, *md;
 287			struct ip_tunnel_info *info;
 288			unsigned char *gh;
 289			__be64 tun_id;
 290			__be16 flags;
 291
 292			tpi->flags |= TUNNEL_KEY;
 293			flags = tpi->flags;
 294			tun_id = key32_to_tunnel_id(tpi->key);
 295
 296			tun_dst = ip_tun_rx_dst(skb, flags,
 297						tun_id, sizeof(*md));
 298			if (!tun_dst)
 299				return PACKET_REJECT;
 300
 301			/* skb can be uncloned in __iptunnel_pull_header, so
 302			 * old pkt_md is no longer valid and we need to reset
 303			 * it
 304			 */
 305			gh = skb_network_header(skb) +
 306			     skb_network_header_len(skb);
 307			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
 308							    sizeof(*ershdr));
 309			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
 310			md->version = ver;
 311			md2 = &md->u.md2;
 312			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
 313						       ERSPAN_V2_MDSIZE);
 314
 315			info = &tun_dst->u.tun_info;
 316			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
 
 317			info->options_len = sizeof(*md);
 318		}
 319
 320		skb_reset_mac_header(skb);
 321		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 322		return PACKET_RCVD;
 323	}
 324	return PACKET_REJECT;
 325
 326drop:
 327	kfree_skb(skb);
 328	return PACKET_RCVD;
 329}
 330
 331static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 332		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
 333{
 334	struct metadata_dst *tun_dst = NULL;
 335	const struct iphdr *iph;
 336	struct ip_tunnel *tunnel;
 337
 338	iph = ip_hdr(skb);
 339	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 340				  iph->saddr, iph->daddr, tpi->key);
 341
 342	if (tunnel) {
 
 
 343		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
 344					   raw_proto, false) < 0)
 345			goto drop;
 346
 347		if (tunnel->dev->type != ARPHRD_NONE)
 348			skb_pop_mac_header(skb);
 349		else
 350			skb_reset_mac_header(skb);
 351		if (tunnel->collect_md) {
 352			__be16 flags;
 
 
 353			__be64 tun_id;
 354
 355			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
 356			tun_id = key32_to_tunnel_id(tpi->key);
 357			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
 358			if (!tun_dst)
 359				return PACKET_REJECT;
 360		}
 361
 362		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 363		return PACKET_RCVD;
 364	}
 365	return PACKET_NEXT;
 366
 367drop:
 368	kfree_skb(skb);
 369	return PACKET_RCVD;
 370}
 371
 372static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 373		     int hdr_len)
 374{
 375	struct net *net = dev_net(skb->dev);
 376	struct ip_tunnel_net *itn;
 377	int res;
 378
 379	if (tpi->proto == htons(ETH_P_TEB))
 380		itn = net_generic(net, gre_tap_net_id);
 381	else
 382		itn = net_generic(net, ipgre_net_id);
 383
 384	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
 385	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
  386		/* ipgre tunnels in collect metadata mode should also
  387		 * receive ETH_P_TEB traffic.
  388		 */
 389		itn = net_generic(net, ipgre_net_id);
 390		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
 391	}
 392	return res;
 393}
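     /* A gre device in collect-metadata mode (created with something like
      * "ip link add dev gre-flow type gre external") has dev->type set to
      * ARPHRD_NONE, so the second lookup above is what lets it pick up
      * Ethernet (ETH_P_TEB) payloads in addition to bare IP ones.
      */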
 394
 395static int gre_rcv(struct sk_buff *skb)
 396{
 397	struct tnl_ptk_info tpi;
 398	bool csum_err = false;
 399	int hdr_len;
 400
 401#ifdef CONFIG_NET_IPGRE_BROADCAST
 402	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
 403		/* Looped back packet, drop it! */
 404		if (rt_is_output_route(skb_rtable(skb)))
 405			goto drop;
 406	}
 407#endif
 408
 409	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
 410	if (hdr_len < 0)
 411		goto drop;
 412
 413	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
 414		     tpi.proto == htons(ETH_P_ERSPAN2))) {
 415		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 416			return 0;
 417		goto out;
 418	}
 419
 420	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 421		return 0;
 422
 423out:
 424	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 425drop:
 426	kfree_skb(skb);
 427	return 0;
 428}
 429
 430static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 431		       const struct iphdr *tnl_params,
 432		       __be16 proto)
 433{
 434	struct ip_tunnel *tunnel = netdev_priv(dev);
 
 435
 436	if (tunnel->parms.o_flags & TUNNEL_SEQ)
 437		tunnel->o_seqno++;
 438
 439	/* Push GRE header. */
 440	gre_build_header(skb, tunnel->tun_hlen,
 441			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
 442			 htonl(tunnel->o_seqno));
 
 443
 444	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 445}
 446
 447static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 448{
 449	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 450}
 451
 452static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 453			__be16 proto)
 454{
 455	struct ip_tunnel *tunnel = netdev_priv(dev);
 
 456	struct ip_tunnel_info *tun_info;
 457	const struct ip_tunnel_key *key;
 458	int tunnel_hlen;
 459	__be16 flags;
 460
 461	tun_info = skb_tunnel_info(skb);
 462	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 463		     ip_tunnel_info_af(tun_info) != AF_INET))
 464		goto err_free_skb;
 465
 466	key = &tun_info->key;
 467	tunnel_hlen = gre_calc_hlen(key->tun_flags);
 468
 469	if (skb_cow_head(skb, dev->needed_headroom))
 470		goto err_free_skb;
 471
 472	/* Push Tunnel header. */
 473	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
 
 474		goto err_free_skb;
 475
 476	flags = tun_info->key.tun_flags &
 477		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
 478	gre_build_header(skb, tunnel_hlen, flags, proto,
 479			 tunnel_id_to_key32(tun_info->key.tun_id),
 480			 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
 
 481
 482	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
 483
 484	return;
 485
 486err_free_skb:
 487	kfree_skb(skb);
 488	dev->stats.tx_dropped++;
 489}
 490
 491static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 492{
 493	struct ip_tunnel *tunnel = netdev_priv(dev);
 
 494	struct ip_tunnel_info *tun_info;
 495	const struct ip_tunnel_key *key;
 496	struct erspan_metadata *md;
 497	bool truncate = false;
 498	__be16 proto;
 499	int tunnel_hlen;
 500	int version;
 501	int nhoff;
 502	int thoff;
 503
 504	tun_info = skb_tunnel_info(skb);
 505	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 506		     ip_tunnel_info_af(tun_info) != AF_INET))
 507		goto err_free_skb;
 508
 509	key = &tun_info->key;
 510	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
 511		goto err_free_skb;
 512	if (tun_info->options_len < sizeof(*md))
 513		goto err_free_skb;
 514	md = ip_tunnel_info_opts(tun_info);
 515
  516	/* ERSPAN uses a fixed 8-byte GRE header */
 517	version = md->version;
 518	tunnel_hlen = 8 + erspan_hdr_len(version);
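     	/* i.e. 4 bytes of GRE base header plus 4 bytes of sequence number,
     	 * followed by the ERSPAN header itself: 8 bytes for type II
     	 * (version 1) or 12 bytes for type III (version 2), which is what
     	 * erspan_hdr_len() returns.
     	 */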
 519
 520	if (skb_cow_head(skb, dev->needed_headroom))
 521		goto err_free_skb;
 522
 523	if (gre_handle_offloads(skb, false))
 524		goto err_free_skb;
 525
 526	if (skb->len > dev->mtu + dev->hard_header_len) {
 527		pskb_trim(skb, dev->mtu + dev->hard_header_len);
 
 528		truncate = true;
 529	}
 530
 531	nhoff = skb_network_header(skb) - skb_mac_header(skb);
 532	if (skb->protocol == htons(ETH_P_IP) &&
 533	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
 534		truncate = true;
 535
 536	thoff = skb_transport_header(skb) - skb_mac_header(skb);
 537	if (skb->protocol == htons(ETH_P_IPV6) &&
 538	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
 539		truncate = true;
 540
 541	if (version == 1) {
 542		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
 543				    ntohl(md->u.index), truncate, true);
 544		proto = htons(ETH_P_ERSPAN);
 545	} else if (version == 2) {
 546		erspan_build_header_v2(skb,
 547				       ntohl(tunnel_id_to_key32(key->tun_id)),
 548				       md->u.md2.dir,
 549				       get_hwid(&md->u.md2),
 550				       truncate, true);
 551		proto = htons(ETH_P_ERSPAN2);
 552	} else {
 553		goto err_free_skb;
 554	}
 555
 556	gre_build_header(skb, 8, TUNNEL_SEQ,
 557			 proto, 0, htonl(tunnel->o_seqno++));
 
 558
 559	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
 560
 561	return;
 562
 563err_free_skb:
 564	kfree_skb(skb);
 565	dev->stats.tx_dropped++;
 566}
 567
 568static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 569{
 570	struct ip_tunnel_info *info = skb_tunnel_info(skb);
 571	const struct ip_tunnel_key *key;
 572	struct rtable *rt;
 573	struct flowi4 fl4;
 574
 575	if (ip_tunnel_info_af(info) != AF_INET)
 576		return -EINVAL;
 577
 578	key = &info->key;
 579	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
 580			    tunnel_id_to_key32(key->tun_id), key->tos, 0,
 581			    skb->mark, skb_get_hash(skb));
 
 582	rt = ip_route_output_key(dev_net(dev), &fl4);
 583	if (IS_ERR(rt))
 584		return PTR_ERR(rt);
 585
 586	ip_rt_put(rt);
 587	info->key.u.ipv4.src = fl4.saddr;
 588	return 0;
 589}
 590
 591static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
 592			      struct net_device *dev)
 593{
 594	struct ip_tunnel *tunnel = netdev_priv(dev);
 595	const struct iphdr *tnl_params;
 596
 597	if (!pskb_inet_may_pull(skb))
 598		goto free_skb;
 599
 600	if (tunnel->collect_md) {
 601		gre_fb_xmit(skb, dev, skb->protocol);
 602		return NETDEV_TX_OK;
 603	}
 604
 605	if (dev->header_ops) {
 606		/* Need space for new headers */
 607		if (skb_cow_head(skb, dev->needed_headroom -
 608				      (tunnel->hlen + sizeof(struct iphdr))))
 609			goto free_skb;
 610
 611		tnl_params = (const struct iphdr *)skb->data;
 612
 613		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
 614		 * to gre header.
 615		 */
 616		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
 617		skb_reset_mac_header(skb);
 618	} else {
 619		if (skb_cow_head(skb, dev->needed_headroom))
 620			goto free_skb;
 621
 622		tnl_params = &tunnel->parms.iph;
 623	}
 624
 625	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
 
 626		goto free_skb;
 627
 628	__gre_xmit(skb, dev, tnl_params, skb->protocol);
 629	return NETDEV_TX_OK;
 630
 631free_skb:
 632	kfree_skb(skb);
 633	dev->stats.tx_dropped++;
 634	return NETDEV_TX_OK;
 635}
 636
 637static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 638			       struct net_device *dev)
 639{
 640	struct ip_tunnel *tunnel = netdev_priv(dev);
 641	bool truncate = false;
 642	__be16 proto;
 643
 644	if (!pskb_inet_may_pull(skb))
 645		goto free_skb;
 646
 647	if (tunnel->collect_md) {
 648		erspan_fb_xmit(skb, dev);
 649		return NETDEV_TX_OK;
 650	}
 651
 652	if (gre_handle_offloads(skb, false))
 653		goto free_skb;
 654
 655	if (skb_cow_head(skb, dev->needed_headroom))
 656		goto free_skb;
 657
 658	if (skb->len > dev->mtu + dev->hard_header_len) {
 659		pskb_trim(skb, dev->mtu + dev->hard_header_len);
 
 660		truncate = true;
 661	}
 662
 663	/* Push ERSPAN header */
 664	if (tunnel->erspan_ver == 1) {
 665		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
 666				    tunnel->index,
 667				    truncate, true);
 668		proto = htons(ETH_P_ERSPAN);
 669	} else if (tunnel->erspan_ver == 2) {
 670		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
 671				       tunnel->dir, tunnel->hwid,
 672				       truncate, true);
 673		proto = htons(ETH_P_ERSPAN2);
 674	} else {
 675		goto free_skb;
 676	}
 677
 678	tunnel->parms.o_flags &= ~TUNNEL_KEY;
 679	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
 680	return NETDEV_TX_OK;
 681
 682free_skb:
 683	kfree_skb(skb);
 684	dev->stats.tx_dropped++;
 685	return NETDEV_TX_OK;
 686}
 687
 688static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
 689				struct net_device *dev)
 690{
 691	struct ip_tunnel *tunnel = netdev_priv(dev);
 692
 693	if (!pskb_inet_may_pull(skb))
 694		goto free_skb;
 695
 696	if (tunnel->collect_md) {
 697		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
 698		return NETDEV_TX_OK;
 699	}
 700
 701	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
 
 702		goto free_skb;
 703
 704	if (skb_cow_head(skb, dev->needed_headroom))
 705		goto free_skb;
 706
 707	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
 708	return NETDEV_TX_OK;
 709
 710free_skb:
 711	kfree_skb(skb);
 712	dev->stats.tx_dropped++;
 713	return NETDEV_TX_OK;
 714}
 715
 716static void ipgre_link_update(struct net_device *dev, bool set_mtu)
 717{
 718	struct ip_tunnel *tunnel = netdev_priv(dev);
 719	int len;
 720
 721	len = tunnel->tun_hlen;
 722	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
 723	len = tunnel->tun_hlen - len;
 724	tunnel->hlen = tunnel->hlen + len;
 725
 726	dev->needed_headroom = dev->needed_headroom + len;
 727	if (set_mtu)
 728		dev->mtu = max_t(int, dev->mtu - len, 68);
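     	/* For example, going from no GRE options to key+csum+seq grows the
     	 * GRE header from 4 to 16 bytes, so needed_headroom grows by 12 and
     	 * the MTU shrinks by 12, floored at the 68-byte IPv4 minimum.
     	 */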
 729
 730	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
 731		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
 732		    tunnel->encap.type == TUNNEL_ENCAP_NONE) {
 733			dev->features |= NETIF_F_GSO_SOFTWARE;
 734			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
 735		} else {
 736			dev->features &= ~NETIF_F_GSO_SOFTWARE;
 737			dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
 738		}
 739		dev->features |= NETIF_F_LLTX;
 740	} else {
 741		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
 742		dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
 743	}
 744}
 745
 746static int ipgre_tunnel_ioctl(struct net_device *dev,
 747			      struct ifreq *ifr, int cmd)
 
 748{
 749	struct ip_tunnel_parm p;
 750	int err;
 751
 752	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
 753		return -EFAULT;
 754
 755	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
 756		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
 757		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
 758		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
 759			return -EINVAL;
 760	}
 761
 762	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
 763	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
 764
 765	err = ip_tunnel_ioctl(dev, &p, cmd);
 766	if (err)
 767		return err;
 768
 769	if (cmd == SIOCCHGTUNNEL) {
 770		struct ip_tunnel *t = netdev_priv(dev);
 771
 772		t->parms.i_flags = p.i_flags;
 773		t->parms.o_flags = p.o_flags;
 774
 775		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
 776			ipgre_link_update(dev, true);
 777	}
 778
 779	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
 780	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);
 781
 782	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
 783		return -EFAULT;
 784
 785	return 0;
 786}
 787
 788/* Nice toy. Unfortunately, useless in real life :-)
  789	   It allows one to construct a virtual multiprotocol broadcast
  790	   "LAN" over the Internet, provided multicast routing is tuned.
  791
  792
  793	   I have no idea whether this bicycle was invented before me,
  794	   so I had to set ARPHRD_IPGRE to a random value.
  795	   I have the impression that Cisco could make something similar,
  796	   but this feature is apparently missing in IOS<=11.2(8).
 797
 798   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
 799   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
 800
 801   ping -t 255 224.66.66.66
 802
 803   If nobody answers, mbone does not work.
 804
 805   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
 806   ip addr add 10.66.66.<somewhat>/24 dev Universe
 807   ifconfig Universe up
 808   ifconfig Universe add fe80::<Your_real_addr>/10
 809   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
 810   ftp 10.66.66.66
 811   ...
 812   ftp fec0:6666:6666::193.233.7.65
 813   ...
 814 */
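     /* With current iproute2 the ifconfig lines above translate to roughly:
      *
      * ip link set Universe up
      * ip -6 addr add fe80::<Your_real_addr>/10 dev Universe
      * ip -6 addr add fec0:6666:6666::<Your_real_addr>/96 dev Universe
      */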
 815static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
 816			unsigned short type,
 817			const void *daddr, const void *saddr, unsigned int len)
 818{
 819	struct ip_tunnel *t = netdev_priv(dev);
 820	struct iphdr *iph;
 821	struct gre_base_hdr *greh;
 822
 823	iph = skb_push(skb, t->hlen + sizeof(*iph));
 824	greh = (struct gre_base_hdr *)(iph+1);
 825	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
 826	greh->protocol = htons(type);
 827
 828	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
 829
 830	/* Set the source hardware address. */
 831	if (saddr)
 832		memcpy(&iph->saddr, saddr, 4);
 833	if (daddr)
 834		memcpy(&iph->daddr, daddr, 4);
 835	if (iph->daddr)
 836		return t->hlen + sizeof(*iph);
 837
 838	return -(t->hlen + sizeof(*iph));
 839}
 840
 841static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
 842{
 843	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
 844	memcpy(haddr, &iph->saddr, 4);
 845	return 4;
 846}
 847
 848static const struct header_ops ipgre_header_ops = {
 849	.create	= ipgre_header,
 850	.parse	= ipgre_header_parse,
 851};
 852
 853#ifdef CONFIG_NET_IPGRE_BROADCAST
 854static int ipgre_open(struct net_device *dev)
 855{
 856	struct ip_tunnel *t = netdev_priv(dev);
 857
 858	if (ipv4_is_multicast(t->parms.iph.daddr)) {
 859		struct flowi4 fl4;
 860		struct rtable *rt;
 861
 862		rt = ip_route_output_gre(t->net, &fl4,
 863					 t->parms.iph.daddr,
 864					 t->parms.iph.saddr,
 865					 t->parms.o_key,
 866					 RT_TOS(t->parms.iph.tos),
 867					 t->parms.link);
 868		if (IS_ERR(rt))
 869			return -EADDRNOTAVAIL;
 870		dev = rt->dst.dev;
 871		ip_rt_put(rt);
 872		if (!__in_dev_get_rtnl(dev))
 873			return -EADDRNOTAVAIL;
 874		t->mlink = dev->ifindex;
 875		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
 876	}
 877	return 0;
 878}
 879
 880static int ipgre_close(struct net_device *dev)
 881{
 882	struct ip_tunnel *t = netdev_priv(dev);
 883
 884	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
 885		struct in_device *in_dev;
 886		in_dev = inetdev_by_index(t->net, t->mlink);
 887		if (in_dev)
 888			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
 889	}
 890	return 0;
 891}
 892#endif
 893
 894static const struct net_device_ops ipgre_netdev_ops = {
 895	.ndo_init		= ipgre_tunnel_init,
 896	.ndo_uninit		= ip_tunnel_uninit,
 897#ifdef CONFIG_NET_IPGRE_BROADCAST
 898	.ndo_open		= ipgre_open,
 899	.ndo_stop		= ipgre_close,
 900#endif
 901	.ndo_start_xmit		= ipgre_xmit,
 902	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
 903	.ndo_change_mtu		= ip_tunnel_change_mtu,
 904	.ndo_get_stats64	= ip_tunnel_get_stats64,
 905	.ndo_get_iflink		= ip_tunnel_get_iflink,
 
 906};
 907
 908#define GRE_FEATURES (NETIF_F_SG |		\
 909		      NETIF_F_FRAGLIST |	\
 910		      NETIF_F_HIGHDMA |		\
 911		      NETIF_F_HW_CSUM)
 912
 913static void ipgre_tunnel_setup(struct net_device *dev)
 914{
 915	dev->netdev_ops		= &ipgre_netdev_ops;
 916	dev->type		= ARPHRD_IPGRE;
 917	ip_tunnel_setup(dev, ipgre_net_id);
 918}
 919
 920static void __gre_tunnel_init(struct net_device *dev)
 921{
 922	struct ip_tunnel *tunnel;
 923
 924	tunnel = netdev_priv(dev);
 925	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
 926	tunnel->parms.iph.protocol = IPPROTO_GRE;
 927
 928	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
 
 929
 930	dev->features		|= GRE_FEATURES;
 931	dev->hw_features	|= GRE_FEATURES;
 932
 933	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
 934		/* TCP offload with GRE SEQ is not supported, nor
 935		 * can we support 2 levels of outer headers requiring
 936		 * an update.
 937		 */
 938		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
 939		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
 940			dev->features    |= NETIF_F_GSO_SOFTWARE;
 941			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
 942		}
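     		/* e.g. a "csum" tunnel without FOU/GUE encapsulation still
     		 * gets software GSO here, while the same tunnel with UDP
     		 * encapsulation does not, since both the GRE and the outer
     		 * UDP checksum would then need updating for every segment.
     		 */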
 
 943
 944		/* Can use a lockless transmit, unless we generate
 945		 * output sequences
 946		 */
 947		dev->features |= NETIF_F_LLTX;
 948	}
 949}
 950
 951static int ipgre_tunnel_init(struct net_device *dev)
 952{
 953	struct ip_tunnel *tunnel = netdev_priv(dev);
 954	struct iphdr *iph = &tunnel->parms.iph;
 955
 956	__gre_tunnel_init(dev);
 957
 958	memcpy(dev->dev_addr, &iph->saddr, 4);
 959	memcpy(dev->broadcast, &iph->daddr, 4);
 960
 961	dev->flags		= IFF_NOARP;
 962	netif_keep_dst(dev);
 963	dev->addr_len		= 4;
 964
 965	if (iph->daddr && !tunnel->collect_md) {
 966#ifdef CONFIG_NET_IPGRE_BROADCAST
 967		if (ipv4_is_multicast(iph->daddr)) {
 968			if (!iph->saddr)
 969				return -EINVAL;
 970			dev->flags = IFF_BROADCAST;
 971			dev->header_ops = &ipgre_header_ops;
 
 
 972		}
 973#endif
 974	} else if (!tunnel->collect_md) {
 975		dev->header_ops = &ipgre_header_ops;
 
 
 976	}
 977
 978	return ip_tunnel_init(dev);
 979}
 980
 981static const struct gre_protocol ipgre_protocol = {
 982	.handler     = gre_rcv,
 983	.err_handler = gre_err,
 984};
 985
 986static int __net_init ipgre_init_net(struct net *net)
 987{
 988	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
 989}
 990
 991static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
 
 992{
 993	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
 
 994}
 995
 996static struct pernet_operations ipgre_net_ops = {
 997	.init = ipgre_init_net,
 998	.exit_batch = ipgre_exit_batch_net,
 999	.id   = &ipgre_net_id,
1000	.size = sizeof(struct ip_tunnel_net),
1001};
1002
1003static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1004				 struct netlink_ext_ack *extack)
1005{
1006	__be16 flags;
1007
1008	if (!data)
1009		return 0;
1010
1011	flags = 0;
1012	if (data[IFLA_GRE_IFLAGS])
1013		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1014	if (data[IFLA_GRE_OFLAGS])
1015		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1016	if (flags & (GRE_VERSION|GRE_ROUTING))
1017		return -EINVAL;
1018
1019	if (data[IFLA_GRE_COLLECT_METADATA] &&
1020	    data[IFLA_GRE_ENCAP_TYPE] &&
1021	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
1022		return -EINVAL;
1023
1024	return 0;
1025}
1026
1027static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1028			      struct netlink_ext_ack *extack)
1029{
1030	__be32 daddr;
1031
1032	if (tb[IFLA_ADDRESS]) {
1033		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1034			return -EINVAL;
1035		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1036			return -EADDRNOTAVAIL;
1037	}
1038
1039	if (!data)
1040		goto out;
1041
1042	if (data[IFLA_GRE_REMOTE]) {
1043		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1044		if (!daddr)
1045			return -EINVAL;
1046	}
1047
1048out:
1049	return ipgre_tunnel_validate(tb, data, extack);
1050}
1051
1052static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
1053			   struct netlink_ext_ack *extack)
1054{
1055	__be16 flags = 0;
1056	int ret;
1057
1058	if (!data)
1059		return 0;
1060
1061	ret = ipgre_tap_validate(tb, data, extack);
1062	if (ret)
1063		return ret;
1064
1065	/* ERSPAN should only have GRE sequence and key flag */
1066	if (data[IFLA_GRE_OFLAGS])
1067		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1068	if (data[IFLA_GRE_IFLAGS])
1069		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1070	if (!data[IFLA_GRE_COLLECT_METADATA] &&
1071	    flags != (GRE_SEQ | GRE_KEY))
1072		return -EINVAL;
1073
 1074	/* The ERSPAN Session ID is only 10 bits wide. Since we reuse
 1075	 * the 32-bit key field as the ID, check its range.
 1076	 */
1077	if (data[IFLA_GRE_IKEY] &&
1078	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1079		return -EINVAL;
1080
1081	if (data[IFLA_GRE_OKEY] &&
1082	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1083		return -EINVAL;
1084
1085	return 0;
1086}
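     /* ID_MASK is 0x3ff, so only keys 0..1023 get through the checks above;
      * e.g. something like "ip link add dev ers1 type erspan seq key 100
      * local 172.16.1.100 remote 172.16.1.200 erspan_ver 1 erspan 123" is
      * accepted, while key 5000 would be rejected with -EINVAL.
      */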
1087
1088static int ipgre_netlink_parms(struct net_device *dev,
1089				struct nlattr *data[],
1090				struct nlattr *tb[],
1091				struct ip_tunnel_parm *parms,
1092				__u32 *fwmark)
1093{
1094	struct ip_tunnel *t = netdev_priv(dev);
1095
1096	memset(parms, 0, sizeof(*parms));
1097
1098	parms->iph.protocol = IPPROTO_GRE;
1099
1100	if (!data)
1101		return 0;
1102
1103	if (data[IFLA_GRE_LINK])
1104		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1105
1106	if (data[IFLA_GRE_IFLAGS])
1107		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));
 
1108
1109	if (data[IFLA_GRE_OFLAGS])
1110		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));
 
1111
1112	if (data[IFLA_GRE_IKEY])
1113		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1114
1115	if (data[IFLA_GRE_OKEY])
1116		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1117
1118	if (data[IFLA_GRE_LOCAL])
1119		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);
1120
1121	if (data[IFLA_GRE_REMOTE])
1122		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);
1123
1124	if (data[IFLA_GRE_TTL])
1125		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1126
1127	if (data[IFLA_GRE_TOS])
1128		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1129
1130	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
1131		if (t->ignore_df)
1132			return -EINVAL;
1133		parms->iph.frag_off = htons(IP_DF);
1134	}
1135
1136	if (data[IFLA_GRE_COLLECT_METADATA]) {
1137		t->collect_md = true;
1138		if (dev->type == ARPHRD_IPGRE)
1139			dev->type = ARPHRD_NONE;
1140	}
1141
1142	if (data[IFLA_GRE_IGNORE_DF]) {
1143		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
1144		  && (parms->iph.frag_off & htons(IP_DF)))
1145			return -EINVAL;
1146		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
1147	}
1148
1149	if (data[IFLA_GRE_FWMARK])
1150		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1151
1152	if (data[IFLA_GRE_ERSPAN_VER]) {
1153		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1154
1155		if (t->erspan_ver != 1 && t->erspan_ver != 2)
1156			return -EINVAL;
1157	}
1158
1159	if (t->erspan_ver == 1) {
1160		if (data[IFLA_GRE_ERSPAN_INDEX]) {
1161			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1162			if (t->index & ~INDEX_MASK)
1163				return -EINVAL;
1164		}
1165	} else if (t->erspan_ver == 2) {
1166		if (data[IFLA_GRE_ERSPAN_DIR]) {
1167			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1168			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
1169				return -EINVAL;
1170		}
1171		if (data[IFLA_GRE_ERSPAN_HWID]) {
1172			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1173			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
1174				return -EINVAL;
1175		}
1176	}
1177
1178	return 0;
1179}
1180
1181/* This function returns true when ENCAP attributes are present in the nl msg */
1182static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1183				      struct ip_tunnel_encap *ipencap)
1184{
1185	bool ret = false;
1186
1187	memset(ipencap, 0, sizeof(*ipencap));
1188
1189	if (!data)
1190		return ret;
1191
1192	if (data[IFLA_GRE_ENCAP_TYPE]) {
1193		ret = true;
1194		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1195	}
1196
1197	if (data[IFLA_GRE_ENCAP_FLAGS]) {
1198		ret = true;
1199		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1200	}
1201
1202	if (data[IFLA_GRE_ENCAP_SPORT]) {
1203		ret = true;
1204		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1205	}
1206
1207	if (data[IFLA_GRE_ENCAP_DPORT]) {
1208		ret = true;
1209		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1210	}
1211
1212	return ret;
1213}
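     /* These attributes map to the iproute2 "encap" options, e.g. something
      * like "ip link add name tun1 type gre remote 192.0.2.1 local 192.0.2.2
      * encap fou encap-sport auto encap-dport 5555", which carries the GRE
      * packets inside an extra UDP header.
      */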
1214
1215static int gre_tap_init(struct net_device *dev)
1216{
1217	__gre_tunnel_init(dev);
1218	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1219	netif_keep_dst(dev);
1220
1221	return ip_tunnel_init(dev);
1222}
1223
1224static const struct net_device_ops gre_tap_netdev_ops = {
1225	.ndo_init		= gre_tap_init,
1226	.ndo_uninit		= ip_tunnel_uninit,
1227	.ndo_start_xmit		= gre_tap_xmit,
1228	.ndo_set_mac_address 	= eth_mac_addr,
1229	.ndo_validate_addr	= eth_validate_addr,
1230	.ndo_change_mtu		= ip_tunnel_change_mtu,
1231	.ndo_get_stats64	= ip_tunnel_get_stats64,
1232	.ndo_get_iflink		= ip_tunnel_get_iflink,
1233	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1234};
1235
1236static int erspan_tunnel_init(struct net_device *dev)
1237{
1238	struct ip_tunnel *tunnel = netdev_priv(dev);
1239
1240	tunnel->tun_hlen = 8;
1241	tunnel->parms.iph.protocol = IPPROTO_GRE;
1242	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1243		       erspan_hdr_len(tunnel->erspan_ver);
1244
1245	dev->features		|= GRE_FEATURES;
1246	dev->hw_features	|= GRE_FEATURES;
1247	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
1248	netif_keep_dst(dev);
1249
1250	return ip_tunnel_init(dev);
1251}
1252
1253static const struct net_device_ops erspan_netdev_ops = {
1254	.ndo_init		= erspan_tunnel_init,
1255	.ndo_uninit		= ip_tunnel_uninit,
1256	.ndo_start_xmit		= erspan_xmit,
1257	.ndo_set_mac_address	= eth_mac_addr,
1258	.ndo_validate_addr	= eth_validate_addr,
1259	.ndo_change_mtu		= ip_tunnel_change_mtu,
1260	.ndo_get_stats64	= ip_tunnel_get_stats64,
1261	.ndo_get_iflink		= ip_tunnel_get_iflink,
1262	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
1263};
1264
1265static void ipgre_tap_setup(struct net_device *dev)
1266{
1267	ether_setup(dev);
1268	dev->max_mtu = 0;
1269	dev->netdev_ops	= &gre_tap_netdev_ops;
1270	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1271	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
1272	ip_tunnel_setup(dev, gre_tap_net_id);
1273}
1274
1275static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1276			 struct nlattr *tb[], struct nlattr *data[],
1277			 struct netlink_ext_ack *extack)
1278{
1279	struct ip_tunnel_parm p;
1280	struct ip_tunnel_encap ipencap;
1281	__u32 fwmark = 0;
1282	int err;
1283
1284	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1285		struct ip_tunnel *t = netdev_priv(dev);
1286		err = ip_tunnel_encap_setup(t, &ipencap);
1287
1288		if (err < 0)
1289			return err;
1290	}
1291
1292	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1293	if (err < 0)
1294		return err;
1295	return ip_tunnel_newlink(dev, tb, &p, fwmark);
1296}
1297
1298static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1299			    struct nlattr *data[],
1300			    struct netlink_ext_ack *extack)
1301{
1302	struct ip_tunnel *t = netdev_priv(dev);
1303	struct ip_tunnel_encap ipencap;
1304	__u32 fwmark = t->fwmark;
1305	struct ip_tunnel_parm p;
1306	int err;
1307
1308	if (ipgre_netlink_encap_parms(data, &ipencap)) {
1309		err = ip_tunnel_encap_setup(t, &ipencap);
1310
1311		if (err < 0)
1312			return err;
1313	}
1314
1315	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1316	if (err < 0)
1317		return err;
1318
1319	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
1320	if (err < 0)
1321		return err;
1322
1323	t->parms.i_flags = p.i_flags;
1324	t->parms.o_flags = p.o_flags;
1325
1326	if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
1327		ipgre_link_update(dev, !tb[IFLA_MTU]);
1328
1329	return 0;
1330}
1331
1332static size_t ipgre_get_size(const struct net_device *dev)
1333{
1334	return
1335		/* IFLA_GRE_LINK */
1336		nla_total_size(4) +
1337		/* IFLA_GRE_IFLAGS */
1338		nla_total_size(2) +
1339		/* IFLA_GRE_OFLAGS */
1340		nla_total_size(2) +
1341		/* IFLA_GRE_IKEY */
1342		nla_total_size(4) +
1343		/* IFLA_GRE_OKEY */
1344		nla_total_size(4) +
1345		/* IFLA_GRE_LOCAL */
1346		nla_total_size(4) +
1347		/* IFLA_GRE_REMOTE */
1348		nla_total_size(4) +
1349		/* IFLA_GRE_TTL */
1350		nla_total_size(1) +
1351		/* IFLA_GRE_TOS */
1352		nla_total_size(1) +
1353		/* IFLA_GRE_PMTUDISC */
1354		nla_total_size(1) +
1355		/* IFLA_GRE_ENCAP_TYPE */
1356		nla_total_size(2) +
1357		/* IFLA_GRE_ENCAP_FLAGS */
1358		nla_total_size(2) +
1359		/* IFLA_GRE_ENCAP_SPORT */
1360		nla_total_size(2) +
1361		/* IFLA_GRE_ENCAP_DPORT */
1362		nla_total_size(2) +
1363		/* IFLA_GRE_COLLECT_METADATA */
1364		nla_total_size(0) +
1365		/* IFLA_GRE_IGNORE_DF */
1366		nla_total_size(1) +
1367		/* IFLA_GRE_FWMARK */
1368		nla_total_size(4) +
1369		/* IFLA_GRE_ERSPAN_INDEX */
1370		nla_total_size(4) +
1371		/* IFLA_GRE_ERSPAN_VER */
1372		nla_total_size(1) +
1373		/* IFLA_GRE_ERSPAN_DIR */
1374		nla_total_size(1) +
1375		/* IFLA_GRE_ERSPAN_HWID */
1376		nla_total_size(2) +
1377		0;
1378}
1379
1380static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1381{
1382	struct ip_tunnel *t = netdev_priv(dev);
1383	struct ip_tunnel_parm *p = &t->parms;
1384	__be16 o_flags = p->o_flags;
1385
1386	if (t->erspan_ver == 1 || t->erspan_ver == 2) {
1387		if (!t->collect_md)
1388			o_flags |= TUNNEL_KEY;
1389
1390		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
1391			goto nla_put_failure;
1392
1393		if (t->erspan_ver == 1) {
1394			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
1395				goto nla_put_failure;
1396		} else {
1397			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
1398				goto nla_put_failure;
1399			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
1400				goto nla_put_failure;
1401		}
1402	}
1403
1404	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1405	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
1406			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1407	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
1408			 gre_tnl_flags_to_gre_flags(o_flags)) ||
1409	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1410	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1411	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1412	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1413	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1414	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1415	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1416		       !!(p->iph.frag_off & htons(IP_DF))) ||
1417	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
1418		goto nla_put_failure;
1419
1420	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
1421			t->encap.type) ||
1422	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
1423			 t->encap.sport) ||
1424	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
1425			 t->encap.dport) ||
1426	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
1427			t->encap.flags))
1428		goto nla_put_failure;
1429
1430	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
1431		goto nla_put_failure;
1432
1433	if (t->collect_md) {
1434		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
1435			goto nla_put_failure;
1436	}
1437
1438	return 0;
1439
1440nla_put_failure:
1441	return -EMSGSIZE;
1442}
1443
1444static void erspan_setup(struct net_device *dev)
1445{
1446	struct ip_tunnel *t = netdev_priv(dev);
1447
1448	ether_setup(dev);
1449	dev->max_mtu = 0;
1450	dev->netdev_ops = &erspan_netdev_ops;
1451	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1452	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1453	ip_tunnel_setup(dev, erspan_net_id);
1454	t->erspan_ver = 1;
1455}
1456
1457static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1458	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
1459	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
1460	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
1461	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
1462	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
1463	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
1464	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
1465	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
1466	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
1467	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
1468	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
1469	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
1470	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
1471	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
1472	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
1473	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
1474	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
1475	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
1476	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
1477	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
1478	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
1479};
1480
1481static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1482	.kind		= "gre",
1483	.maxtype	= IFLA_GRE_MAX,
1484	.policy		= ipgre_policy,
1485	.priv_size	= sizeof(struct ip_tunnel),
1486	.setup		= ipgre_tunnel_setup,
1487	.validate	= ipgre_tunnel_validate,
1488	.newlink	= ipgre_newlink,
1489	.changelink	= ipgre_changelink,
1490	.dellink	= ip_tunnel_dellink,
1491	.get_size	= ipgre_get_size,
1492	.fill_info	= ipgre_fill_info,
1493	.get_link_net	= ip_tunnel_get_link_net,
1494};
1495
1496static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1497	.kind		= "gretap",
1498	.maxtype	= IFLA_GRE_MAX,
1499	.policy		= ipgre_policy,
1500	.priv_size	= sizeof(struct ip_tunnel),
1501	.setup		= ipgre_tap_setup,
1502	.validate	= ipgre_tap_validate,
1503	.newlink	= ipgre_newlink,
1504	.changelink	= ipgre_changelink,
1505	.dellink	= ip_tunnel_dellink,
1506	.get_size	= ipgre_get_size,
1507	.fill_info	= ipgre_fill_info,
1508	.get_link_net	= ip_tunnel_get_link_net,
1509};
1510
1511static struct rtnl_link_ops erspan_link_ops __read_mostly = {
1512	.kind		= "erspan",
1513	.maxtype	= IFLA_GRE_MAX,
1514	.policy		= ipgre_policy,
1515	.priv_size	= sizeof(struct ip_tunnel),
1516	.setup		= erspan_setup,
1517	.validate	= erspan_validate,
1518	.newlink	= ipgre_newlink,
1519	.changelink	= ipgre_changelink,
1520	.dellink	= ip_tunnel_dellink,
1521	.get_size	= ipgre_get_size,
1522	.fill_info	= ipgre_fill_info,
1523	.get_link_net	= ip_tunnel_get_link_net,
1524};
1525
1526struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1527					u8 name_assign_type)
1528{
1529	struct nlattr *tb[IFLA_MAX + 1];
1530	struct net_device *dev;
1531	LIST_HEAD(list_kill);
1532	struct ip_tunnel *t;
1533	int err;
1534
1535	memset(&tb, 0, sizeof(tb));
1536
1537	dev = rtnl_create_link(net, name, name_assign_type,
1538			       &ipgre_tap_ops, tb, NULL);
1539	if (IS_ERR(dev))
1540		return dev;
1541
1542	/* Configure flow based GRE device. */
1543	t = netdev_priv(dev);
1544	t->collect_md = true;
1545
1546	err = ipgre_newlink(net, dev, tb, NULL, NULL);
1547	if (err < 0) {
1548		free_netdev(dev);
1549		return ERR_PTR(err);
1550	}
1551
1552	/* openvswitch users expect packet sizes to be unrestricted,
1553	 * so set the largest MTU we can.
1554	 */
1555	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1556	if (err)
1557		goto out;
1558
1559	err = rtnl_configure_link(dev, NULL);
1560	if (err < 0)
1561		goto out;
1562
1563	return dev;
1564out:
1565	ip_tunnel_dellink(dev, &list_kill);
1566	unregister_netdevice_many(&list_kill);
1567	return ERR_PTR(err);
1568}
1569EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
1570
1571static int __net_init ipgre_tap_init_net(struct net *net)
1572{
1573	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
1574}
1575
1576static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
 
1577{
1578	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
 
1579}
1580
1581static struct pernet_operations ipgre_tap_net_ops = {
1582	.init = ipgre_tap_init_net,
1583	.exit_batch = ipgre_tap_exit_batch_net,
1584	.id   = &gre_tap_net_id,
1585	.size = sizeof(struct ip_tunnel_net),
1586};
1587
1588static int __net_init erspan_init_net(struct net *net)
1589{
1590	return ip_tunnel_init_net(net, erspan_net_id,
1591				  &erspan_link_ops, "erspan0");
1592}
1593
1594static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
 
1595{
1596	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
 
1597}
1598
1599static struct pernet_operations erspan_net_ops = {
1600	.init = erspan_init_net,
1601	.exit_batch = erspan_exit_batch_net,
1602	.id   = &erspan_net_id,
1603	.size = sizeof(struct ip_tunnel_net),
1604};
1605
1606static int __init ipgre_init(void)
1607{
1608	int err;
1609
1610	pr_info("GRE over IPv4 tunneling driver\n");
1611
1612	err = register_pernet_device(&ipgre_net_ops);
1613	if (err < 0)
1614		return err;
1615
1616	err = register_pernet_device(&ipgre_tap_net_ops);
1617	if (err < 0)
1618		goto pnet_tap_failed;
1619
1620	err = register_pernet_device(&erspan_net_ops);
1621	if (err < 0)
1622		goto pnet_erspan_failed;
1623
1624	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1625	if (err < 0) {
1626		pr_info("%s: can't add protocol\n", __func__);
1627		goto add_proto_failed;
1628	}
1629
1630	err = rtnl_link_register(&ipgre_link_ops);
1631	if (err < 0)
1632		goto rtnl_link_failed;
1633
1634	err = rtnl_link_register(&ipgre_tap_ops);
1635	if (err < 0)
1636		goto tap_ops_failed;
1637
1638	err = rtnl_link_register(&erspan_link_ops);
1639	if (err < 0)
1640		goto erspan_link_failed;
1641
1642	return 0;
1643
1644erspan_link_failed:
1645	rtnl_link_unregister(&ipgre_tap_ops);
1646tap_ops_failed:
1647	rtnl_link_unregister(&ipgre_link_ops);
1648rtnl_link_failed:
1649	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1650add_proto_failed:
1651	unregister_pernet_device(&erspan_net_ops);
1652pnet_erspan_failed:
1653	unregister_pernet_device(&ipgre_tap_net_ops);
1654pnet_tap_failed:
1655	unregister_pernet_device(&ipgre_net_ops);
1656	return err;
1657}
1658
1659static void __exit ipgre_fini(void)
1660{
1661	rtnl_link_unregister(&ipgre_tap_ops);
1662	rtnl_link_unregister(&ipgre_link_ops);
1663	rtnl_link_unregister(&erspan_link_ops);
1664	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1665	unregister_pernet_device(&ipgre_tap_net_ops);
1666	unregister_pernet_device(&ipgre_net_ops);
1667	unregister_pernet_device(&erspan_net_ops);
1668}
1669
1670module_init(ipgre_init);
1671module_exit(ipgre_fini);
 
1672MODULE_LICENSE("GPL");
1673MODULE_ALIAS_RTNL_LINK("gre");
1674MODULE_ALIAS_RTNL_LINK("gretap");
1675MODULE_ALIAS_RTNL_LINK("erspan");
1676MODULE_ALIAS_NETDEV("gre0");
1677MODULE_ALIAS_NETDEV("gretap0");
1678MODULE_ALIAS_NETDEV("erspan0");