/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ipip.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   with infinite looping in net_bh.

   We cannot track such dead loops during route installation,
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (sort of local ttl),
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skb, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter, since when we enter the first ndo_xmit(), cpu migration is
   forbidden. We force an exit if this counter reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but would really
   kill network. IP hop limit plays role of "t->recursion" in this case,
   if we copy it from packet being encapsulated to upper header.
   It is very good solution, but it introduces two problems:

   - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from tunnel,
     so that this problem would be solved and traceroute output
     would be even more informative. This idea appeared to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)), all routers (at least, in neighbourhood of mine)
     return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work, or traceroute to say something reasonable,
   we should search for another solution.

   One of them is to parse packet trying to detect inner encapsulation
   made by our node. It is difficult or even impossible, especially,
   taking into account fragmentation. To be short, ttl is not a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force DF flag on tunnels with preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches, that exceed pmtu are pruned) and tunnel mtu
   rapidly degrades to value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop,
   which does not force DF, even when encapsulating packets have DF set.
   But it is not our problem! Nobody could accuse us, we made
   all that we could make. Even if it is your gated who injected
   fatal route to network, even if it were you who configured
   fatal static route: you are innocent. :-)



   3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
   practically identical code. It would be good to glue them
   together, but it is not very evident, how to make them modular.
   sit is integral part of IPv6, ipip and gre are naturally modular.
   We could extract common parts (hash table, ioctl etc)
   to a separate module (ip_tunnel.c).

   Alexey Kuznetsov.
 */
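/* Regarding point 1 above: the recursion guard lives in the core dev
 * layer, not in this file. A rough, illustrative sketch of the pattern
 * (names per net/core/dev.c of this era; simplified, not a verbatim
 * quote):
 *
 *	static DEFINE_PER_CPU(int, xmit_recursion);
 *	#define RECURSION_LIMIT 10
 *
 *	if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
 *		goto recursion_alert;	// drop: dead loop detected
 *	__this_cpu_inc(xmit_recursion);
 *	rc = dev_hard_start_xmit(skb, dev, txq);
 *	__this_cpu_dec(xmit_recursion);
 */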

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void ipgre_tunnel_setup(struct net_device *dev);
static int ipgre_tunnel_bind_dev(struct net_device *dev);

/* Fallback tunnel: no source, no destination, no key, no options */

#define HASH_SIZE	16

static int ipgre_net_id __read_mostly;
struct ipgre_net {
	struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];

	struct net_device *fb_tunnel_dev;
};

/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require exact key match i.e. if a key is present in packet
   it will match only tunnel with the same key; if it is not present,
   it will match only keyless tunnel.

   All keyless packets, if not matching a configured keyless tunnel,
   will match the fallback tunnel.
 */

#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
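/* Editorial note: HASH() folds a 32-bit value down to 4 bits:
 * bucket = (addr ^ (addr >> 4)) & 0xF, i.e. the lowest nibble XORed
 * with the nibble above it. For example, a value ending in ...0x21
 * lands in bucket 0x1 ^ 0x2 = 3. Four bits of spread is why
 * HASH_SIZE is 16. */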

#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]
/*
 * Locking : hash tables are protected by RCU and RTNL
 */

#define for_each_ip_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
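/* NB: for_each_ip_tunnel_rcu() expects a variable named 't' to be
 * declared in the calling scope, and must run under rcu_read_lock(). */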

/* often modified stats are per cpu, other are shared (netdev->stats) */
struct pcpu_tstats {
	unsigned long	rx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_packets;
	unsigned long	tx_bytes;
};

static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
{
	struct pcpu_tstats sum = { 0 };
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);

		sum.rx_packets += tstats->rx_packets;
		sum.rx_bytes   += tstats->rx_bytes;
		sum.tx_packets += tstats->tx_packets;
		sum.tx_bytes   += tstats->tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes   = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes   = sum.tx_bytes;
	return &dev->stats;
}

/* Given src, dst and key, find the appropriate tunnel for input. */

static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
					     __be32 remote, __be32 local,
					     __be32 key, __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH(remote);
	unsigned int h1 = HASH(key);
	struct ip_tunnel *t, *cand = NULL;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
		       ARPHRD_ETHER : ARPHRD_IPGRE;
	int score, cand_score = 4;

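	/* Scoring, used by all four scans below: bit 0 set means the tunnel
	 * is bound to a different link, bit 1 set means the device type
	 * (gre vs gretap) differs. A score of 0 is a perfect match and
	 * returns immediately; otherwise the lowest-scoring candidate wins.
	 * cand_score starts at 4, i.e. worse than any real score. */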
	for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
		if (remote != t->parms.iph.daddr ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
		if ((local != t->parms.iph.saddr &&
		     (local != t->parms.iph.daddr ||
		      !ipv4_is_multicast(local))) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand != NULL)
		return cand;

	dev = ign->fb_tunnel_dev;
	if (dev->flags & IFF_UP)
		return netdev_priv(dev);

	return NULL;
}

static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
					       struct ip_tunnel_parm *parms)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	unsigned int h = HASH(key);
	int prio = 0;

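	/* prio picks one of the four tables declared above: bit 0 = local
	 * address configured, bit 1 = unicast remote configured, so e.g. a
	 * tunnel with both endpoints set goes to prio 3 (tunnels_r_l). A
	 * multicast "remote" is treated as a wildcard: it neither sets the
	 * bit nor perturbs the hash. */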
	if (local)
		prio |= 1;
	if (remote && !ipv4_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH(remote);
	}

	return &ign->tunnels[prio][h];
}

static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
						    struct ip_tunnel *t)
{
	return __ipgre_bucket(ign, &t->parms);
}

static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp;
	struct ip_tunnel *iter;

	for (tp = ipgre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
					   struct ip_tunnel_parm *parms,
					   int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip_tunnel *t;
	struct ip_tunnel __rcu **tp;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	for (tp = __ipgre_bucket(ign, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next)
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;

	return t;
}

static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
					     struct ip_tunnel_parm *parms, int create)
{
	struct ip_tunnel *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
	if (t || !create)
		return t;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		strcpy(name, "gre%d");

	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ipgre_link_ops;

	dev->mtu = ipgre_tunnel_bind_dev(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}

static void ipgre_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	ipgre_tunnel_unlink(ign, netdev_priv(dev));
	dev_put(dev);
}


static void ipgre_err(struct sk_buff *skb, u32 info)
{

/* All the routers (except for Linux) return only
   8 bytes of packet payload. It means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.

   Moreover, Cisco "wise men" put the GRE key into the third word
   of the GRE header. It makes it impossible to maintain even soft state
   for keyed GRE tunnels with checksum enabled. Tell them "thank you".

   Well, I wonder: rfc1812 was written by a Cisco employee,
   so why the hell do these idiots break standards established
   by themselves???
 */

	const struct iphdr *iph = (const struct iphdr *)skb->data;
	__be16 *p = (__be16 *)(skb->data + (iph->ihl << 2));
	int grehlen = (iph->ihl << 2) + 4;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	__be16 flags;

	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;
		if (flags&GRE_KEY) {
			grehlen += 4;
			if (flags&GRE_CSUM)
				grehlen += 4;
		}
	}

	/* If only 8 bytes returned, keyed message will be dropped here */
	if (skb_headlen(skb) < grehlen)
		return;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		case ICMP_FRAG_NEEDED:
			/* Soft state for pmtu is maintained by IP core. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;
	}

	rcu_read_lock();
	t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
				flags & GRE_KEY ?
				*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
				p[1]);
	if (t == NULL || t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		goto out;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
out:
	rcu_read_unlock();
}

static inline void ipgre_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
{
	if (INET_ECN_is_ce(iph->tos)) {
		if (skb->protocol == htons(ETH_P_IP)) {
			IP_ECN_set_ce(ip_hdr(skb));
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			IP6_ECN_set_ce(ipv6_hdr(skb));
		}
	}
}

static inline u8
ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
{
	u8 inner = 0;
	if (skb->protocol == htons(ETH_P_IP))
		inner = old_iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
	return INET_ECN_encapsulate(tos, inner);
}

static int ipgre_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	u8 *h;
	__be16 flags;
	__sum16 csum = 0;
	__be32 key = 0;
	u32 seqno = 0;
	struct ip_tunnel *tunnel;
	int offset = 4;
	__be16 gre_proto;

	if (!pskb_may_pull(skb, 16))
		goto drop_nolock;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16 *)h;

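	/* GRE header layout (RFC 2784/2890): 2 bytes of flags and 2 bytes
	 * of protocol, followed by optional 32-bit fields in this order:
	 * checksum (+ reserved), key, sequence number. That is why 'offset'
	 * starts at 4 and grows by 4 for each option flagged below. */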
	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop_nolock;

		if (flags&GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32 *)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32 *)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	rcu_read_lock();
	if ((tunnel = ipgre_tunnel_lookup(skb->dev,
					  iph->saddr, iph->daddr, key,
					  gre_proto))) {
		struct pcpu_tstats *tstats;

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (rt_is_output_route(skb_rtable(skb)))
				goto drop;
			tunnel->dev->stats.multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			tunnel->dev->stats.rx_crc_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
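			/* Wraparound-safe ordering check: the signed 32-bit
			 * difference (seqno - i_seqno) is negative for any
			 * sequence number up to 2^31 behind the expected
			 * one (serial number arithmetic). */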
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				tunnel->dev->stats.rx_fifo_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				tunnel->dev->stats.rx_length_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}

			iph = ip_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;

		__skb_tunnel_rx(skb, tunnel->dev);

		skb_reset_network_header(skb);
		ipgre_ecn_decapsulate(iph, skb);

		netif_rx(skb);

		rcu_read_unlock();
		return 0;
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	rcu_read_unlock();
drop_nolock:
	kfree_skb(skb);
	return 0;
}

static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct pcpu_tstats *tstats;
	const struct iphdr *old_iph = ip_hdr(skb);
	const struct iphdr *tiph;
	struct flowi4 fl4;
	u8 tos;
	__be16 df;
	struct rtable *rt;		/* Route to the other host */
	struct net_device *tdev;	/* Device to other host */
	struct iphdr *iph;		/* Our new IP header */
	unsigned int max_headroom;	/* The extra header space needed */
	int gre_hlen;
	__be32 dst;
	int mtu;

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
		gre_hlen = 0;
		tiph = (const struct iphdr *)skb->data;
	} else {
		gre_hlen = tunnel->hlen;
		tiph = &tunnel->parms.iph;
	}

	if ((dst = tiph->daddr) == 0) {
		/* NBMA tunnel */

		if (skb_dst(skb) == NULL) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			if ((dst = rt->rt_gateway) == 0)
				goto tx_error_icmp;
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct neighbour *neigh = dst_get_neighbour(skb_dst(skb));
			const struct in6_addr *addr6;
			int addr_type;

			if (neigh == NULL)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				goto tx_error_icmp;

			dst = addr6->s6_addr32[3];
		}
#endif
		else
			goto tx_error;
	}

	tos = tiph->tos;
	if (tos == 1) {
		tos = 0;
		if (skb->protocol == htons(ETH_P_IP))
			tos = old_iph->tos;
		else if (skb->protocol == htons(ETH_P_IPV6))
			tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
	}

	rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
				 tunnel->parms.o_key, RT_TOS(tos),
				 tunnel->parms.link);
	if (IS_ERR(rt)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}
	tdev = rt->dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	df = tiph->frag_off;
	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		df |= (old_iph->frag_off&htons(IP_DF));

		if ((old_iph->frag_off&htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#endif

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (max_headroom > dev->needed_headroom)
			dev->needed_headroom = max_headroom;
		if (!new_skb) {
			ip_rt_put(rt);
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
	}

	skb_reset_transport_header(skb);
	skb_push(skb, gre_hlen);
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/*
	 *	Push down and install the IPIP header.
	 */

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = IPPROTO_GRE;
	iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
	iph->daddr = fl4.daddr;
	iph->saddr = fl4.saddr;

	if ((iph->ttl = tiph->ttl) == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			iph->ttl = old_iph->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
#endif
		else
			iph->ttl = ip4_dst_hoplimit(&rt->dst);
	}

	((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
	((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
				   htons(ETH_P_TEB) : skb->protocol;

	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
		__be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);

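		/* ptr starts at the last 32-bit word of the GRE header and
		 * the options are filled in back to front: on the wire the
		 * order is checksum, key, sequence number, so seq (if any)
		 * is written first and ptr then steps back towards the key
		 * and checksum slots. */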
		if (tunnel->parms.o_flags&GRE_SEQ) {
			++tunnel->o_seqno;
			*ptr = htonl(tunnel->o_seqno);
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_KEY) {
			*ptr = tunnel->parms.o_key;
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_CSUM) {
			*ptr = 0;
			*(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
		}
	}

	nf_reset(skb);
	tstats = this_cpu_ptr(dev->tstats);
	__IPTUNNEL_XMIT(tstats, &dev->stats);
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);

tx_error:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int ipgre_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int addend = sizeof(struct iphdr) + 4;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */

	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(dev_net(dev), &fl4,
					 iph->daddr, iph->saddr,
					 tunnel->parms.o_key,
					 RT_TOS(iph->tos),
					 tunnel->parms.link);
		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}

		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	/* Precalculate GRE options length */
	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (tunnel->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_KEY)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_SEQ)
			addend += 4;
	}
	dev->needed_headroom = addend + hlen;
	mtu -= dev->hard_header_len + addend;

	if (mtu < 68)
		mtu = 68;

	tunnel->hlen = addend;

	return mtu;
}
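/* Worked example: over a plain Ethernet underlay (tdev->mtu == 1500) a
 * keyless tunnel without checksums has addend = 20 (IP) + 4 (GRE) = 24,
 * giving the classic GRE mtu of 1500 - 24 = 1476; adding a key drops it
 * to 1472. */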

static int
ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipgre_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				t = netdev_priv(dev);

				if (ipv4_is_multicast(p.iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p.iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}
				ipgre_tunnel_unlink(ign, t);
				synchronize_net();
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipgre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					dev->mtu = ipgre_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}

static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
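	/* The upper bound uses 0xFFF8, the largest 8-byte-aligned value
	 * that still fits the 16-bit IP tot_len field once our headers are
	 * added, so the encapsulated packet stays representable. */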
	if (new_mtu < 68 ||
	    new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.


   I have no idea whether this bicycle was invented before me,
   so that I had to set ARPHRD_IPGRE to a random value.
   I have an impression, that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...

 */

static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
	__be16 *p = (__be16 *)(iph+1);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen;

	return -t->hlen;
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(dev_net(dev), &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (__in_dev_get_rtnl(dev) == NULL)
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;
		in_dev = inetdev_by_index(dev_net(dev), t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}

#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats		= ipgre_get_stats,
};

static void ipgre_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->destructor		= ipgre_dev_free;

	dev->type		= ARPHRD_IPGRE;
	dev->needed_headroom	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_NETNS_LOCAL;
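	/* Keep the dst attached on transmit: ipgre_tunnel_xmit() relies on
	 * skb_dst() for NBMA gateway selection and path-MTU handling. */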
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	struct iphdr *iph;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	if (iph->daddr) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else
		dev->header_ops = &ipgre_header_ops;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static void ipgre_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version		= 4;
	iph->protocol		= IPPROTO_GRE;
	iph->ihl		= 5;
	tunnel->hlen		= sizeof(struct iphdr) + 4;

	dev_hold(dev);
}


static const struct gre_protocol ipgre_protocol = {
	.handler     = ipgre_rcv,
	.err_handler = ipgre_err,
};

static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
{
	int prio;

	for (prio = 0; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t;

			t = rtnl_dereference(ign->tunnels[prio][h]);

			while (t != NULL) {
				unregister_netdevice_queue(t->dev, head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}

static int __net_init ipgre_init_net(struct net *net)
{
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int err;

	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
					  ipgre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);

	ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;

	if ((err = register_netdev(ign->fb_tunnel_dev)))
		goto err_reg_dev;

	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	ipgre_dev_free(ign->fb_tunnel_dev);
err_alloc_dev:
	return err;
}

static void __net_exit ipgre_exit_net(struct net *net)
{
	struct ipgre_net *ign;
	LIST_HEAD(list);

	ign = net_generic(net, ipgre_net_id);
	rtnl_lock();
	ipgre_destroy_tunnels(ign, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ipgre_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data);
}

static void ipgre_netlink_parms(struct nlattr *data[],
				struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);
}
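/* For reference (editorial note, not from this file): these attributes
 * are what iproute2 typically fills in for a command such as
 *
 *	ip link add gre1 type gre local 192.0.2.1 remote 198.51.100.2 \
 *		ttl 64 key 1234
 *
 * where "key" sets IFLA_GRE_IKEY and IFLA_GRE_OKEY (plus the GRE_KEY
 * bits in both flag attributes), and leaving out "nopmtudisc" keeps
 * IFLA_GRE_PMTUDISC set, which forces DF above. */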

static int ipgre_tap_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	ipgre_tunnel_bind_dev(dev);

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static const struct net_device_ops ipgre_tap_netdev_ops = {
	.ndo_init		= ipgre_tap_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats		= ipgre_get_stats,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops		= &ipgre_tap_netdev_ops;
	dev->destructor		= ipgre_dev_free;

	dev->iflink		= 0;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[])
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int mtu;
	int err;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &nt->parms);

	if (ipgre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		random_ether_addr(dev->dev_addr);

	mtu = ipgre_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	err = register_netdevice(dev);
	if (err)
		goto out;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);

out:
	return err;
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel *t, *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	struct ip_tunnel_parm p;
	int mtu;

	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &p);

	t = ipgre_tunnel_locate(net, &p, 0);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p.iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p.iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}

		ipgre_tunnel_unlink(ign, t);
		t->parms.iph.saddr = p.iph.saddr;
		t->parms.iph.daddr = p.iph.daddr;
		t->parms.i_key = p.i_key;
		if (dev->type != ARPHRD_ETHER) {
			memcpy(dev->dev_addr, &p.iph.saddr, 4);
			memcpy(dev->broadcast, &p.iph.daddr, 4);
		}
		ipgre_tunnel_link(ign, t);
		netdev_state_change(dev);
	}

	t->parms.o_key = p.o_key;
	t->parms.iph.ttl = p.iph.ttl;
	t->parms.iph.tos = p.iph.tos;
	t->parms.iph.frag_off = p.iph.frag_off;

	if (t->parms.link != p.link) {
		t->parms.link = p.link;
		mtu = ipgre_tunnel_bind_dev(dev);
		if (!tb[IFLA_MTU])
			dev->mtu = mtu;
		netdev_state_change(dev);
	}

	return 0;
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link);
	NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags);
	NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags);
	NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key);
	NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key);
	NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr);
	NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr);
	NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl);
	NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos);
	NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF)));

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};

/*
 *	And now the modules code and kernel interface.
 */

static int __init ipgre_init(void)
{
	int err;

	printk(KERN_INFO "GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		printk(KERN_INFO "ipgre init: can't add protocol\n");
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

out:
	return err;

tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&ipgre_net_ops);
	goto out;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
		printk(KERN_INFO "ipgre close: can't remove protocol\n");
	unregister_pernet_device(&ipgre_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Linux NET3: GRE over IP protocol decoder.
4 *
5 * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10#include <linux/capability.h>
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/uaccess.h>
16#include <linux/skbuff.h>
17#include <linux/netdevice.h>
18#include <linux/in.h>
19#include <linux/tcp.h>
20#include <linux/udp.h>
21#include <linux/if_arp.h>
22#include <linux/if_vlan.h>
23#include <linux/init.h>
24#include <linux/in6.h>
25#include <linux/inetdevice.h>
26#include <linux/igmp.h>
27#include <linux/netfilter_ipv4.h>
28#include <linux/etherdevice.h>
29#include <linux/if_ether.h>
30
31#include <net/sock.h>
32#include <net/ip.h>
33#include <net/icmp.h>
34#include <net/protocol.h>
35#include <net/ip_tunnels.h>
36#include <net/arp.h>
37#include <net/checksum.h>
38#include <net/dsfield.h>
39#include <net/inet_ecn.h>
40#include <net/xfrm.h>
41#include <net/net_namespace.h>
42#include <net/netns/generic.h>
43#include <net/rtnetlink.h>
44#include <net/gre.h>
45#include <net/dst_metadata.h>
46#include <net/erspan.h>
47#include <net/inet_dscp.h>
48
49/*
50 Problems & solutions
51 --------------------
52
53 1. The most important issue is detecting local dead loops.
54 They would cause complete host lockup in transmit, which
55 would be "resolved" by stack overflow or, if queueing is enabled,
56 with infinite looping in net_bh.
57
58 We cannot track such dead loops during route installation,
59 it is infeasible task. The most general solutions would be
60 to keep skb->encapsulation counter (sort of local ttl),
61 and silently drop packet when it expires. It is a good
62 solution, but it supposes maintaining new variable in ALL
63 skb, even if no tunneling is used.
64
65 Current solution: xmit_recursion breaks dead loops. This is a percpu
66 counter, since when we enter the first ndo_xmit(), cpu migration is
67 forbidden. We force an exit if this counter reaches RECURSION_LIMIT
68
69 2. Networking dead loops would not kill routers, but would really
70 kill network. IP hop limit plays role of "t->recursion" in this case,
71 if we copy it from packet being encapsulated to upper header.
72 It is very good solution, but it introduces two problems:
73
74 - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
75 do not work over tunnels.
76 - traceroute does not work. I planned to relay ICMP from tunnel,
77 so that this problem would be solved and traceroute output
78 would even more informative. This idea appeared to be wrong:
79 only Linux complies to rfc1812 now (yes, guys, Linux is the only
80 true router now :-)), all routers (at least, in neighbourhood of mine)
81 return only 8 bytes of payload. It is the end.
82
83 Hence, if we want that OSPF worked or traceroute said something reasonable,
84 we should search for another solution.
85
86 One of them is to parse packet trying to detect inner encapsulation
87 made by our node. It is difficult or even impossible, especially,
88 taking into account fragmentation. TO be short, ttl is not solution at all.
89
90 Current solution: The solution was UNEXPECTEDLY SIMPLE.
91 We force DF flag on tunnels with preconfigured hop limit,
92 that is ALL. :-) Well, it does not remove the problem completely,
93 but exponential growth of network traffic is changed to linear
94 (branches, that exceed pmtu are pruned) and tunnel mtu
95 rapidly degrades to value <68, where looping stops.
96 Yes, it is not good if there exists a router in the loop,
97 which does not force DF, even when encapsulating packets have DF set.
98 But it is not our problem! Nobody could accuse us, we made
99 all that we could make. Even if it is your gated who injected
100 fatal route to network, even if it were you who configured
101 fatal static route: you are innocent. :-)
102
103 Alexey Kuznetsov.
104 */
105
106static bool log_ecn_error = true;
107module_param(log_ecn_error, bool, 0644);
108MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
109
110static struct rtnl_link_ops ipgre_link_ops __read_mostly;
111static const struct header_ops ipgre_header_ops;
112
113static int ipgre_tunnel_init(struct net_device *dev);
114static void erspan_build_header(struct sk_buff *skb,
115 u32 id, u32 index,
116 bool truncate, bool is_ipv4);
117
118static unsigned int ipgre_net_id __read_mostly;
119static unsigned int gre_tap_net_id __read_mostly;
120static unsigned int erspan_net_id __read_mostly;
121
122static int ipgre_err(struct sk_buff *skb, u32 info,
123 const struct tnl_ptk_info *tpi)
124{
125
126 /* All the routers (except for Linux) return only
127 8 bytes of packet payload. It means, that precise relaying of
128 ICMP in the real Internet is absolutely infeasible.
129
130 Moreover, Cisco "wise men" put GRE key to the third word
131 in GRE header. It makes impossible maintaining even soft
132 state for keyed GRE tunnels with enabled checksum. Tell
133 them "thank you".
134
135 Well, I wonder, rfc1812 was written by Cisco employee,
136 what the hell these idiots break standards established
137 by themselves???
138 */
139 struct net *net = dev_net(skb->dev);
140 struct ip_tunnel_net *itn;
141 const struct iphdr *iph;
142 const int type = icmp_hdr(skb)->type;
143 const int code = icmp_hdr(skb)->code;
144 unsigned int data_len = 0;
145 struct ip_tunnel *t;
146
147 if (tpi->proto == htons(ETH_P_TEB))
148 itn = net_generic(net, gre_tap_net_id);
149 else if (tpi->proto == htons(ETH_P_ERSPAN) ||
150 tpi->proto == htons(ETH_P_ERSPAN2))
151 itn = net_generic(net, erspan_net_id);
152 else
153 itn = net_generic(net, ipgre_net_id);
154
155 iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
156 t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
157 iph->daddr, iph->saddr, tpi->key);
158
159 if (!t)
160 return -ENOENT;
161
162 switch (type) {
163 default:
164 case ICMP_PARAMETERPROB:
165 return 0;
166
167 case ICMP_DEST_UNREACH:
168 switch (code) {
169 case ICMP_SR_FAILED:
170 case ICMP_PORT_UNREACH:
171 /* Impossible event. */
172 return 0;
173 default:
174 /* All others are translated to HOST_UNREACH.
175 rfc2003 contains "deep thoughts" about NET_UNREACH,
176 I believe they are just ether pollution. --ANK
177 */
178 break;
179 }
180 break;
181
182 case ICMP_TIME_EXCEEDED:
183 if (code != ICMP_EXC_TTL)
184 return 0;
185 data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
186 break;
187
188 case ICMP_REDIRECT:
189 break;
190 }
191
192#if IS_ENABLED(CONFIG_IPV6)
193 if (tpi->proto == htons(ETH_P_IPV6) &&
194 !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
195 type, data_len))
196 return 0;
197#endif
198
199 if (t->parms.iph.daddr == 0 ||
200 ipv4_is_multicast(t->parms.iph.daddr))
201 return 0;
202
203 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
204 return 0;
205
206 if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
207 t->err_count++;
208 else
209 t->err_count = 1;
210 t->err_time = jiffies;
211
212 return 0;
213}
214
215static void gre_err(struct sk_buff *skb, u32 info)
216{
217 /* All the routers (except for Linux) return only
218 * 8 bytes of packet payload. It means, that precise relaying of
219 * ICMP in the real Internet is absolutely infeasible.
220 *
221 * Moreover, Cisco "wise men" put GRE key to the third word
222 * in GRE header. It makes impossible maintaining even soft
223 * state for keyed
224 * GRE tunnels with enabled checksum. Tell them "thank you".
225 *
226 * Well, I wonder, rfc1812 was written by Cisco employee,
227 * what the hell these idiots break standards established
228 * by themselves???
229 */
230
231 const struct iphdr *iph = (struct iphdr *)skb->data;
232 const int type = icmp_hdr(skb)->type;
233 const int code = icmp_hdr(skb)->code;
234 struct tnl_ptk_info tpi;
235
236 if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
237 iph->ihl * 4) < 0)
238 return;
239
240 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
241 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
242 skb->dev->ifindex, IPPROTO_GRE);
243 return;
244 }
245 if (type == ICMP_REDIRECT) {
246 ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
247 IPPROTO_GRE);
248 return;
249 }
250
251 ipgre_err(skb, info, &tpi);
252}
253
254static bool is_erspan_type1(int gre_hdr_len)
255{
256 /* Both ERSPAN type I (version 0) and type II (version 1) use
257 * protocol 0x88BE, but the type I has only 4-byte GRE header,
258 * while type II has 8-byte.
259 */
260 return gre_hdr_len == 4;
261}
262
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	IP_TUNNEL_DECLARE_FLAGS(flags);
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;

	ip_tunnel_flags_copy(flags, tpi->flags);

	itn = net_generic(net, erspan_net_id);
	iph = ip_hdr(skb);
	if (is_erspan_type1(gre_hdr_len)) {
		ver = 0;
		__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
					  iph->saddr, iph->daddr, 0);
	} else {
		if (unlikely(!pskb_may_pull(skb,
					    gre_hdr_len + sizeof(*ershdr))))
			return PACKET_REJECT;

		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
		ver = ershdr->ver;
		iph = ip_hdr(skb);
		__set_bit(IP_TUNNEL_KEY_BIT, flags);
		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
					  iph->saddr, iph->daddr, tpi->key);
	}

	if (tunnel) {
		if (is_erspan_type1(gre_hdr_len))
			len = gre_hdr_len;
		else
			len = gre_hdr_len + erspan_hdr_len(ver);

		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		if (__iptunnel_pull_header(skb,
					   len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			goto drop;

		if (tunnel->collect_md) {
			struct erspan_metadata *pkt_md, *md;
			struct ip_tunnel_info *info;
			unsigned char *gh;
			__be64 tun_id;

			__set_bit(IP_TUNNEL_KEY_BIT, tpi->flags);
			ip_tunnel_flags_copy(flags, tpi->flags);
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ip_tun_rx_dst(skb, flags,
						tun_id, sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			/* skb can be uncloned in __iptunnel_pull_header, so
			 * old pkt_md is no longer valid and we need to reset
			 * it
			 */
			gh = skb_network_header(skb) +
			     skb_network_header_len(skb);
			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
							    sizeof(*ershdr));
			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);

			info = &tun_dst->u.tun_info;
			__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
				  info->key.tun_flags);
			info->options_len = sizeof(*md);
		}

		skb_reset_mac_header(skb);
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

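/* Common GRE receive path: looks up the tunnel matching the outer IP
 * addresses and key, strips the tunnel headers and passes the skb to
 * ip_tunnel_rcv(). Returns PACKET_RCVD on success, PACKET_REJECT on an
 * unrecoverable error, or PACKET_NEXT if no tunnel matched so the caller
 * may retry against another tunnel table.
 */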
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		const struct iphdr *tnl_params;

		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		/* Special case for ipgre_header_parse(), which expects the
		 * mac_header to point to the outer IP header.
		 */
		if (tunnel->dev->header_ops == &ipgre_header_ops)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);

		tnl_params = &tunnel->parms.iph;
		if (tunnel->collect_md || tnl_params->daddr == 0) {
			IP_TUNNEL_DECLARE_FLAGS(flags) = { };
			__be64 tun_id;

			__set_bit(IP_TUNNEL_CSUM_BIT, flags);
			__set_bit(IP_TUNNEL_KEY_BIT, flags);
			ip_tunnel_flags_and(flags, tpi->flags, flags);

			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}

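/* Entry point for all GRE-over-IPv4 packets, registered through
 * gre_add_protocol() below. Parses the GRE header, dispatches ERSPAN
 * protocols to erspan_rcv() and everything else to ipgre_rcv(); if no
 * tunnel claims the packet, answers with ICMP port unreachable and
 * drops it.
 */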
static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

out:
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

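/* Common transmit tail for configured (non-metadata) tunnels: pushes the
 * GRE header, bumping the output sequence number when IP_TUNNEL_SEQ_BIT
 * is set, then hands the skb to ip_tunnel_xmit() to build the outer IP
 * header.
 */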
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	IP_TUNNEL_DECLARE_FLAGS(flags);

	ip_tunnel_flags_copy(flags, tunnel->parms.o_flags);

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 flags, proto, tunnel->parms.o_key,
			 test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
			 htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

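/* Map the tunnel checksum setting to the matching GSO type so that
 * segmentation keeps the right GRE offload on each segment.
 */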
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

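/* Flow-based (collect_md) transmit path: the outer addresses, key and
 * flags come from the per-skb tunnel metadata rather than from the
 * device configuration.
 */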
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	int tunnel_hlen;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
					      tunnel->parms.o_flags)))
		goto err_free_skb;

	__set_bit(IP_TUNNEL_CSUM_BIT, flags);
	__set_bit(IP_TUNNEL_KEY_BIT, flags);
	__set_bit(IP_TUNNEL_SEQ_BIT, flags);
	ip_tunnel_flags_and(flags, tun_info->key.tun_flags, flags);

	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id),
			 test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
			 htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
}

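/* Flow-based transmit path for ERSPAN: like gre_fb_xmit(), but builds the
 * version-specific ERSPAN header from the metadata options and truncates
 * mirrored frames that exceed the device MTU, recording the truncation in
 * the ERSPAN header.
 */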
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	bool truncate = false;
	__be16 proto;
	int tunnel_hlen;
	int version;
	int nhoff;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	if (!test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_info->key.tun_flags))
		goto err_free_skb;
	if (tun_info->options_len < sizeof(*md))
		goto err_free_skb;
	md = ip_tunnel_info_opts(tun_info);

	/* ERSPAN has a fixed 8-byte GRE header */
	version = md->version;
	tunnel_hlen = 8 + erspan_hdr_len(version);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	if (gre_handle_offloads(skb, false))
		goto err_free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
			goto err_free_skb;
		truncate = true;
	}

	nhoff = skb_network_offset(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		int thoff;

		if (skb_transport_header_was_set(skb))
			thoff = skb_transport_offset(skb);
		else
			thoff = nhoff + sizeof(struct ipv6hdr);
		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
			truncate = true;
	}

	if (version == 1) {
		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
				    ntohl(md->u.index), truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (version == 2) {
		erspan_build_header_v2(skb,
				       ntohl(tunnel_id_to_key32(key->tun_id)),
				       md->u.md2.dir,
				       get_hwid(&md->u.md2),
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto err_free_skb;
	}

	__set_bit(IP_TUNNEL_SEQ_BIT, flags);
	gre_build_header(skb, 8, flags, proto, 0,
			 htonl(atomic_fetch_inc(&tunnel->o_seqno)));

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
}

static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct ip_tunnel_key *key;
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	key = &info->key;
	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
			    tunnel_id_to_key32(key->tun_id),
			    key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
			    skb->mark, skb_get_hash(skb), key->flow_flags);
	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}

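/* ndo_start_xmit for layer-3 "gre" devices. collect_md tunnels take the
 * flow-based path; on devices with header_ops set, the outer IP and GRE
 * headers were already prepended by ipgre_header(), so they are pulled
 * back off and reused as the tunnel parameters before transmission.
 */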
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		int pull_len = tunnel->hlen + sizeof(struct iphdr);

		if (skb_cow_head(skb, 0))
			goto free_skb;

		if (!pskb_may_pull(skb, pull_len))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* ip_tunnel_xmit() needs skb->data pointing to gre header. */
		skb_pull(skb, pull_len);
		skb_reset_mac_header(skb);

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_start(skb) < skb->data)
			goto free_skb;
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
					      tunnel->parms.o_flags)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
	return NETDEV_TX_OK;
}

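/* ndo_start_xmit for "erspan" devices: trims oversized frames (recording
 * the truncation), pushes the ERSPAN header matching the configured
 * version and finishes through the common GRE transmit path.
 */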
static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;
	__be16 proto;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
			goto free_skb;
		truncate = true;
	}

	/* Push ERSPAN header */
	if (tunnel->erspan_ver == 0) {
		proto = htons(ETH_P_ERSPAN);
		__clear_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags);
	} else if (tunnel->erspan_ver == 1) {
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (tunnel->erspan_ver == 2) {
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto free_skb;
	}

	__clear_bit(IP_TUNNEL_KEY_BIT, tunnel->parms.o_flags);
	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
	return NETDEV_TX_OK;
}

static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
					      tunnel->parms.o_flags)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
	return NETDEV_TX_OK;
}

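/* Recompute the tunnel header length after the output flags changed,
 * propagate the difference into the device's headroom and MTU, and
 * re-evaluate whether software GSO can stay enabled (it cannot with SEQ,
 * or with CSUM on top of an encap header).
 */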
static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int len;

	len = tunnel->tun_hlen;
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	len = tunnel->tun_hlen - len;
	tunnel->hlen = tunnel->hlen + len;

	if (dev->header_ops)
		dev->hard_header_len += len;
	else
		dev->needed_headroom += len;

	if (set_mtu)
		WRITE_ONCE(dev->mtu, max_t(int, dev->mtu - len, 68));

	if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags) ||
	    (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
	     tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
		dev->features &= ~NETIF_F_GSO_SOFTWARE;
		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
	} else {
		dev->features |= NETIF_F_GSO_SOFTWARE;
		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	}
}

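/* ioctl-based tunnel configuration (SIOCADDTUNNEL and friends): checks
 * that the supplied parameters describe a plain GRE-over-IPv4 tunnel,
 * converts between GRE wire flags and internal tunnel flags, and lets
 * the generic ip_tunnel_ctl() do the actual work.
 */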
static int ipgre_tunnel_ctl(struct net_device *dev,
			    struct ip_tunnel_parm_kern *p,
			    int cmd)
{
	__be16 i_flags, o_flags;
	int err;

	if (!ip_tunnel_flags_is_be16_compat(p->i_flags) ||
	    !ip_tunnel_flags_is_be16_compat(p->o_flags))
		return -EOVERFLOW;

	i_flags = ip_tunnel_flags_to_be16(p->i_flags);
	o_flags = ip_tunnel_flags_to_be16(p->o_flags);

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
		    ((i_flags | o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	gre_flags_to_tnl_flags(p->i_flags, i_flags);
	gre_flags_to_tnl_flags(p->o_flags, o_flags);

	err = ip_tunnel_ctl(dev, p, cmd);
	if (err)
		return err;

	if (cmd == SIOCCHGTUNNEL) {
		struct ip_tunnel *t = netdev_priv(dev);

		ip_tunnel_flags_copy(t->parms.i_flags, p->i_flags);
		ip_tunnel_flags_copy(t->parms.o_flags, p->o_flags);

		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
			ipgre_link_update(dev, true);
	}

	i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
	ip_tunnel_flags_from_be16(p->i_flags, i_flags);
	o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
	ip_tunnel_flags_from_be16(p->o_flags, o_flags);

	return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could have made something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph+1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
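/* For broadcast GRE (multicast destination), the tunnel must join the
 * multicast group on the underlying device before it can receive;
 * ipgre_open()/ipgre_close() manage that group membership.
 */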
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 t->parms.iph.tos & INET_DSCP_MASK,
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_siocdevprivate	= ip_tunnel_siocdevprivate,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_tunnel_ctl		= ipgre_tunnel_ctl,
};

#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ipgre_netdev_ops;
	dev->type = ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}

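/* Shared device init for gre/gretap: derives the header lengths from the
 * configured output flags and advertises only the offload features that
 * remain usable with them.
 */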
static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
	dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);

	dev->features |= GRE_FEATURES;
	dev->hw_features |= GRE_FEATURES;

	/* TCP offload with GRE SEQ is not supported, nor can we support 2
	 * levels of outer headers requiring an update.
	 */
	if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags))
		return;
	if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
	    tunnel->encap.type != TUNNEL_ENCAP_NONE)
		return;

	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;

	dev->lltx = true;
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	__dev_addr_set(dev, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags = IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len = 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
			dev->hard_header_len = tunnel->hlen + sizeof(*iph);
			dev->needed_headroom = 0;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
		dev->hard_header_len = tunnel->hlen + sizeof(*iph);
		dev->needed_headroom = 0;
	}

	return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_batch_rtnl(struct list_head *list_net,
					     struct list_head *dev_to_kill)
{
	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops,
			      dev_to_kill);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit_batch_rtnl = ipgre_exit_batch_rtnl,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data, extack);
}

static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret;

	if (!data)
		return 0;

	ret = ipgre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	if (data[IFLA_GRE_ERSPAN_VER] &&
	    nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
		return 0;

	/* ERSPAN type II/III should only have GRE sequence and key flag */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID is only 10 bits wide. Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	return 0;
}

static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct nlattr *tb[],
			       struct ip_tunnel_parm_kern *parms,
			       __u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		gre_flags_to_tnl_flags(parms->i_flags,
				       nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		gre_flags_to_tnl_flags(parms->o_flags,
				       nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		    && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	return 0;
}

static int erspan_netlink_parms(struct net_device *dev,
				struct nlattr *data[],
				struct nlattr *tb[],
				struct ip_tunnel_parm_kern *parms,
				__u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);
	int err;

	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
	if (err)
		return err;
	if (!data)
		return 0;

	if (data[IFLA_GRE_ERSPAN_VER]) {
		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

		if (t->erspan_ver > 2)
			return -EINVAL;
	}

	if (t->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
			if (t->index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (t->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}
		if (data[IFLA_GRE_ERSPAN_HWID]) {
			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}

/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static int erspan_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->erspan_ver == 0)
		tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
	else
		tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */

	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->erspan_ver);

	dev->features |= GRE_FEATURES;
	dev->hw_features |= GRE_FEATURES;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops = &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}

static int
ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
{
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	return 0;
}

static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm_kern p;
	__u32 fwmark = 0;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int erspan_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm_kern p;
	__u32 fwmark = 0;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm_kern p;
	__u32 fwmark = t->fwmark;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
	ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);

	ipgre_link_update(dev, !tb[IFLA_MTU]);

	return 0;
}

static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm_kern p;
	__u32 fwmark = t->fwmark;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
	ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);

	return 0;
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_VER */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_DIR */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_HWID */
		nla_total_size(2) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm_kern *p = &t->parms;
	IP_TUNNEL_DECLARE_FLAGS(o_flags);

	ip_tunnel_flags_copy(o_flags, p->o_flags);

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (t->erspan_ver <= 2) {
		if (t->erspan_ver != 0 && !t->collect_md)
			__set_bit(IP_TUNNEL_KEY_BIT, t->parms.o_flags);

		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
			goto nla_put_failure;

		if (t->erspan_ver == 1) {
			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
				goto nla_put_failure;
		} else if (t->erspan_ver == 2) {
			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
				goto nla_put_failure;
		}
	}

	return ipgre_fill_info(skb, dev);

nla_put_failure:
	return -EMSGSIZE;
}

static void erspan_setup(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
	t->erspan_ver = 1;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops erspan_link_ops __read_mostly = {
	.kind		= "erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= erspan_setup,
	.validate	= erspan_validate,
	.newlink	= erspan_newlink,
	.changelink	= erspan_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= erspan_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

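/* Create a flow-based (collect_md) gretap device; exported for users such
 * as openvswitch that need an on-demand fallback tunnel port.
 */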
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb, NULL);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL, 0, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);

static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_batch_rtnl(struct list_head *list_net,
						 struct list_head *dev_to_kill)
{
	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops,
			      dev_to_kill);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit_batch_rtnl = ipgre_tap_exit_batch_rtnl,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __net_init erspan_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");
}

static void __net_exit erspan_exit_batch_rtnl(struct list_head *net_list,
					      struct list_head *dev_to_kill)
{
	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops,
			      dev_to_kill);
}

static struct pernet_operations erspan_net_ops = {
	.init = erspan_init_net,
	.exit_batch_rtnl = erspan_exit_batch_rtnl,
	.id   = &erspan_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_DESCRIPTION("IPv4 GRE tunnels over IP library");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");