1/*
2 * VXLAN: Virtual eXtensible Local Area Network
3 *
4 * Copyright (c) 2012-2013 Vyatta Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/errno.h>
16#include <linux/slab.h>
17#include <linux/udp.h>
18#include <linux/igmp.h>
19#include <linux/if_ether.h>
20#include <linux/ethtool.h>
21#include <net/arp.h>
22#include <net/ndisc.h>
23#include <net/ip.h>
24#include <net/icmp.h>
25#include <net/rtnetlink.h>
26#include <net/inet_ecn.h>
27#include <net/net_namespace.h>
28#include <net/netns/generic.h>
29#include <net/tun_proto.h>
30#include <net/vxlan.h>
31
32#if IS_ENABLED(CONFIG_IPV6)
33#include <net/ip6_tunnel.h>
34#include <net/ip6_checksum.h>
35#endif
36
37#define VXLAN_VERSION "0.1"
38
39#define PORT_HASH_BITS 8
40#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
41#define FDB_AGE_DEFAULT 300 /* 5 min */
42#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
43
44/* UDP port for VXLAN traffic.
 * The IANA-assigned port is 4789, but the Linux default is 8472
46 * for compatibility with early adopters.
47 */
48static unsigned short vxlan_port __read_mostly = 8472;
49module_param_named(udp_port, vxlan_port, ushort, 0444);
50MODULE_PARM_DESC(udp_port, "Destination UDP port");
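/* The IANA port can still be selected per device, e.g. with iproute2
 * (illustrative): ip link add vxlan0 type vxlan id 42 dstport 4789 dev eth0
 */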
51
52static bool log_ecn_error = true;
53module_param(log_ecn_error, bool, 0644);
54MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
55
56static unsigned int vxlan_net_id;
57static struct rtnl_link_ops vxlan_link_ops;
58
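/* Two pad bytes keep eth_hash()'s unaligned 64-bit load within the array. */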
59static const u8 all_zeros_mac[ETH_ALEN + 2];
60
61static int vxlan_sock_add(struct vxlan_dev *vxlan);
62
63static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
64
65/* per-network namespace private data for this module */
66struct vxlan_net {
67 struct list_head vxlan_list;
68 struct hlist_head sock_list[PORT_HASH_SIZE];
69 spinlock_t sock_lock;
70};
71
72/* Forwarding table entry */
73struct vxlan_fdb {
74 struct hlist_node hlist; /* linked list of entries */
75 struct rcu_head rcu;
76 unsigned long updated; /* jiffies */
77 unsigned long used;
78 struct list_head remotes;
79 u8 eth_addr[ETH_ALEN];
80 u16 state; /* see ndm_state */
81 __be32 vni;
82 u8 flags; /* see ndm_flags */
83};
84
/* random salt for the FDB hash, mixed in via eth_vni_hash() */
86static u32 vxlan_salt __read_mostly;
87
88static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
89{
90 return vs->flags & VXLAN_F_COLLECT_METADATA ||
91 ip_tunnel_collect_metadata();
92}
93
94#if IS_ENABLED(CONFIG_IPV6)
95static inline
96bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
97{
98 if (a->sa.sa_family != b->sa.sa_family)
99 return false;
100 if (a->sa.sa_family == AF_INET6)
101 return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
102 else
103 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
104}
105
106static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
107{
108 if (ipa->sa.sa_family == AF_INET6)
109 return ipv6_addr_any(&ipa->sin6.sin6_addr);
110 else
111 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
112}
113
114static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
115{
116 if (ipa->sa.sa_family == AF_INET6)
117 return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
118 else
119 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
120}
121
122static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
123{
124 if (nla_len(nla) >= sizeof(struct in6_addr)) {
125 ip->sin6.sin6_addr = nla_get_in6_addr(nla);
126 ip->sa.sa_family = AF_INET6;
127 return 0;
128 } else if (nla_len(nla) >= sizeof(__be32)) {
129 ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
130 ip->sa.sa_family = AF_INET;
131 return 0;
132 } else {
133 return -EAFNOSUPPORT;
134 }
135}
136
137static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
138 const union vxlan_addr *ip)
139{
140 if (ip->sa.sa_family == AF_INET6)
141 return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
142 else
143 return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
144}
145
146#else /* !CONFIG_IPV6 */
147
148static inline
149bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
150{
151 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
152}
153
154static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
155{
156 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
157}
158
159static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
160{
161 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
162}
163
164static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
165{
166 if (nla_len(nla) >= sizeof(struct in6_addr)) {
167 return -EAFNOSUPPORT;
168 } else if (nla_len(nla) >= sizeof(__be32)) {
169 ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
170 ip->sa.sa_family = AF_INET;
171 return 0;
172 } else {
173 return -EAFNOSUPPORT;
174 }
175}
176
177static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
178 const union vxlan_addr *ip)
179{
180 return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
181}
182#endif
183
184/* Virtual Network hash table head */
185static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
186{
187 return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
188}
189
190/* Socket hash table head */
191static inline struct hlist_head *vs_head(struct net *net, __be16 port)
192{
193 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
194
195 return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
196}
197
198/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because a forwarding entry always keeps at
 * least one remote.
200 */
201static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
202{
203 return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
204}
205
206static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
207{
208 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
209}
210
/* Find VXLAN socket based on network namespace, address family, UDP port
212 * and enabled unshareable flags.
213 */
214static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
215 __be16 port, u32 flags)
216{
217 struct vxlan_sock *vs;
218
219 flags &= VXLAN_F_RCV_FLAGS;
220
221 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
222 if (inet_sk(vs->sock->sk)->inet_sport == port &&
223 vxlan_get_sk_family(vs) == family &&
224 vs->flags == flags)
225 return vs;
226 }
227 return NULL;
228}
229
230static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
231 __be32 vni)
232{
233 struct vxlan_dev_node *node;
234
235 /* For flow based devices, map all packets to VNI 0 */
236 if (vs->flags & VXLAN_F_COLLECT_METADATA)
237 vni = 0;
238
239 hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
240 if (node->vxlan->default_dst.remote_vni != vni)
241 continue;
242
243 if (IS_ENABLED(CONFIG_IPV6)) {
244 const struct vxlan_config *cfg = &node->vxlan->cfg;
245
246 if ((cfg->flags & VXLAN_F_IPV6_LINKLOCAL) &&
247 cfg->remote_ifindex != ifindex)
248 continue;
249 }
250
251 return node->vxlan;
252 }
253
254 return NULL;
255}
256
257/* Look up VNI in a per net namespace table */
258static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex,
259 __be32 vni, sa_family_t family,
260 __be16 port, u32 flags)
261{
262 struct vxlan_sock *vs;
263
264 vs = vxlan_find_sock(net, family, port, flags);
265 if (!vs)
266 return NULL;
267
268 return vxlan_vs_find_vni(vs, ifindex, vni);
269}
270
271/* Fill in neighbour message in skbuff. */
272static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
273 const struct vxlan_fdb *fdb,
274 u32 portid, u32 seq, int type, unsigned int flags,
275 const struct vxlan_rdst *rdst)
276{
277 unsigned long now = jiffies;
278 struct nda_cacheinfo ci;
279 struct nlmsghdr *nlh;
280 struct ndmsg *ndm;
281 bool send_ip, send_eth;
282
283 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
284 if (nlh == NULL)
285 return -EMSGSIZE;
286
287 ndm = nlmsg_data(nlh);
288 memset(ndm, 0, sizeof(*ndm));
289
290 send_eth = send_ip = true;
291
292 if (type == RTM_GETNEIGH) {
293 send_ip = !vxlan_addr_any(&rdst->remote_ip);
294 send_eth = !is_zero_ether_addr(fdb->eth_addr);
295 ndm->ndm_family = send_ip ? rdst->remote_ip.sa.sa_family : AF_INET;
296 } else
297 ndm->ndm_family = AF_BRIDGE;
298 ndm->ndm_state = fdb->state;
299 ndm->ndm_ifindex = vxlan->dev->ifindex;
300 ndm->ndm_flags = fdb->flags;
301 ndm->ndm_type = RTN_UNICAST;
302
303 if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
304 nla_put_s32(skb, NDA_LINK_NETNSID,
305 peernet2id(dev_net(vxlan->dev), vxlan->net)))
306 goto nla_put_failure;
307
308 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
309 goto nla_put_failure;
310
311 if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
312 goto nla_put_failure;
313
314 if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
315 nla_put_be16(skb, NDA_PORT, rdst->remote_port))
316 goto nla_put_failure;
317 if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
318 nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
319 goto nla_put_failure;
320 if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
321 nla_put_u32(skb, NDA_SRC_VNI,
322 be32_to_cpu(fdb->vni)))
323 goto nla_put_failure;
324 if (rdst->remote_ifindex &&
325 nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
326 goto nla_put_failure;
327
328 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
329 ci.ndm_confirmed = 0;
330 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
331 ci.ndm_refcnt = 0;
332
333 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
334 goto nla_put_failure;
335
336 nlmsg_end(skb, nlh);
337 return 0;
338
339nla_put_failure:
340 nlmsg_cancel(skb, nlh);
341 return -EMSGSIZE;
342}
343
344static inline size_t vxlan_nlmsg_size(void)
345{
346 return NLMSG_ALIGN(sizeof(struct ndmsg))
347 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
348 + nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
349 + nla_total_size(sizeof(__be16)) /* NDA_PORT */
350 + nla_total_size(sizeof(__be32)) /* NDA_VNI */
351 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
352 + nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
353 + nla_total_size(sizeof(struct nda_cacheinfo));
354}
355
356static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
357 struct vxlan_rdst *rd, int type)
358{
359 struct net *net = dev_net(vxlan->dev);
360 struct sk_buff *skb;
361 int err = -ENOBUFS;
362
363 skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
364 if (skb == NULL)
365 goto errout;
366
367 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
368 if (err < 0) {
369 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
370 WARN_ON(err == -EMSGSIZE);
371 kfree_skb(skb);
372 goto errout;
373 }
374
375 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
376 return;
377errout:
378 if (err < 0)
379 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
380}
381
382static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
383{
384 struct vxlan_dev *vxlan = netdev_priv(dev);
385 struct vxlan_fdb f = {
386 .state = NUD_STALE,
387 };
388 struct vxlan_rdst remote = {
389 .remote_ip = *ipa, /* goes to NDA_DST */
390 .remote_vni = cpu_to_be32(VXLAN_N_VID),
391 };
392
393 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
394}
395
396static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
397{
398 struct vxlan_fdb f = {
399 .state = NUD_STALE,
400 };
401 struct vxlan_rdst remote = { };
402
403 memcpy(f.eth_addr, eth_addr, ETH_ALEN);
404
405 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
406}
407
408/* Hash Ethernet address */
409static u32 eth_hash(const unsigned char *addr)
410{
411 u64 value = get_unaligned((u64 *)addr);
412
	/* get_unaligned() read 8 bytes (the 6-byte MAC plus 2 trailing
	 * bytes); shift the extra 16 bits out so only the MAC feeds the hash.
	 */
414#ifdef __BIG_ENDIAN
415 value >>= 16;
416#else
417 value <<= 16;
418#endif
419 return hash_64(value, FDB_HASH_BITS);
420}
421
422static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
423{
424 /* use 1 byte of OUI and 3 bytes of NIC */
425 u32 key = get_unaligned((u32 *)(addr + 2));
426
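	/* In COLLECT_METADATA mode the FDB is keyed by (MAC, VNI), so mix
	 * the VNI into the bucket choice as well (see vxlan_fdb_head()).
	 */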
427 return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
428}
429
430/* Hash chain to use given mac address */
431static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
432 const u8 *mac, __be32 vni)
433{
434 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
435 return &vxlan->fdb_head[eth_vni_hash(mac, vni)];
436 else
437 return &vxlan->fdb_head[eth_hash(mac)];
438}
439
440/* Look up Ethernet address in forwarding table */
441static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
442 const u8 *mac, __be32 vni)
443{
444 struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni);
445 struct vxlan_fdb *f;
446
447 hlist_for_each_entry_rcu(f, head, hlist) {
448 if (ether_addr_equal(mac, f->eth_addr)) {
449 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
450 if (vni == f->vni)
451 return f;
452 } else {
453 return f;
454 }
455 }
456 }
457
458 return NULL;
459}
460
461static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
462 const u8 *mac, __be32 vni)
463{
464 struct vxlan_fdb *f;
465
466 f = __vxlan_find_mac(vxlan, mac, vni);
467 if (f)
468 f->used = jiffies;
469
470 return f;
471}
472
473/* caller should hold vxlan->hash_lock */
474static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
475 union vxlan_addr *ip, __be16 port,
476 __be32 vni, __u32 ifindex)
477{
478 struct vxlan_rdst *rd;
479
480 list_for_each_entry(rd, &f->remotes, list) {
481 if (vxlan_addr_equal(&rd->remote_ip, ip) &&
482 rd->remote_port == port &&
483 rd->remote_vni == vni &&
484 rd->remote_ifindex == ifindex)
485 return rd;
486 }
487
488 return NULL;
489}
490
491/* Replace destination of unicast mac */
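/* Returns 1 if the remote was rewritten, 0 if it already matched or the
 * entry has no remote to replace.
 */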
492static int vxlan_fdb_replace(struct vxlan_fdb *f,
493 union vxlan_addr *ip, __be16 port, __be32 vni,
494 __u32 ifindex)
495{
496 struct vxlan_rdst *rd;
497
498 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
499 if (rd)
500 return 0;
501
502 rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
503 if (!rd)
504 return 0;
505
506 dst_cache_reset(&rd->dst_cache);
507 rd->remote_ip = *ip;
508 rd->remote_port = port;
509 rd->remote_vni = vni;
510 rd->remote_ifindex = ifindex;
511 return 1;
512}
513
514/* Add/update destinations for multicast */
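/* Returns 1 if a new remote was appended, 0 if it already existed, or a
 * negative errno on allocation failure.
 */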
515static int vxlan_fdb_append(struct vxlan_fdb *f,
516 union vxlan_addr *ip, __be16 port, __be32 vni,
517 __u32 ifindex, struct vxlan_rdst **rdp)
518{
519 struct vxlan_rdst *rd;
520
521 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
522 if (rd)
523 return 0;
524
525 rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
526 if (rd == NULL)
527 return -ENOBUFS;
528
529 if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
530 kfree(rd);
531 return -ENOBUFS;
532 }
533
534 rd->remote_ip = *ip;
535 rd->remote_port = port;
536 rd->remote_vni = vni;
537 rd->remote_ifindex = ifindex;
538
539 list_add_tail_rcu(&rd->list, &f->remotes);
540
541 *rdp = rd;
542 return 1;
543}
544
545static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
546 unsigned int off,
547 struct vxlanhdr *vh, size_t hdrlen,
548 __be32 vni_field,
549 struct gro_remcsum *grc,
550 bool nopartial)
551{
552 size_t start, offset;
553
554 if (skb->remcsum_offload)
555 return vh;
556
557 if (!NAPI_GRO_CB(skb)->csum_valid)
558 return NULL;
559
560 start = vxlan_rco_start(vni_field);
561 offset = start + vxlan_rco_offset(vni_field);
562
563 vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
564 start, offset, grc, nopartial);
565
566 skb->remcsum_offload = 1;
567
568 return vh;
569}
570
571static struct sk_buff **vxlan_gro_receive(struct sock *sk,
572 struct sk_buff **head,
573 struct sk_buff *skb)
574{
575 struct sk_buff *p, **pp = NULL;
576 struct vxlanhdr *vh, *vh2;
577 unsigned int hlen, off_vx;
578 int flush = 1;
579 struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
580 __be32 flags;
581 struct gro_remcsum grc;
582
583 skb_gro_remcsum_init(&grc);
584
585 off_vx = skb_gro_offset(skb);
586 hlen = off_vx + sizeof(*vh);
587 vh = skb_gro_header_fast(skb, off_vx);
588 if (skb_gro_header_hard(skb, hlen)) {
589 vh = skb_gro_header_slow(skb, hlen, off_vx);
590 if (unlikely(!vh))
591 goto out;
592 }
593
594 skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
595
596 flags = vh->vx_flags;
597
598 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
599 vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
600 vh->vx_vni, &grc,
601 !!(vs->flags &
602 VXLAN_F_REMCSUM_NOPARTIAL));
603
604 if (!vh)
605 goto out;
606 }
607
608 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
609
610 for (p = *head; p; p = p->next) {
611 if (!NAPI_GRO_CB(p)->same_flow)
612 continue;
613
614 vh2 = (struct vxlanhdr *)(p->data + off_vx);
615 if (vh->vx_flags != vh2->vx_flags ||
616 vh->vx_vni != vh2->vx_vni) {
617 NAPI_GRO_CB(p)->same_flow = 0;
618 continue;
619 }
620 }
621
622 pp = call_gro_receive(eth_gro_receive, head, skb);
623 flush = 0;
624
625out:
626 skb_gro_remcsum_cleanup(skb, &grc);
627 skb->remcsum_offload = 0;
628 NAPI_GRO_CB(skb)->flush |= flush;
629
630 return pp;
631}
632
633static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
634{
635 /* Sets 'skb->inner_mac_header' since we are always called with
636 * 'skb->encapsulation' set.
637 */
638 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
639}
640
641/* Add new entry to forwarding table -- assumes lock held */
642static int vxlan_fdb_create(struct vxlan_dev *vxlan,
643 const u8 *mac, union vxlan_addr *ip,
644 __u16 state, __u16 flags,
645 __be16 port, __be32 src_vni, __be32 vni,
646 __u32 ifindex, __u8 ndm_flags)
647{
648 struct vxlan_rdst *rd = NULL;
649 struct vxlan_fdb *f;
650 int notify = 0;
651 int rc;
652
653 f = __vxlan_find_mac(vxlan, mac, src_vni);
654 if (f) {
655 if (flags & NLM_F_EXCL) {
656 netdev_dbg(vxlan->dev,
657 "lost race to create %pM\n", mac);
658 return -EEXIST;
659 }
660 if (f->state != state) {
661 f->state = state;
662 f->updated = jiffies;
663 notify = 1;
664 }
665 if (f->flags != ndm_flags) {
666 f->flags = ndm_flags;
667 f->updated = jiffies;
668 notify = 1;
669 }
670 if ((flags & NLM_F_REPLACE)) {
671 /* Only change unicasts */
672 if (!(is_multicast_ether_addr(f->eth_addr) ||
673 is_zero_ether_addr(f->eth_addr))) {
674 notify |= vxlan_fdb_replace(f, ip, port, vni,
675 ifindex);
676 } else
677 return -EOPNOTSUPP;
678 }
679 if ((flags & NLM_F_APPEND) &&
680 (is_multicast_ether_addr(f->eth_addr) ||
681 is_zero_ether_addr(f->eth_addr))) {
682 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
683
684 if (rc < 0)
685 return rc;
686 notify |= rc;
687 }
688 } else {
689 if (!(flags & NLM_F_CREATE))
690 return -ENOENT;
691
692 if (vxlan->cfg.addrmax &&
693 vxlan->addrcnt >= vxlan->cfg.addrmax)
694 return -ENOSPC;
695
696 /* Disallow replace to add a multicast entry */
697 if ((flags & NLM_F_REPLACE) &&
698 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
699 return -EOPNOTSUPP;
700
701 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
702 f = kmalloc(sizeof(*f), GFP_ATOMIC);
703 if (!f)
704 return -ENOMEM;
705
706 notify = 1;
707 f->state = state;
708 f->flags = ndm_flags;
709 f->updated = f->used = jiffies;
710 f->vni = src_vni;
711 INIT_LIST_HEAD(&f->remotes);
712 memcpy(f->eth_addr, mac, ETH_ALEN);
713
714 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
715 if (rc < 0) {
716 kfree(f);
717 return rc;
718 }
719
720 ++vxlan->addrcnt;
721 hlist_add_head_rcu(&f->hlist,
722 vxlan_fdb_head(vxlan, mac, src_vni));
723 }
724
725 if (notify) {
726 if (rd == NULL)
727 rd = first_remote_rtnl(f);
728 vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
729 }
730
731 return 0;
732}
733
734static void vxlan_fdb_free(struct rcu_head *head)
735{
736 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
737 struct vxlan_rdst *rd, *nd;
738
739 list_for_each_entry_safe(rd, nd, &f->remotes, list) {
740 dst_cache_destroy(&rd->dst_cache);
741 kfree(rd);
742 }
743 kfree(f);
744}
745
746static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
747{
748 netdev_dbg(vxlan->dev,
749 "delete %pM\n", f->eth_addr);
750
751 --vxlan->addrcnt;
752 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
753
754 hlist_del_rcu(&f->hlist);
755 call_rcu(&f->rcu, vxlan_fdb_free);
756}
757
758static void vxlan_dst_free(struct rcu_head *head)
759{
760 struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
761
762 dst_cache_destroy(&rd->dst_cache);
763 kfree(rd);
764}
765
766static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
767 struct vxlan_rdst *rd)
768{
769 list_del_rcu(&rd->list);
770 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
771 call_rcu(&rd->rcu, vxlan_dst_free);
772}
773
774static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
775 union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
776 __be32 *vni, u32 *ifindex)
777{
778 struct net *net = dev_net(vxlan->dev);
779 int err;
780
781 if (tb[NDA_DST]) {
782 err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
783 if (err)
784 return err;
785 } else {
786 union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
787 if (remote->sa.sa_family == AF_INET) {
788 ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
789 ip->sa.sa_family = AF_INET;
790#if IS_ENABLED(CONFIG_IPV6)
791 } else {
792 ip->sin6.sin6_addr = in6addr_any;
793 ip->sa.sa_family = AF_INET6;
794#endif
795 }
796 }
797
798 if (tb[NDA_PORT]) {
799 if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
800 return -EINVAL;
801 *port = nla_get_be16(tb[NDA_PORT]);
802 } else {
803 *port = vxlan->cfg.dst_port;
804 }
805
806 if (tb[NDA_VNI]) {
807 if (nla_len(tb[NDA_VNI]) != sizeof(u32))
808 return -EINVAL;
809 *vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
810 } else {
811 *vni = vxlan->default_dst.remote_vni;
812 }
813
814 if (tb[NDA_SRC_VNI]) {
815 if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32))
816 return -EINVAL;
817 *src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI]));
818 } else {
819 *src_vni = vxlan->default_dst.remote_vni;
820 }
821
822 if (tb[NDA_IFINDEX]) {
823 struct net_device *tdev;
824
825 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
826 return -EINVAL;
827 *ifindex = nla_get_u32(tb[NDA_IFINDEX]);
828 tdev = __dev_get_by_index(net, *ifindex);
829 if (!tdev)
830 return -EADDRNOTAVAIL;
831 } else {
832 *ifindex = 0;
833 }
834
835 return 0;
836}
837
838/* Add static entry (via netlink) */
839static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
840 struct net_device *dev,
841 const unsigned char *addr, u16 vid, u16 flags)
842{
843 struct vxlan_dev *vxlan = netdev_priv(dev);
844 /* struct net *net = dev_net(vxlan->dev); */
845 union vxlan_addr ip;
846 __be16 port;
847 __be32 src_vni, vni;
848 u32 ifindex;
849 int err;
850
851 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
852 pr_info("RTM_NEWNEIGH with invalid state %#x\n",
853 ndm->ndm_state);
854 return -EINVAL;
855 }
856
857 if (tb[NDA_DST] == NULL)
858 return -EINVAL;
859
860 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
861 if (err)
862 return err;
863
864 if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
865 return -EAFNOSUPPORT;
866
867 spin_lock_bh(&vxlan->hash_lock);
868 err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
869 port, src_vni, vni, ifindex, ndm->ndm_flags);
870 spin_unlock_bh(&vxlan->hash_lock);
871
872 return err;
873}
874
875static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
876 const unsigned char *addr, union vxlan_addr ip,
877 __be16 port, __be32 src_vni, __be32 vni,
878 u32 ifindex, u16 vid)
879{
880 struct vxlan_fdb *f;
881 struct vxlan_rdst *rd = NULL;
882 int err = -ENOENT;
883
884 f = vxlan_find_mac(vxlan, addr, src_vni);
885 if (!f)
886 return err;
887
888 if (!vxlan_addr_any(&ip)) {
889 rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
890 if (!rd)
891 goto out;
892 }
893
894 /* remove a destination if it's not the only one on the list,
895 * otherwise destroy the fdb entry
896 */
897 if (rd && !list_is_singular(&f->remotes)) {
898 vxlan_fdb_dst_destroy(vxlan, f, rd);
899 goto out;
900 }
901
902 vxlan_fdb_destroy(vxlan, f);
903
904out:
905 return 0;
906}
907
908/* Delete entry (via netlink) */
909static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
910 struct net_device *dev,
911 const unsigned char *addr, u16 vid)
912{
913 struct vxlan_dev *vxlan = netdev_priv(dev);
914 union vxlan_addr ip;
915 __be32 src_vni, vni;
916 __be16 port;
917 u32 ifindex;
918 int err;
919
920 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
921 if (err)
922 return err;
923
924 spin_lock_bh(&vxlan->hash_lock);
925 err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
926 vid);
927 spin_unlock_bh(&vxlan->hash_lock);
928
929 return err;
930}
931
932/* Dump forwarding table */
933static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
934 struct net_device *dev,
935 struct net_device *filter_dev, int *idx)
936{
937 struct vxlan_dev *vxlan = netdev_priv(dev);
938 unsigned int h;
939 int err = 0;
940
941 for (h = 0; h < FDB_HASH_SIZE; ++h) {
942 struct vxlan_fdb *f;
943
944 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
945 struct vxlan_rdst *rd;
946
947 list_for_each_entry_rcu(rd, &f->remotes, list) {
948 if (*idx < cb->args[2])
949 goto skip;
950
951 err = vxlan_fdb_info(skb, vxlan, f,
952 NETLINK_CB(cb->skb).portid,
953 cb->nlh->nlmsg_seq,
954 RTM_NEWNEIGH,
955 NLM_F_MULTI, rd);
956 if (err < 0)
957 goto out;
958skip:
959 *idx += 1;
960 }
961 }
962 }
963out:
964 return err;
965}
966
967/* Watch incoming packets to learn mapping between Ethernet address
968 * and Tunnel endpoint.
969 * Return true if packet is bogus and should be dropped.
970 */
971static bool vxlan_snoop(struct net_device *dev,
972 union vxlan_addr *src_ip, const u8 *src_mac,
973 u32 src_ifindex, __be32 vni)
974{
975 struct vxlan_dev *vxlan = netdev_priv(dev);
976 struct vxlan_fdb *f;
977 u32 ifindex = 0;
978
979#if IS_ENABLED(CONFIG_IPV6)
980 if (src_ip->sa.sa_family == AF_INET6 &&
981 (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
982 ifindex = src_ifindex;
983#endif
984
985 f = vxlan_find_mac(vxlan, src_mac, vni);
986 if (likely(f)) {
987 struct vxlan_rdst *rdst = first_remote_rcu(f);
988
989 if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip) &&
990 rdst->remote_ifindex == ifindex))
991 return false;
992
993 /* Don't migrate static entries, drop packets */
994 if (f->state & (NUD_PERMANENT | NUD_NOARP))
995 return true;
996
997 if (net_ratelimit())
998 netdev_info(dev,
999 "%pM migrated from %pIS to %pIS\n",
1000 src_mac, &rdst->remote_ip.sa, &src_ip->sa);
1001
1002 rdst->remote_ip = *src_ip;
1003 f->updated = jiffies;
1004 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
1005 } else {
1006 /* learned new entry */
1007 spin_lock(&vxlan->hash_lock);
1008
1009 /* close off race between vxlan_flush and incoming packets */
1010 if (netif_running(dev))
1011 vxlan_fdb_create(vxlan, src_mac, src_ip,
1012 NUD_REACHABLE,
1013 NLM_F_EXCL|NLM_F_CREATE,
1014 vxlan->cfg.dst_port,
1015 vni,
1016 vxlan->default_dst.remote_vni,
1017 ifindex, NTF_SELF);
1018 spin_unlock(&vxlan->hash_lock);
1019 }
1020
1021 return false;
1022}
1023
1024/* See if multicast group is already in use by other ID */
1025static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
1026{
1027 struct vxlan_dev *vxlan;
1028 struct vxlan_sock *sock4;
1029#if IS_ENABLED(CONFIG_IPV6)
1030 struct vxlan_sock *sock6;
1031#endif
1032 unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
1033
1034 sock4 = rtnl_dereference(dev->vn4_sock);
1035
1036 /* The vxlan_sock is only used by dev, leaving group has
1037 * no effect on other vxlan devices.
1038 */
1039 if (family == AF_INET && sock4 && refcount_read(&sock4->refcnt) == 1)
1040 return false;
1041#if IS_ENABLED(CONFIG_IPV6)
1042 sock6 = rtnl_dereference(dev->vn6_sock);
1043 if (family == AF_INET6 && sock6 && refcount_read(&sock6->refcnt) == 1)
1044 return false;
1045#endif
1046
1047 list_for_each_entry(vxlan, &vn->vxlan_list, next) {
1048 if (!netif_running(vxlan->dev) || vxlan == dev)
1049 continue;
1050
1051 if (family == AF_INET &&
1052 rtnl_dereference(vxlan->vn4_sock) != sock4)
1053 continue;
1054#if IS_ENABLED(CONFIG_IPV6)
1055 if (family == AF_INET6 &&
1056 rtnl_dereference(vxlan->vn6_sock) != sock6)
1057 continue;
1058#endif
1059
1060 if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
1061 &dev->default_dst.remote_ip))
1062 continue;
1063
1064 if (vxlan->default_dst.remote_ifindex !=
1065 dev->default_dst.remote_ifindex)
1066 continue;
1067
1068 return true;
1069 }
1070
1071 return false;
1072}
1073
1074static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
1075{
1076 struct vxlan_net *vn;
1077
1078 if (!vs)
1079 return false;
1080 if (!refcount_dec_and_test(&vs->refcnt))
1081 return false;
1082
1083 vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
1084 spin_lock(&vn->sock_lock);
1085 hlist_del_rcu(&vs->hlist);
1086 udp_tunnel_notify_del_rx_port(vs->sock,
1087 (vs->flags & VXLAN_F_GPE) ?
1088 UDP_TUNNEL_TYPE_VXLAN_GPE :
1089 UDP_TUNNEL_TYPE_VXLAN);
1090 spin_unlock(&vn->sock_lock);
1091
1092 return true;
1093}
1094
1095static void vxlan_sock_release(struct vxlan_dev *vxlan)
1096{
1097 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
1098#if IS_ENABLED(CONFIG_IPV6)
1099 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
1100
1101 RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
1102#endif
1103
1104 RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
1105 synchronize_net();
1106
1107 vxlan_vs_del_dev(vxlan);
1108
1109 if (__vxlan_sock_release_prep(sock4)) {
1110 udp_tunnel_sock_release(sock4->sock);
1111 kfree(sock4);
1112 }
1113
1114#if IS_ENABLED(CONFIG_IPV6)
1115 if (__vxlan_sock_release_prep(sock6)) {
1116 udp_tunnel_sock_release(sock6->sock);
1117 kfree(sock6);
1118 }
1119#endif
1120}
1121
1122/* Update multicast group membership when first VNI on
1123 * multicast address is brought up
1124 */
1125static int vxlan_igmp_join(struct vxlan_dev *vxlan)
1126{
1127 struct sock *sk;
1128 union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
1129 int ifindex = vxlan->default_dst.remote_ifindex;
1130 int ret = -EINVAL;
1131
1132 if (ip->sa.sa_family == AF_INET) {
1133 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
1134 struct ip_mreqn mreq = {
1135 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1136 .imr_ifindex = ifindex,
1137 };
1138
1139 sk = sock4->sock->sk;
1140 lock_sock(sk);
1141 ret = ip_mc_join_group(sk, &mreq);
1142 release_sock(sk);
1143#if IS_ENABLED(CONFIG_IPV6)
1144 } else {
1145 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
1146
1147 sk = sock6->sock->sk;
1148 lock_sock(sk);
1149 ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
1150 &ip->sin6.sin6_addr);
1151 release_sock(sk);
1152#endif
1153 }
1154
1155 return ret;
1156}
1157
1158/* Inverse of vxlan_igmp_join when last VNI is brought down */
1159static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
1160{
1161 struct sock *sk;
1162 union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
1163 int ifindex = vxlan->default_dst.remote_ifindex;
1164 int ret = -EINVAL;
1165
1166 if (ip->sa.sa_family == AF_INET) {
1167 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
1168 struct ip_mreqn mreq = {
1169 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1170 .imr_ifindex = ifindex,
1171 };
1172
1173 sk = sock4->sock->sk;
1174 lock_sock(sk);
1175 ret = ip_mc_leave_group(sk, &mreq);
1176 release_sock(sk);
1177#if IS_ENABLED(CONFIG_IPV6)
1178 } else {
1179 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
1180
1181 sk = sock6->sock->sk;
1182 lock_sock(sk);
1183 ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
1184 &ip->sin6.sin6_addr);
1185 release_sock(sk);
1186#endif
1187 }
1188
1189 return ret;
1190}
1191
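/* Undo remote checksum offload (RCO): when the RCO bit is set, the low
 * bits of the VNI field carry the start/offset of an inner checksum that
 * has to be completed on receive.
 */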
1192static bool vxlan_remcsum(struct vxlanhdr *unparsed,
1193 struct sk_buff *skb, u32 vxflags)
1194{
1195 size_t start, offset;
1196
1197 if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
1198 goto out;
1199
1200 start = vxlan_rco_start(unparsed->vx_vni);
1201 offset = start + vxlan_rco_offset(unparsed->vx_vni);
1202
1203 if (!pskb_may_pull(skb, offset + sizeof(u16)))
1204 return false;
1205
1206 skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
1207 !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
1208out:
1209 unparsed->vx_flags &= ~VXLAN_HF_RCO;
1210 unparsed->vx_vni &= VXLAN_VNI_MASK;
1211 return true;
1212}
1213
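/* VXLAN Group Based Policy extension: copy the policy id plus the
 * DONT_LEARN/POLICY_APPLIED flags into md->gbp for the receive path.
 */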
1214static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
1215 struct sk_buff *skb, u32 vxflags,
1216 struct vxlan_metadata *md)
1217{
1218 struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
1219 struct metadata_dst *tun_dst;
1220
1221 if (!(unparsed->vx_flags & VXLAN_HF_GBP))
1222 goto out;
1223
1224 md->gbp = ntohs(gbp->policy_id);
1225
1226 tun_dst = (struct metadata_dst *)skb_dst(skb);
1227 if (tun_dst) {
1228 tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
1229 tun_dst->u.tun_info.options_len = sizeof(*md);
1230 }
1231 if (gbp->dont_learn)
1232 md->gbp |= VXLAN_GBP_DONT_LEARN;
1233
1234 if (gbp->policy_applied)
1235 md->gbp |= VXLAN_GBP_POLICY_APPLIED;
1236
1237 /* In flow-based mode, GBP is carried in dst_metadata */
1238 if (!(vxflags & VXLAN_F_COLLECT_METADATA))
1239 skb->mark = md->gbp;
1240out:
1241 unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
1242}
1243
1244static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
1245 __be16 *protocol,
1246 struct sk_buff *skb, u32 vxflags)
1247{
1248 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;
1249
1250 /* Need to have Next Protocol set for interfaces in GPE mode. */
1251 if (!gpe->np_applied)
1252 return false;
1253 /* "The initial version is 0. If a receiver does not support the
	 * version indicated it MUST drop the packet."
1255 */
1256 if (gpe->version != 0)
1257 return false;
1258 /* "When the O bit is set to 1, the packet is an OAM packet and OAM
1259 * processing MUST occur." However, we don't implement OAM
1260 * processing, thus drop the packet.
1261 */
1262 if (gpe->oam_flag)
1263 return false;
1264
1265 *protocol = tun_p_to_eth_p(gpe->next_protocol);
1266 if (!*protocol)
1267 return false;
1268
1269 unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
1270 return true;
1271}
1272
1273static bool vxlan_set_mac(struct vxlan_dev *vxlan,
1274 struct vxlan_sock *vs,
1275 struct sk_buff *skb, __be32 vni)
1276{
1277 union vxlan_addr saddr;
1278 u32 ifindex = skb->dev->ifindex;
1279
1280 skb_reset_mac_header(skb);
1281 skb->protocol = eth_type_trans(skb, vxlan->dev);
1282 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1283
1284 /* Ignore packet loops (and multicast echo) */
1285 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
1286 return false;
1287
1288 /* Get address from the outer IP header */
1289 if (vxlan_get_sk_family(vs) == AF_INET) {
1290 saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
1291 saddr.sa.sa_family = AF_INET;
1292#if IS_ENABLED(CONFIG_IPV6)
1293 } else {
1294 saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
1295 saddr.sa.sa_family = AF_INET6;
1296#endif
1297 }
1298
1299 if ((vxlan->cfg.flags & VXLAN_F_LEARN) &&
1300 vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, ifindex, vni))
1301 return false;
1302
1303 return true;
1304}
1305
1306static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
1307 struct sk_buff *skb)
1308{
1309 int err = 0;
1310
1311 if (vxlan_get_sk_family(vs) == AF_INET)
1312 err = IP_ECN_decapsulate(oiph, skb);
1313#if IS_ENABLED(CONFIG_IPV6)
1314 else
1315 err = IP6_ECN_decapsulate(oiph, skb);
1316#endif
1317
1318 if (unlikely(err) && log_ecn_error) {
1319 if (vxlan_get_sk_family(vs) == AF_INET)
1320 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
1321 &((struct iphdr *)oiph)->saddr,
1322 ((struct iphdr *)oiph)->tos);
1323 else
1324 net_info_ratelimited("non-ECT from %pI6\n",
1325 &((struct ipv6hdr *)oiph)->saddr);
1326 }
1327 return err <= 1;
1328}
1329
1330/* Callback from net/ipv4/udp.c to receive packets */
1331static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
1332{
1333 struct pcpu_sw_netstats *stats;
1334 struct vxlan_dev *vxlan;
1335 struct vxlan_sock *vs;
1336 struct vxlanhdr unparsed;
1337 struct vxlan_metadata _md;
1338 struct vxlan_metadata *md = &_md;
1339 __be16 protocol = htons(ETH_P_TEB);
1340 bool raw_proto = false;
1341 void *oiph;
1342 __be32 vni = 0;
1343
1344 /* Need UDP and VXLAN header to be present */
1345 if (!pskb_may_pull(skb, VXLAN_HLEN))
1346 goto drop;
1347
1348 unparsed = *vxlan_hdr(skb);
1349 /* VNI flag always required to be set */
1350 if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
1351 netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
1352 ntohl(vxlan_hdr(skb)->vx_flags),
1353 ntohl(vxlan_hdr(skb)->vx_vni));
		/* not a valid VXLAN packet, drop it */
1355 goto drop;
1356 }
1357 unparsed.vx_flags &= ~VXLAN_HF_VNI;
1358 unparsed.vx_vni &= ~VXLAN_VNI_MASK;
1359
1360 vs = rcu_dereference_sk_user_data(sk);
1361 if (!vs)
1362 goto drop;
1363
1364 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
1365
1366 vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
1367 if (!vxlan)
1368 goto drop;
1369
1370 /* For backwards compatibility, only allow reserved fields to be
1371 * used by VXLAN extensions if explicitly requested.
1372 */
1373 if (vs->flags & VXLAN_F_GPE) {
1374 if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
1375 goto drop;
1376 raw_proto = true;
1377 }
1378
1379 if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
1380 !net_eq(vxlan->net, dev_net(vxlan->dev))))
1381 goto drop;
1382
1383 if (vxlan_collect_metadata(vs)) {
1384 struct metadata_dst *tun_dst;
1385
1386 tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
1387 key32_to_tunnel_id(vni), sizeof(*md));
1388
1389 if (!tun_dst)
1390 goto drop;
1391
1392 md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
1393
1394 skb_dst_set(skb, (struct dst_entry *)tun_dst);
1395 } else {
1396 memset(md, 0, sizeof(*md));
1397 }
1398
1399 if (vs->flags & VXLAN_F_REMCSUM_RX)
1400 if (!vxlan_remcsum(&unparsed, skb, vs->flags))
1401 goto drop;
1402 if (vs->flags & VXLAN_F_GBP)
1403 vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
1404 /* Note that GBP and GPE can never be active together. This is
1405 * ensured in vxlan_dev_configure.
1406 */
1407
1408 if (unparsed.vx_flags || unparsed.vx_vni) {
1409 /* If there are any unprocessed flags remaining treat
1410 * this as a malformed packet. This behavior diverges from
		 * VXLAN RFC (RFC7348), which stipulates that bits in reserved
		 * fields are to be ignored. The approach here
1413 * maintains compatibility with previous stack code, and also
1414 * is more robust and provides a little more security in
1415 * adding extensions to VXLAN.
1416 */
1417 goto drop;
1418 }
1419
1420 if (!raw_proto) {
1421 if (!vxlan_set_mac(vxlan, vs, skb, vni))
1422 goto drop;
1423 } else {
1424 skb_reset_mac_header(skb);
1425 skb->dev = vxlan->dev;
1426 skb->pkt_type = PACKET_HOST;
1427 }
1428
1429 oiph = skb_network_header(skb);
1430 skb_reset_network_header(skb);
1431
1432 if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
1433 ++vxlan->dev->stats.rx_frame_errors;
1434 ++vxlan->dev->stats.rx_errors;
1435 goto drop;
1436 }
1437
1438 stats = this_cpu_ptr(vxlan->dev->tstats);
1439 u64_stats_update_begin(&stats->syncp);
1440 stats->rx_packets++;
1441 stats->rx_bytes += skb->len;
1442 u64_stats_update_end(&stats->syncp);
1443
1444 gro_cells_receive(&vxlan->gro_cells, skb);
1445 return 0;
1446
1447drop:
1448 /* Consume bad packet */
1449 kfree_skb(skb);
1450 return 0;
1451}
1452
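/* ARP reduction: answer ARP requests from the local neighbour table when
 * possible (or raise an L3 miss) instead of flooding them over the tunnel.
 */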
1453static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
1454{
1455 struct vxlan_dev *vxlan = netdev_priv(dev);
1456 struct arphdr *parp;
1457 u8 *arpptr, *sha;
1458 __be32 sip, tip;
1459 struct neighbour *n;
1460
1461 if (dev->flags & IFF_NOARP)
1462 goto out;
1463
1464 if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
1465 dev->stats.tx_dropped++;
1466 goto out;
1467 }
1468 parp = arp_hdr(skb);
1469
1470 if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
1471 parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
1472 parp->ar_pro != htons(ETH_P_IP) ||
1473 parp->ar_op != htons(ARPOP_REQUEST) ||
1474 parp->ar_hln != dev->addr_len ||
1475 parp->ar_pln != 4)
1476 goto out;
1477 arpptr = (u8 *)parp + sizeof(struct arphdr);
1478 sha = arpptr;
1479 arpptr += dev->addr_len; /* sha */
1480 memcpy(&sip, arpptr, sizeof(sip));
1481 arpptr += sizeof(sip);
1482 arpptr += dev->addr_len; /* tha */
1483 memcpy(&tip, arpptr, sizeof(tip));
1484
1485 if (ipv4_is_loopback(tip) ||
1486 ipv4_is_multicast(tip))
1487 goto out;
1488
1489 n = neigh_lookup(&arp_tbl, &tip, dev);
1490
1491 if (n) {
1492 struct vxlan_fdb *f;
1493 struct sk_buff *reply;
1494
1495 if (!(n->nud_state & NUD_CONNECTED)) {
1496 neigh_release(n);
1497 goto out;
1498 }
1499
1500 f = vxlan_find_mac(vxlan, n->ha, vni);
1501 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
1502 /* bridge-local neighbor */
1503 neigh_release(n);
1504 goto out;
1505 }
1506
1507 reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1508 n->ha, sha);
1509
1510 neigh_release(n);
1511
1512 if (reply == NULL)
1513 goto out;
1514
1515 skb_reset_mac_header(reply);
1516 __skb_pull(reply, skb_network_offset(reply));
1517 reply->ip_summed = CHECKSUM_UNNECESSARY;
1518 reply->pkt_type = PACKET_HOST;
1519
1520 if (netif_rx_ni(reply) == NET_RX_DROP)
1521 dev->stats.rx_dropped++;
1522 } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
1523 union vxlan_addr ipa = {
1524 .sin.sin_addr.s_addr = tip,
1525 .sin.sin_family = AF_INET,
1526 };
1527
1528 vxlan_ip_miss(dev, &ipa);
1529 }
1530out:
1531 consume_skb(skb);
1532 return NETDEV_TX_OK;
1533}
1534
1535#if IS_ENABLED(CONFIG_IPV6)
1536static struct sk_buff *vxlan_na_create(struct sk_buff *request,
1537 struct neighbour *n, bool isrouter)
1538{
1539 struct net_device *dev = request->dev;
1540 struct sk_buff *reply;
1541 struct nd_msg *ns, *na;
1542 struct ipv6hdr *pip6;
1543 u8 *daddr;
1544 int na_olen = 8; /* opt hdr + ETH_ALEN for target */
1545 int ns_olen;
1546 int i, len;
1547
1548 if (dev == NULL || !pskb_may_pull(request, request->len))
1549 return NULL;
1550
1551 len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
1552 sizeof(*na) + na_olen + dev->needed_tailroom;
1553 reply = alloc_skb(len, GFP_ATOMIC);
1554 if (reply == NULL)
1555 return NULL;
1556
1557 reply->protocol = htons(ETH_P_IPV6);
1558 reply->dev = dev;
1559 skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
1560 skb_push(reply, sizeof(struct ethhdr));
1561 skb_reset_mac_header(reply);
1562
1563 ns = (struct nd_msg *)(ipv6_hdr(request) + 1);
1564
1565 daddr = eth_hdr(request)->h_source;
1566 ns_olen = request->len - skb_network_offset(request) -
1567 sizeof(struct ipv6hdr) - sizeof(*ns);
1568 for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
1569 if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
1570 daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
1571 break;
1572 }
1573 }
1574
1575 /* Ethernet header */
1576 ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
1577 ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
1578 eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
1579 reply->protocol = htons(ETH_P_IPV6);
1580
1581 skb_pull(reply, sizeof(struct ethhdr));
1582 skb_reset_network_header(reply);
1583 skb_put(reply, sizeof(struct ipv6hdr));
1584
1585 /* IPv6 header */
1586
1587 pip6 = ipv6_hdr(reply);
1588 memset(pip6, 0, sizeof(struct ipv6hdr));
1589 pip6->version = 6;
1590 pip6->priority = ipv6_hdr(request)->priority;
1591 pip6->nexthdr = IPPROTO_ICMPV6;
1592 pip6->hop_limit = 255;
1593 pip6->daddr = ipv6_hdr(request)->saddr;
1594 pip6->saddr = *(struct in6_addr *)n->primary_key;
1595
1596 skb_pull(reply, sizeof(struct ipv6hdr));
1597 skb_reset_transport_header(reply);
1598
1599 /* Neighbor Advertisement */
1600 na = skb_put_zero(reply, sizeof(*na) + na_olen);
1601 na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
1602 na->icmph.icmp6_router = isrouter;
1603 na->icmph.icmp6_override = 1;
1604 na->icmph.icmp6_solicited = 1;
1605 na->target = ns->target;
1606 ether_addr_copy(&na->opt[2], n->ha);
1607 na->opt[0] = ND_OPT_TARGET_LL_ADDR;
1608 na->opt[1] = na_olen >> 3;
1609
1610 na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
1611 &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
1612 csum_partial(na, sizeof(*na)+na_olen, 0));
1613
1614 pip6->payload_len = htons(sizeof(*na)+na_olen);
1615
1616 skb_push(reply, sizeof(struct ipv6hdr));
1617
1618 reply->ip_summed = CHECKSUM_UNNECESSARY;
1619
1620 return reply;
1621}
1622
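/* IPv6 counterpart of arp_reduce(): answer Neighbour Solicitations from
 * the local neighbour table (or raise an L3 miss) instead of forwarding
 * them over the tunnel.
 */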
1623static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
1624{
1625 struct vxlan_dev *vxlan = netdev_priv(dev);
1626 const struct in6_addr *daddr;
1627 const struct ipv6hdr *iphdr;
1628 struct inet6_dev *in6_dev;
1629 struct neighbour *n;
1630 struct nd_msg *msg;
1631
1632 in6_dev = __in6_dev_get(dev);
1633 if (!in6_dev)
1634 goto out;
1635
1636 iphdr = ipv6_hdr(skb);
1637 daddr = &iphdr->daddr;
1638 msg = (struct nd_msg *)(iphdr + 1);
1639
1640 if (ipv6_addr_loopback(daddr) ||
1641 ipv6_addr_is_multicast(&msg->target))
1642 goto out;
1643
1644 n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
1645
1646 if (n) {
1647 struct vxlan_fdb *f;
1648 struct sk_buff *reply;
1649
1650 if (!(n->nud_state & NUD_CONNECTED)) {
1651 neigh_release(n);
1652 goto out;
1653 }
1654
1655 f = vxlan_find_mac(vxlan, n->ha, vni);
1656 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
1657 /* bridge-local neighbor */
1658 neigh_release(n);
1659 goto out;
1660 }
1661
1662 reply = vxlan_na_create(skb, n,
1663 !!(f ? f->flags & NTF_ROUTER : 0));
1664
1665 neigh_release(n);
1666
1667 if (reply == NULL)
1668 goto out;
1669
1670 if (netif_rx_ni(reply) == NET_RX_DROP)
1671 dev->stats.rx_dropped++;
1672
1673 } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
1674 union vxlan_addr ipa = {
1675 .sin6.sin6_addr = msg->target,
1676 .sin6.sin6_family = AF_INET6,
1677 };
1678
1679 vxlan_ip_miss(dev, &ipa);
1680 }
1681
1682out:
1683 consume_skb(skb);
1684 return NETDEV_TX_OK;
1685}
1686#endif
1687
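/* Route short-circuit (RSC): if the inner destination IP resolves to a
 * known connected neighbour, rewrite the destination MAC so the frame is
 * delivered directly. Returns true if the header was rewritten.
 */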
1688static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
1689{
1690 struct vxlan_dev *vxlan = netdev_priv(dev);
1691 struct neighbour *n;
1692
1693 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
1694 return false;
1695
1696 n = NULL;
1697 switch (ntohs(eth_hdr(skb)->h_proto)) {
1698 case ETH_P_IP:
1699 {
1700 struct iphdr *pip;
1701
1702 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
1703 return false;
1704 pip = ip_hdr(skb);
1705 n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
1706 if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
1707 union vxlan_addr ipa = {
1708 .sin.sin_addr.s_addr = pip->daddr,
1709 .sin.sin_family = AF_INET,
1710 };
1711
1712 vxlan_ip_miss(dev, &ipa);
1713 return false;
1714 }
1715
1716 break;
1717 }
1718#if IS_ENABLED(CONFIG_IPV6)
1719 case ETH_P_IPV6:
1720 {
1721 struct ipv6hdr *pip6;
1722
1723 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
1724 return false;
1725 pip6 = ipv6_hdr(skb);
1726 n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
1727 if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
1728 union vxlan_addr ipa = {
1729 .sin6.sin6_addr = pip6->daddr,
1730 .sin6.sin6_family = AF_INET6,
1731 };
1732
1733 vxlan_ip_miss(dev, &ipa);
1734 return false;
1735 }
1736
1737 break;
1738 }
1739#endif
1740 default:
1741 return false;
1742 }
1743
1744 if (n) {
1745 bool diff;
1746
1747 diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
1748 if (diff) {
1749 memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
1750 dev->addr_len);
1751 memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
1752 }
1753 neigh_release(n);
1754 return diff;
1755 }
1756
1757 return false;
1758}
1759
1760static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
1761 struct vxlan_metadata *md)
1762{
1763 struct vxlanhdr_gbp *gbp;
1764
1765 if (!md->gbp)
1766 return;
1767
1768 gbp = (struct vxlanhdr_gbp *)vxh;
1769 vxh->vx_flags |= VXLAN_HF_GBP;
1770
1771 if (md->gbp & VXLAN_GBP_DONT_LEARN)
1772 gbp->dont_learn = 1;
1773
1774 if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
1775 gbp->policy_applied = 1;
1776
1777 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
1778}
1779
1780static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags,
1781 __be16 protocol)
1782{
1783 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;
1784
1785 gpe->np_applied = 1;
1786 gpe->next_protocol = tun_p_from_eth_p(protocol);
1787 if (!gpe->next_protocol)
1788 return -EPFNOSUPPORT;
1789 return 0;
1790}
1791
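/* Push the VXLAN header (plus optional RCO/GBP/GPE extensions) in front of
 * the inner frame and set up the tunnel offload state.
 */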
1792static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
1793 int iphdr_len, __be32 vni,
1794 struct vxlan_metadata *md, u32 vxflags,
1795 bool udp_sum)
1796{
1797 struct vxlanhdr *vxh;
1798 int min_headroom;
1799 int err;
1800 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
1801 __be16 inner_protocol = htons(ETH_P_TEB);
1802
1803 if ((vxflags & VXLAN_F_REMCSUM_TX) &&
1804 skb->ip_summed == CHECKSUM_PARTIAL) {
1805 int csum_start = skb_checksum_start_offset(skb);
1806
1807 if (csum_start <= VXLAN_MAX_REMCSUM_START &&
1808 !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
1809 (skb->csum_offset == offsetof(struct udphdr, check) ||
1810 skb->csum_offset == offsetof(struct tcphdr, check)))
1811 type |= SKB_GSO_TUNNEL_REMCSUM;
1812 }
1813
1814 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
1815 + VXLAN_HLEN + iphdr_len;
1816
1817 /* Need space for new headers (invalidates iph ptr) */
1818 err = skb_cow_head(skb, min_headroom);
1819 if (unlikely(err))
1820 return err;
1821
1822 err = iptunnel_handle_offloads(skb, type);
1823 if (err)
1824 return err;
1825
1826 vxh = __skb_push(skb, sizeof(*vxh));
1827 vxh->vx_flags = VXLAN_HF_VNI;
1828 vxh->vx_vni = vxlan_vni_field(vni);
1829
1830 if (type & SKB_GSO_TUNNEL_REMCSUM) {
1831 unsigned int start;
1832
1833 start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
1834 vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
1835 vxh->vx_flags |= VXLAN_HF_RCO;
1836
1837 if (!skb_is_gso(skb)) {
1838 skb->ip_summed = CHECKSUM_NONE;
1839 skb->encapsulation = 0;
1840 }
1841 }
1842
1843 if (vxflags & VXLAN_F_GBP)
1844 vxlan_build_gbp_hdr(vxh, vxflags, md);
1845 if (vxflags & VXLAN_F_GPE) {
1846 err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
1847 if (err < 0)
1848 return err;
1849 inner_protocol = skb->protocol;
1850 }
1851
1852 skb_set_inner_protocol(skb, inner_protocol);
1853 return 0;
1854}
1855
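/* IPv4 underlay route lookup for an encapsulated packet, using the
 * per-destination dst_cache when the flow allows it and rejecting routes
 * that loop back through the VXLAN device itself.
 */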
1856static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
1857 struct vxlan_sock *sock4,
1858 struct sk_buff *skb, int oif, u8 tos,
1859 __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
1860 struct dst_cache *dst_cache,
1861 const struct ip_tunnel_info *info)
1862{
1863 bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
1864 struct rtable *rt = NULL;
1865 struct flowi4 fl4;
1866
1867 if (!sock4)
1868 return ERR_PTR(-EIO);
1869
1870 if (tos && !info)
1871 use_cache = false;
1872 if (use_cache) {
1873 rt = dst_cache_get_ip4(dst_cache, saddr);
1874 if (rt)
1875 return rt;
1876 }
1877
1878 memset(&fl4, 0, sizeof(fl4));
1879 fl4.flowi4_oif = oif;
1880 fl4.flowi4_tos = RT_TOS(tos);
1881 fl4.flowi4_mark = skb->mark;
1882 fl4.flowi4_proto = IPPROTO_UDP;
1883 fl4.daddr = daddr;
1884 fl4.saddr = *saddr;
1885 fl4.fl4_dport = dport;
1886 fl4.fl4_sport = sport;
1887
1888 rt = ip_route_output_key(vxlan->net, &fl4);
1889 if (likely(!IS_ERR(rt))) {
1890 if (rt->dst.dev == dev) {
1891 netdev_dbg(dev, "circular route to %pI4\n", &daddr);
1892 ip_rt_put(rt);
1893 return ERR_PTR(-ELOOP);
1894 }
1895
1896 *saddr = fl4.saddr;
1897 if (use_cache)
1898 dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
1899 } else {
1900 netdev_dbg(dev, "no route to %pI4\n", &daddr);
1901 return ERR_PTR(-ENETUNREACH);
1902 }
1903 return rt;
1904}
1905
1906#if IS_ENABLED(CONFIG_IPV6)
1907static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
1908 struct net_device *dev,
1909 struct vxlan_sock *sock6,
1910 struct sk_buff *skb, int oif, u8 tos,
1911 __be32 label,
1912 const struct in6_addr *daddr,
1913 struct in6_addr *saddr,
1914 __be16 dport, __be16 sport,
1915 struct dst_cache *dst_cache,
1916 const struct ip_tunnel_info *info)
1917{
1918 bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
1919 struct dst_entry *ndst;
1920 struct flowi6 fl6;
1921 int err;
1922
1923 if (!sock6)
1924 return ERR_PTR(-EIO);
1925
1926 if (tos && !info)
1927 use_cache = false;
1928 if (use_cache) {
1929 ndst = dst_cache_get_ip6(dst_cache, saddr);
1930 if (ndst)
1931 return ndst;
1932 }
1933
1934 memset(&fl6, 0, sizeof(fl6));
1935 fl6.flowi6_oif = oif;
1936 fl6.daddr = *daddr;
1937 fl6.saddr = *saddr;
1938 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
1939 fl6.flowi6_mark = skb->mark;
1940 fl6.flowi6_proto = IPPROTO_UDP;
1941 fl6.fl6_dport = dport;
1942 fl6.fl6_sport = sport;
1943
1944 err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
1945 sock6->sock->sk,
1946 &ndst, &fl6);
1947 if (unlikely(err < 0)) {
1948 netdev_dbg(dev, "no route to %pI6\n", daddr);
1949 return ERR_PTR(-ENETUNREACH);
1950 }
1951
1952 if (unlikely(ndst->dev == dev)) {
1953 netdev_dbg(dev, "circular route to %pI6\n", daddr);
1954 dst_release(ndst);
1955 return ERR_PTR(-ELOOP);
1956 }
1957
1958 *saddr = fl6.saddr;
1959 if (use_cache)
1960 dst_cache_set_ip6(dst_cache, ndst, saddr);
1961 return ndst;
1962}
1963#endif
1964
1965/* Bypass encapsulation if the destination is local */
1966static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
1967 struct vxlan_dev *dst_vxlan, __be32 vni)
1968{
1969 struct pcpu_sw_netstats *tx_stats, *rx_stats;
1970 union vxlan_addr loopback;
1971 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
1972 struct net_device *dev = skb->dev;
1973 int len = skb->len;
1974
1975 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
1976 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
1977 skb->pkt_type = PACKET_HOST;
1978 skb->encapsulation = 0;
1979 skb->dev = dst_vxlan->dev;
1980 __skb_pull(skb, skb_network_offset(skb));
1981
1982 if (remote_ip->sa.sa_family == AF_INET) {
1983 loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
1984 loopback.sa.sa_family = AF_INET;
1985#if IS_ENABLED(CONFIG_IPV6)
1986 } else {
1987 loopback.sin6.sin6_addr = in6addr_loopback;
1988 loopback.sa.sa_family = AF_INET6;
1989#endif
1990 }
1991
1992 if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
1993 vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0,
1994 vni);
1995
1996 u64_stats_update_begin(&tx_stats->syncp);
1997 tx_stats->tx_packets++;
1998 tx_stats->tx_bytes += len;
1999 u64_stats_update_end(&tx_stats->syncp);
2000
2001 if (netif_rx(skb) == NET_RX_SUCCESS) {
2002 u64_stats_update_begin(&rx_stats->syncp);
2003 rx_stats->rx_packets++;
2004 rx_stats->rx_bytes += len;
2005 u64_stats_update_end(&rx_stats->syncp);
2006 } else {
2007 dev->stats.rx_dropped++;
2008 }
2009}
2010
2011static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
2012 struct vxlan_dev *vxlan,
2013 union vxlan_addr *daddr,
2014 __be16 dst_port, int dst_ifindex, __be32 vni,
2015 struct dst_entry *dst,
2016 u32 rt_flags)
2017{
2018#if IS_ENABLED(CONFIG_IPV6)
2019 /* IPv6 rt-flags are checked against RTF_LOCAL, but the value of
2020 * RTF_LOCAL is equal to RTCF_LOCAL. So to keep code simple
2021 * we can use RTCF_LOCAL which works for ipv4 and ipv6 route entry.
2022 */
2023 BUILD_BUG_ON(RTCF_LOCAL != RTF_LOCAL);
2024#endif
2025 /* Bypass encapsulation if the destination is local */
2026 if (rt_flags & RTCF_LOCAL &&
2027 !(rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
2028 struct vxlan_dev *dst_vxlan;
2029
2030 dst_release(dst);
2031 dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni,
2032 daddr->sa.sa_family, dst_port,
2033 vxlan->cfg.flags);
2034 if (!dst_vxlan) {
2035 dev->stats.tx_errors++;
2036 kfree_skb(skb);
2037
2038 return -ENOENT;
2039 }
2040 vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni);
2041 return 1;
2042 }
2043
2044 return 0;
2045}
2046
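/* Encapsulate and transmit one skb to a single remote, described either by
 * an FDB remote (rdst) or by caller-supplied tunnel metadata in
 * collect-metadata mode.
 */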
2047static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2048 __be32 default_vni, struct vxlan_rdst *rdst,
2049 bool did_rsc)
2050{
2051 struct dst_cache *dst_cache;
2052 struct ip_tunnel_info *info;
2053 struct vxlan_dev *vxlan = netdev_priv(dev);
2054 const struct iphdr *old_iph = ip_hdr(skb);
2055 union vxlan_addr *dst;
2056 union vxlan_addr remote_ip, local_ip;
2057 struct vxlan_metadata _md;
2058 struct vxlan_metadata *md = &_md;
2059 __be16 src_port = 0, dst_port;
2060 struct dst_entry *ndst = NULL;
2061 __be32 vni, label;
2062 __u8 tos, ttl;
2063 int ifindex;
2064 int err;
2065 u32 flags = vxlan->cfg.flags;
2066 bool udp_sum = false;
2067 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
2068
2069 info = skb_tunnel_info(skb);
2070
2071 if (rdst) {
2072 dst = &rdst->remote_ip;
2073 if (vxlan_addr_any(dst)) {
2074 if (did_rsc) {
2075 /* short-circuited back to local bridge */
2076 vxlan_encap_bypass(skb, vxlan, vxlan, default_vni);
2077 return;
2078 }
2079 goto drop;
2080 }
2081
2082 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
2083 vni = (rdst->remote_vni) ? : default_vni;
2084 ifindex = rdst->remote_ifindex;
2085 local_ip = vxlan->cfg.saddr;
2086 dst_cache = &rdst->dst_cache;
2087 md->gbp = skb->mark;
2088 ttl = vxlan->cfg.ttl;
2089 if (!ttl && vxlan_addr_multicast(dst))
2090 ttl = 1;
2091
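		/* a configured TOS of 1 means "inherit from the inner header" */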
2092 tos = vxlan->cfg.tos;
2093 if (tos == 1)
2094 tos = ip_tunnel_get_dsfield(old_iph, skb);
2095
2096 if (dst->sa.sa_family == AF_INET)
2097 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
2098 else
2099 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
2100 label = vxlan->cfg.label;
2101 } else {
2102 if (!info) {
2103 WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
2104 dev->name);
2105 goto drop;
2106 }
2107 remote_ip.sa.sa_family = ip_tunnel_info_af(info);
2108 if (remote_ip.sa.sa_family == AF_INET) {
2109 remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
2110 local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
2111 } else {
2112 remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
2113 local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
2114 }
2115 dst = &remote_ip;
2116 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
2117 vni = tunnel_id_to_key32(info->key.tun_id);
2118 ifindex = 0;
2119 dst_cache = &info->dst_cache;
2120 if (info->options_len)
2121 md = ip_tunnel_info_opts(info);
2122 ttl = info->key.ttl;
2123 tos = info->key.tos;
2124 label = info->key.label;
2125 udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
2126 }
2127 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
2128 vxlan->cfg.port_max, true);
2129
2130 rcu_read_lock();
2131 if (dst->sa.sa_family == AF_INET) {
2132 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
2133 struct rtable *rt;
2134 __be16 df = 0;
2135
2136 rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos,
2137 dst->sin.sin_addr.s_addr,
2138 &local_ip.sin.sin_addr.s_addr,
2139 dst_port, src_port,
2140 dst_cache, info);
2141 if (IS_ERR(rt)) {
2142 err = PTR_ERR(rt);
2143 goto tx_error;
2144 }
2145
2146 /* Bypass encapsulation if the destination is local */
2147 if (!info) {
2148 err = encap_bypass_if_local(skb, dev, vxlan, dst,
2149 dst_port, ifindex, vni,
2150 &rt->dst, rt->rt_flags);
2151 if (err)
2152 goto out_unlock;
2153 } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
2154 df = htons(IP_DF);
2155 }
2156
2157 ndst = &rt->dst;
2158 if (skb_dst(skb)) {
2159 int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
2160
2161 skb_dst_update_pmtu(skb, mtu);
2162 }
2163
2164 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2165 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
2166 err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
2167 vni, md, flags, udp_sum);
2168 if (err < 0)
2169 goto tx_error;
2170
2171 udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr,
2172 dst->sin.sin_addr.s_addr, tos, ttl, df,
2173 src_port, dst_port, xnet, !udp_sum);
2174#if IS_ENABLED(CONFIG_IPV6)
2175 } else {
2176 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
2177
2178 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos,
2179 label, &dst->sin6.sin6_addr,
2180 &local_ip.sin6.sin6_addr,
2181 dst_port, src_port,
2182 dst_cache, info);
2183 if (IS_ERR(ndst)) {
2184 err = PTR_ERR(ndst);
2185 ndst = NULL;
2186 goto tx_error;
2187 }
2188
2189 if (!info) {
2190 u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
2191
2192 err = encap_bypass_if_local(skb, dev, vxlan, dst,
2193 dst_port, ifindex, vni,
2194 ndst, rt6i_flags);
2195 if (err)
2196 goto out_unlock;
2197 }
2198
2199 if (skb_dst(skb)) {
2200 int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
2201
2202 skb_dst_update_pmtu(skb, mtu);
2203 }
2204
2205 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2206 ttl = ttl ? : ip6_dst_hoplimit(ndst);
2207 skb_scrub_packet(skb, xnet);
2208 err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
2209 vni, md, flags, udp_sum);
2210 if (err < 0)
2211 goto tx_error;
2212
2213 udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
2214 &local_ip.sin6.sin6_addr,
2215 &dst->sin6.sin6_addr, tos, ttl,
2216 label, src_port, dst_port, !udp_sum);
2217#endif
2218 }
2219out_unlock:
2220 rcu_read_unlock();
2221 return;
2222
2223drop:
2224 dev->stats.tx_dropped++;
2225 dev_kfree_skb(skb);
2226 return;
2227
2228tx_error:
2229 rcu_read_unlock();
2230 if (err == -ELOOP)
2231 dev->stats.collisions++;
2232 else if (err == -ENETUNREACH)
2233 dev->stats.tx_carrier_errors++;
2234 dst_release(ndst);
2235 dev->stats.tx_errors++;
2236 kfree_skb(skb);
2237}
2238
2239/* Transmit local packets over VXLAN.
2240 *
2241 * The outer IP header inherits ECN and DF from the inner header.
2242 * The outer UDP destination is the VXLAN assigned port; the
2243 * source port is based on a hash of the flow.
2244 */
2245static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
2246{
2247 struct vxlan_dev *vxlan = netdev_priv(dev);
2248 struct vxlan_rdst *rdst, *fdst = NULL;
2249 const struct ip_tunnel_info *info;
2250 bool did_rsc = false;
2251 struct vxlan_fdb *f;
2252 struct ethhdr *eth;
2253 __be32 vni = 0;
2254
2255 info = skb_tunnel_info(skb);
2256
2257 skb_reset_mac_header(skb);
2258
2259 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
2260 if (info && info->mode & IP_TUNNEL_INFO_BRIDGE &&
2261 info->mode & IP_TUNNEL_INFO_TX) {
2262 vni = tunnel_id_to_key32(info->key.tun_id);
2263 } else {
2264 if (info && info->mode & IP_TUNNEL_INFO_TX)
2265 vxlan_xmit_one(skb, dev, vni, NULL, false);
2266 else
2267 kfree_skb(skb);
2268 return NETDEV_TX_OK;
2269 }
2270 }
2271
2272 if (vxlan->cfg.flags & VXLAN_F_PROXY) {
2273 eth = eth_hdr(skb);
2274 if (ntohs(eth->h_proto) == ETH_P_ARP)
2275 return arp_reduce(dev, skb, vni);
2276#if IS_ENABLED(CONFIG_IPV6)
2277 else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
2278 pskb_may_pull(skb, sizeof(struct ipv6hdr) +
2279 sizeof(struct nd_msg)) &&
2280 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
2281 struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1);
2282
2283 if (m->icmph.icmp6_code == 0 &&
2284 m->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
2285 return neigh_reduce(dev, skb, vni);
2286 }
2287#endif
2288 }
2289
2290 eth = eth_hdr(skb);
2291 f = vxlan_find_mac(vxlan, eth->h_dest, vni);
2292 did_rsc = false;
2293
2294 if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) &&
2295 (ntohs(eth->h_proto) == ETH_P_IP ||
2296 ntohs(eth->h_proto) == ETH_P_IPV6)) {
2297 did_rsc = route_shortcircuit(dev, skb);
2298 if (did_rsc)
2299 f = vxlan_find_mac(vxlan, eth->h_dest, vni);
2300 }
2301
2302 if (f == NULL) {
2303 f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
2304 if (f == NULL) {
2305 if ((vxlan->cfg.flags & VXLAN_F_L2MISS) &&
2306 !is_multicast_ether_addr(eth->h_dest))
2307 vxlan_fdb_miss(vxlan, eth->h_dest);
2308
2309 dev->stats.tx_dropped++;
2310 kfree_skb(skb);
2311 return NETDEV_TX_OK;
2312 }
2313 }
2314
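	/* Flood to all remotes of the entry: every remote after the first
	 * gets its own clone, the original skb goes to the first remote last.
	 */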
2315 list_for_each_entry_rcu(rdst, &f->remotes, list) {
2316 struct sk_buff *skb1;
2317
2318 if (!fdst) {
2319 fdst = rdst;
2320 continue;
2321 }
2322 skb1 = skb_clone(skb, GFP_ATOMIC);
2323 if (skb1)
2324 vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc);
2325 }
2326
2327 if (fdst)
2328 vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
2329 else
2330 kfree_skb(skb);
2331 return NETDEV_TX_OK;
2332}
2333
2334/* Walk the forwarding table and purge stale entries */
2335static void vxlan_cleanup(struct timer_list *t)
2336{
2337 struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer);
2338 unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
2339 unsigned int h;
2340
2341 if (!netif_running(vxlan->dev))
2342 return;
2343
2344 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2345 struct hlist_node *p, *n;
2346
2347 spin_lock_bh(&vxlan->hash_lock);
2348 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2349 struct vxlan_fdb *f
2350 = container_of(p, struct vxlan_fdb, hlist);
2351 unsigned long timeout;
2352
2353 if (f->state & (NUD_PERMANENT | NUD_NOARP))
2354 continue;
2355
2356 if (f->flags & NTF_EXT_LEARNED)
2357 continue;
2358
2359 timeout = f->used + vxlan->cfg.age_interval * HZ;
2360 if (time_before_eq(timeout, jiffies)) {
2361 netdev_dbg(vxlan->dev,
2362 "garbage collect %pM\n",
2363 f->eth_addr);
2364 f->state = NUD_STALE;
2365 vxlan_fdb_destroy(vxlan, f);
2366 } else if (time_before(timeout, next_timer))
2367 next_timer = timeout;
2368 }
2369 spin_unlock_bh(&vxlan->hash_lock);
2370 }
2371
2372 mod_timer(&vxlan->age_timer, next_timer);
2373}
2374
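/* Unhash the device from the per-socket VNI lists (IPv4 and, when enabled,
 * IPv6) under the per-namespace socket lock.
 */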
2375static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
2376{
2377 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2378
2379 spin_lock(&vn->sock_lock);
2380 hlist_del_init_rcu(&vxlan->hlist4.hlist);
2381#if IS_ENABLED(CONFIG_IPV6)
2382 hlist_del_init_rcu(&vxlan->hlist6.hlist);
2383#endif
2384 spin_unlock(&vn->sock_lock);
2385}
2386
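/* Hash the device into the socket's VNI list so that received packets
 * carrying its default VNI can be demultiplexed to it.
 */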
2387static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
2388 struct vxlan_dev_node *node)
2389{
2390 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2391 __be32 vni = vxlan->default_dst.remote_vni;
2392
2393 node->vxlan = vxlan;
2394 spin_lock(&vn->sock_lock);
2395 hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
2396 spin_unlock(&vn->sock_lock);
2397}
2398
2399/* Set up per-CPU stats when the device is created */
2400static int vxlan_init(struct net_device *dev)
2401{
2402 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2403 if (!dev->tstats)
2404 return -ENOMEM;
2405
2406 return 0;
2407}
2408
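/* Remove the all-zeros-MAC default destination entry for the given VNI */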
2409static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
2410{
2411 struct vxlan_fdb *f;
2412
2413 spin_lock_bh(&vxlan->hash_lock);
2414 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
2415 if (f)
2416 vxlan_fdb_destroy(vxlan, f);
2417 spin_unlock_bh(&vxlan->hash_lock);
2418}
2419
2420static void vxlan_uninit(struct net_device *dev)
2421{
2422 struct vxlan_dev *vxlan = netdev_priv(dev);
2423
2424 vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
2425
2426 free_percpu(dev->tstats);
2427}
2428
2429/* Start ageing timer and join group when device is brought up */
2430static int vxlan_open(struct net_device *dev)
2431{
2432 struct vxlan_dev *vxlan = netdev_priv(dev);
2433 int ret;
2434
2435 ret = vxlan_sock_add(vxlan);
2436 if (ret < 0)
2437 return ret;
2438
2439 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
2440 ret = vxlan_igmp_join(vxlan);
2441 if (ret == -EADDRINUSE)
2442 ret = 0;
2443 if (ret) {
2444 vxlan_sock_release(vxlan);
2445 return ret;
2446 }
2447 }
2448
2449 if (vxlan->cfg.age_interval)
2450 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
2451
2452 return ret;
2453}
2454
2455/* Purge the forwarding table */
2456static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
2457{
2458 unsigned int h;
2459
2460 spin_lock_bh(&vxlan->hash_lock);
2461 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2462 struct hlist_node *p, *n;
2463 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2464 struct vxlan_fdb *f
2465 = container_of(p, struct vxlan_fdb, hlist);
2466 if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
2467 continue;
2468 /* the all_zeros_mac entry is deleted at vxlan_uninit */
2469 if (!is_zero_ether_addr(f->eth_addr))
2470 vxlan_fdb_destroy(vxlan, f);
2471 }
2472 }
2473 spin_unlock_bh(&vxlan->hash_lock);
2474}
2475
2476/* Clean up the timer and forwarding table on shutdown */
2477static int vxlan_stop(struct net_device *dev)
2478{
2479 struct vxlan_dev *vxlan = netdev_priv(dev);
2480 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2481 int ret = 0;
2482
2483 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
2484 !vxlan_group_used(vn, vxlan))
2485 ret = vxlan_igmp_leave(vxlan);
2486
2487 del_timer_sync(&vxlan->age_timer);
2488
2489 vxlan_flush(vxlan, false);
2490 vxlan_sock_release(vxlan);
2491
2492 return ret;
2493}
2494
2495/* Stub, nothing needs to be done. */
2496static void vxlan_set_multicast_list(struct net_device *dev)
2497{
2498}
2499
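/* Validate a new MTU against the lower device, leaving room for the VXLAN
 * encapsulation headers.
 */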
2500static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2501{
2502 struct vxlan_dev *vxlan = netdev_priv(dev);
2503 struct vxlan_rdst *dst = &vxlan->default_dst;
2504 struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
2505 dst->remote_ifindex);
2506 bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6);
2507
2508	/* This check differs from the dev->max_mtu check because it looks
2509	 * at the current lowerdev->mtu rather than the static dev->max_mtu.
2510	 */
2511 if (lowerdev) {
2512 int max_mtu = lowerdev->mtu -
2513 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2514 if (new_mtu > max_mtu)
2515 return -EINVAL;
2516 }
2517
2518 dev->mtu = new_mtu;
2519 return 0;
2520}
2521
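/* Pre-compute the UDP source/destination ports for a metadata-based packet
 * and verify that a route to the remote endpoint exists; the ports are
 * written back into the tunnel metadata.
 */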
2522static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2523{
2524 struct vxlan_dev *vxlan = netdev_priv(dev);
2525 struct ip_tunnel_info *info = skb_tunnel_info(skb);
2526 __be16 sport, dport;
2527
2528 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
2529 vxlan->cfg.port_max, true);
2530 dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
2531
2532 if (ip_tunnel_info_af(info) == AF_INET) {
2533 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
2534 struct rtable *rt;
2535
2536 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
2537 info->key.u.ipv4.dst,
2538 &info->key.u.ipv4.src, dport, sport,
2539 &info->dst_cache, info);
2540 if (IS_ERR(rt))
2541 return PTR_ERR(rt);
2542 ip_rt_put(rt);
2543 } else {
2544#if IS_ENABLED(CONFIG_IPV6)
2545 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
2546 struct dst_entry *ndst;
2547
2548 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
2549 info->key.label, &info->key.u.ipv6.dst,
2550 &info->key.u.ipv6.src, dport, sport,
2551 &info->dst_cache, info);
2552 if (IS_ERR(ndst))
2553 return PTR_ERR(ndst);
2554 dst_release(ndst);
2555#else /* !CONFIG_IPV6 */
2556 return -EPFNOSUPPORT;
2557#endif
2558 }
2559 info->key.tp_src = sport;
2560 info->key.tp_dst = dport;
2561 return 0;
2562}
2563
2564static const struct net_device_ops vxlan_netdev_ether_ops = {
2565 .ndo_init = vxlan_init,
2566 .ndo_uninit = vxlan_uninit,
2567 .ndo_open = vxlan_open,
2568 .ndo_stop = vxlan_stop,
2569 .ndo_start_xmit = vxlan_xmit,
2570 .ndo_get_stats64 = ip_tunnel_get_stats64,
2571 .ndo_set_rx_mode = vxlan_set_multicast_list,
2572 .ndo_change_mtu = vxlan_change_mtu,
2573 .ndo_validate_addr = eth_validate_addr,
2574 .ndo_set_mac_address = eth_mac_addr,
2575 .ndo_fdb_add = vxlan_fdb_add,
2576 .ndo_fdb_del = vxlan_fdb_delete,
2577 .ndo_fdb_dump = vxlan_fdb_dump,
2578 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
2579};
2580
2581static const struct net_device_ops vxlan_netdev_raw_ops = {
2582 .ndo_init = vxlan_init,
2583 .ndo_uninit = vxlan_uninit,
2584 .ndo_open = vxlan_open,
2585 .ndo_stop = vxlan_stop,
2586 .ndo_start_xmit = vxlan_xmit,
2587 .ndo_get_stats64 = ip_tunnel_get_stats64,
2588 .ndo_change_mtu = vxlan_change_mtu,
2589 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
2590};
2591
2592/* Info for udev: this is a virtual tunnel endpoint */
2593static struct device_type vxlan_type = {
2594 .name = "vxlan",
2595};
2596
2597/* Notify the given device of the listening VXLAN UDP ports via its
2598 * ndo_udp_tunnel_add/ndo_udp_tunnel_del callbacks: ports are added
2599 * when @push is true and dropped otherwise.
2600 */
2601static void vxlan_offload_rx_ports(struct net_device *dev, bool push)
2602{
2603 struct vxlan_sock *vs;
2604 struct net *net = dev_net(dev);
2605 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2606 unsigned int i;
2607
2608 spin_lock(&vn->sock_lock);
2609 for (i = 0; i < PORT_HASH_SIZE; ++i) {
2610 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
2611 unsigned short type;
2612
2613 if (vs->flags & VXLAN_F_GPE)
2614 type = UDP_TUNNEL_TYPE_VXLAN_GPE;
2615 else
2616 type = UDP_TUNNEL_TYPE_VXLAN;
2617
2618 if (push)
2619 udp_tunnel_push_rx_port(dev, vs->sock, type);
2620 else
2621 udp_tunnel_drop_rx_port(dev, vs->sock, type);
2622 }
2623 }
2624 spin_unlock(&vn->sock_lock);
2625}
2626
2627/* Initialize the device structure. */
2628static void vxlan_setup(struct net_device *dev)
2629{
2630 struct vxlan_dev *vxlan = netdev_priv(dev);
2631 unsigned int h;
2632
2633 eth_hw_addr_random(dev);
2634 ether_setup(dev);
2635
2636 dev->needs_free_netdev = true;
2637 SET_NETDEV_DEVTYPE(dev, &vxlan_type);
2638
2639 dev->features |= NETIF_F_LLTX;
2640 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2641 dev->features |= NETIF_F_RXCSUM;
2642 dev->features |= NETIF_F_GSO_SOFTWARE;
2643
2644 dev->vlan_features = dev->features;
2645 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2646 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2647 netif_keep_dst(dev);
2648 dev->priv_flags |= IFF_NO_QUEUE;
2649
2650 /* MTU range: 68 - 65535 */
2651 dev->min_mtu = ETH_MIN_MTU;
2652 dev->max_mtu = ETH_MAX_MTU;
2653
2654 INIT_LIST_HEAD(&vxlan->next);
2655 spin_lock_init(&vxlan->hash_lock);
2656
2657 timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
2658
2659 vxlan->dev = dev;
2660
2661 gro_cells_init(&vxlan->gro_cells, dev);
2662
2663 for (h = 0; h < FDB_HASH_SIZE; ++h)
2664 INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
2665}
2666
2667static void vxlan_ether_setup(struct net_device *dev)
2668{
2669 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2670 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2671 dev->netdev_ops = &vxlan_netdev_ether_ops;
2672}
2673
2674static void vxlan_raw_setup(struct net_device *dev)
2675{
2676 dev->header_ops = NULL;
2677 dev->type = ARPHRD_NONE;
2678 dev->hard_header_len = 0;
2679 dev->addr_len = 0;
2680 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
2681 dev->netdev_ops = &vxlan_netdev_raw_ops;
2682}
2683
2684static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
2685 [IFLA_VXLAN_ID] = { .type = NLA_U32 },
2686 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
2687 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
2688 [IFLA_VXLAN_LINK] = { .type = NLA_U32 },
2689 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
2690 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
2691 [IFLA_VXLAN_TOS] = { .type = NLA_U8 },
2692 [IFLA_VXLAN_TTL] = { .type = NLA_U8 },
2693 [IFLA_VXLAN_LABEL] = { .type = NLA_U32 },
2694 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
2695 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
2696 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
2697 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
2698 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
2699 [IFLA_VXLAN_RSC] = { .type = NLA_U8 },
2700 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
2701 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
2702 [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 },
2703 [IFLA_VXLAN_PORT] = { .type = NLA_U16 },
2704 [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
2705 [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
2706 [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
2707 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
2708 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
2709 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
2710 [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, },
2711 [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
2712};
2713
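/* Validate generic and VXLAN-specific netlink attributes before a device is
 * created or changed.
 */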
2714static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
2715 struct netlink_ext_ack *extack)
2716{
2717 if (tb[IFLA_ADDRESS]) {
2718 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
2719 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
2720 "Provided link layer address is not Ethernet");
2721 return -EINVAL;
2722 }
2723
2724 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
2725 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
2726 "Provided Ethernet address is not unicast");
2727 return -EADDRNOTAVAIL;
2728 }
2729 }
2730
2731 if (tb[IFLA_MTU]) {
2732 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
2733
2734 if (mtu < ETH_MIN_MTU || mtu > ETH_MAX_MTU) {
2735 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU],
2736 "MTU must be between 68 and 65535");
2737 return -EINVAL;
2738 }
2739 }
2740
2741 if (!data) {
2742 NL_SET_ERR_MSG(extack,
2743 "Required attributes not provided to perform the operation");
2744 return -EINVAL;
2745 }
2746
2747 if (data[IFLA_VXLAN_ID]) {
2748 u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
2749
2750 if (id >= VXLAN_N_VID) {
2751			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_ID],
2752 "VXLAN ID must be lower than 16777216");
2753 return -ERANGE;
2754 }
2755 }
2756
2757 if (data[IFLA_VXLAN_PORT_RANGE]) {
2758 const struct ifla_vxlan_port_range *p
2759 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
2760
2761 if (ntohs(p->high) < ntohs(p->low)) {
2762			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_PORT_RANGE],
2763 "Invalid source port range");
2764 return -EINVAL;
2765 }
2766 }
2767
2768 return 0;
2769}
2770
2771static void vxlan_get_drvinfo(struct net_device *netdev,
2772 struct ethtool_drvinfo *drvinfo)
2773{
2774 strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
2775 strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
2776}
2777
2778static const struct ethtool_ops vxlan_ethtool_ops = {
2779 .get_drvinfo = vxlan_get_drvinfo,
2780 .get_link = ethtool_op_get_link,
2781};
2782
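/* Open a kernel UDP socket on the given port for the requested address
 * family; IPv6 sockets are restricted to v6-only operation.
 */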
2783static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
2784 __be16 port, u32 flags)
2785{
2786 struct socket *sock;
2787 struct udp_port_cfg udp_conf;
2788 int err;
2789
2790 memset(&udp_conf, 0, sizeof(udp_conf));
2791
2792 if (ipv6) {
2793 udp_conf.family = AF_INET6;
2794 udp_conf.use_udp6_rx_checksums =
2795 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
2796 udp_conf.ipv6_v6only = 1;
2797 } else {
2798 udp_conf.family = AF_INET;
2799 }
2800
2801 udp_conf.local_udp_port = port;
2802
2803 /* Open UDP socket */
2804 err = udp_sock_create(net, &udp_conf, &sock);
2805 if (err < 0)
2806 return ERR_PTR(err);
2807
2808 return sock;
2809}
2810
2811/* Create new listen socket if needed */
2812static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
2813 __be16 port, u32 flags)
2814{
2815 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2816 struct vxlan_sock *vs;
2817 struct socket *sock;
2818 unsigned int h;
2819 struct udp_tunnel_sock_cfg tunnel_cfg;
2820
2821 vs = kzalloc(sizeof(*vs), GFP_KERNEL);
2822 if (!vs)
2823 return ERR_PTR(-ENOMEM);
2824
2825 for (h = 0; h < VNI_HASH_SIZE; ++h)
2826 INIT_HLIST_HEAD(&vs->vni_list[h]);
2827
2828 sock = vxlan_create_sock(net, ipv6, port, flags);
2829 if (IS_ERR(sock)) {
2830 kfree(vs);
2831 return ERR_CAST(sock);
2832 }
2833
2834 vs->sock = sock;
2835 refcount_set(&vs->refcnt, 1);
2836 vs->flags = (flags & VXLAN_F_RCV_FLAGS);
2837
2838 spin_lock(&vn->sock_lock);
2839 hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
2840 udp_tunnel_notify_add_rx_port(sock,
2841 (vs->flags & VXLAN_F_GPE) ?
2842 UDP_TUNNEL_TYPE_VXLAN_GPE :
2843 UDP_TUNNEL_TYPE_VXLAN);
2844 spin_unlock(&vn->sock_lock);
2845
2846 /* Mark socket as an encapsulation socket. */
2847 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
2848 tunnel_cfg.sk_user_data = vs;
2849 tunnel_cfg.encap_type = 1;
2850 tunnel_cfg.encap_rcv = vxlan_rcv;
2851 tunnel_cfg.encap_destroy = NULL;
2852 tunnel_cfg.gro_receive = vxlan_gro_receive;
2853 tunnel_cfg.gro_complete = vxlan_gro_complete;
2854
2855 setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
2856
2857 return vs;
2858}
2859
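/* Attach the device to a VXLAN socket for one address family, reusing a
 * shared socket with matching port and flags when sharing is allowed, or
 * creating a new one otherwise.
 */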
2860static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
2861{
2862 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2863 struct vxlan_sock *vs = NULL;
2864 struct vxlan_dev_node *node;
2865
2866 if (!vxlan->cfg.no_share) {
2867 spin_lock(&vn->sock_lock);
2868 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
2869 vxlan->cfg.dst_port, vxlan->cfg.flags);
2870 if (vs && !refcount_inc_not_zero(&vs->refcnt)) {
2871 spin_unlock(&vn->sock_lock);
2872 return -EBUSY;
2873 }
2874 spin_unlock(&vn->sock_lock);
2875 }
2876 if (!vs)
2877 vs = vxlan_socket_create(vxlan->net, ipv6,
2878 vxlan->cfg.dst_port, vxlan->cfg.flags);
2879 if (IS_ERR(vs))
2880 return PTR_ERR(vs);
2881#if IS_ENABLED(CONFIG_IPV6)
2882 if (ipv6) {
2883 rcu_assign_pointer(vxlan->vn6_sock, vs);
2884 node = &vxlan->hlist6;
2885 } else
2886#endif
2887 {
2888 rcu_assign_pointer(vxlan->vn4_sock, vs);
2889 node = &vxlan->hlist4;
2890 }
2891 vxlan_vs_add_dev(vs, vxlan, node);
2892 return 0;
2893}
2894
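/* Open the device's VXLAN sockets. In collect-metadata mode both IPv4 and
 * IPv6 sockets are attempted so that either family can be used per packet.
 */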
2895static int vxlan_sock_add(struct vxlan_dev *vxlan)
2896{
2897 bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
2898 bool ipv6 = vxlan->cfg.flags & VXLAN_F_IPV6 || metadata;
2899 bool ipv4 = !ipv6 || metadata;
2900 int ret = 0;
2901
2902 RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
2903#if IS_ENABLED(CONFIG_IPV6)
2904 RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
2905 if (ipv6) {
2906 ret = __vxlan_sock_add(vxlan, true);
2907 if (ret < 0 && ret != -EAFNOSUPPORT)
2908 ipv4 = false;
2909 }
2910#endif
2911 if (ipv4)
2912 ret = __vxlan_sock_add(vxlan, false);
2913 if (ret < 0)
2914 vxlan_sock_release(vxlan);
2915 return ret;
2916}
2917
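/* Sanity-check a device configuration: address families and scopes, the
 * lower device, defaults for destination port and ageing interval, and
 * uniqueness of the VNI/port combination within the namespace.
 */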
2918static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
2919 struct net_device **lower,
2920 struct vxlan_dev *old,
2921 struct netlink_ext_ack *extack)
2922{
2923 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
2924 struct vxlan_dev *tmp;
2925 bool use_ipv6 = false;
2926
2927 if (conf->flags & VXLAN_F_GPE) {
2928 /* For now, allow GPE only together with
2929		 * COLLECT_METADATA. This can be relaxed later; in that
2930 * case, the other side of the PtP link will have to be
2931 * provided.
2932 */
2933 if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
2934 !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
2935 NL_SET_ERR_MSG(extack,
2936 "VXLAN GPE does not support this combination of attributes");
2937 return -EINVAL;
2938 }
2939 }
2940
2941 if (!conf->remote_ip.sa.sa_family && !conf->saddr.sa.sa_family) {
2942 /* Unless IPv6 is explicitly requested, assume IPv4 */
2943 conf->remote_ip.sa.sa_family = AF_INET;
2944 conf->saddr.sa.sa_family = AF_INET;
2945 } else if (!conf->remote_ip.sa.sa_family) {
2946 conf->remote_ip.sa.sa_family = conf->saddr.sa.sa_family;
2947 } else if (!conf->saddr.sa.sa_family) {
2948 conf->saddr.sa.sa_family = conf->remote_ip.sa.sa_family;
2949 }
2950
2951 if (conf->saddr.sa.sa_family != conf->remote_ip.sa.sa_family) {
2952 NL_SET_ERR_MSG(extack,
2953 "Local and remote address must be from the same family");
2954 return -EINVAL;
2955 }
2956
2957 if (vxlan_addr_multicast(&conf->saddr)) {
2958 NL_SET_ERR_MSG(extack, "Local address cannot be multicast");
2959 return -EINVAL;
2960 }
2961
2962 if (conf->saddr.sa.sa_family == AF_INET6) {
2963 if (!IS_ENABLED(CONFIG_IPV6)) {
2964 NL_SET_ERR_MSG(extack,
2965 "IPv6 support not enabled in the kernel");
2966 return -EPFNOSUPPORT;
2967 }
2968 use_ipv6 = true;
2969 conf->flags |= VXLAN_F_IPV6;
2970
2971 if (!(conf->flags & VXLAN_F_COLLECT_METADATA)) {
2972 int local_type =
2973 ipv6_addr_type(&conf->saddr.sin6.sin6_addr);
2974 int remote_type =
2975 ipv6_addr_type(&conf->remote_ip.sin6.sin6_addr);
2976
2977 if (local_type & IPV6_ADDR_LINKLOCAL) {
2978 if (!(remote_type & IPV6_ADDR_LINKLOCAL) &&
2979 (remote_type != IPV6_ADDR_ANY)) {
2980 NL_SET_ERR_MSG(extack,
2981 "Invalid combination of local and remote address scopes");
2982 return -EINVAL;
2983 }
2984
2985 conf->flags |= VXLAN_F_IPV6_LINKLOCAL;
2986 } else {
2987 if (remote_type ==
2988 (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL)) {
2989 NL_SET_ERR_MSG(extack,
2990 "Invalid combination of local and remote address scopes");
2991 return -EINVAL;
2992 }
2993
2994 conf->flags &= ~VXLAN_F_IPV6_LINKLOCAL;
2995 }
2996 }
2997 }
2998
2999 if (conf->label && !use_ipv6) {
3000 NL_SET_ERR_MSG(extack,
3001 "Label attribute only applies to IPv6 VXLAN devices");
3002 return -EINVAL;
3003 }
3004
3005 if (conf->remote_ifindex) {
3006 struct net_device *lowerdev;
3007
3008 lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
3009 if (!lowerdev) {
3010 NL_SET_ERR_MSG(extack,
3011 "Invalid local interface, device not found");
3012 return -ENODEV;
3013 }
3014
3015#if IS_ENABLED(CONFIG_IPV6)
3016 if (use_ipv6) {
3017 struct inet6_dev *idev = __in6_dev_get(lowerdev);
3018 if (idev && idev->cnf.disable_ipv6) {
3019 NL_SET_ERR_MSG(extack,
3020 "IPv6 support disabled by administrator");
3021 return -EPERM;
3022 }
3023 }
3024#endif
3025
3026 *lower = lowerdev;
3027 } else {
3028 if (vxlan_addr_multicast(&conf->remote_ip)) {
3029 NL_SET_ERR_MSG(extack,
3030 "Local interface required for multicast remote destination");
3031
3032 return -EINVAL;
3033 }
3034
3035#if IS_ENABLED(CONFIG_IPV6)
3036 if (conf->flags & VXLAN_F_IPV6_LINKLOCAL) {
3037 NL_SET_ERR_MSG(extack,
3038 "Local interface required for link-local local/remote addresses");
3039 return -EINVAL;
3040 }
3041#endif
3042
3043 *lower = NULL;
3044 }
3045
3046 if (!conf->dst_port) {
3047 if (conf->flags & VXLAN_F_GPE)
3048 conf->dst_port = htons(4790); /* IANA VXLAN-GPE port */
3049 else
3050 conf->dst_port = htons(vxlan_port);
3051 }
3052
3053 if (!conf->age_interval)
3054 conf->age_interval = FDB_AGE_DEFAULT;
3055
3056 list_for_each_entry(tmp, &vn->vxlan_list, next) {
3057 if (tmp == old)
3058 continue;
3059
3060 if (tmp->cfg.vni != conf->vni)
3061 continue;
3062 if (tmp->cfg.dst_port != conf->dst_port)
3063 continue;
3064 if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) !=
3065 (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)))
3066 continue;
3067
3068 if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) &&
3069 tmp->cfg.remote_ifindex != conf->remote_ifindex)
3070 continue;
3071
3072 NL_SET_ERR_MSG(extack,
3073 "A VXLAN device with the specified VNI already exists");
3074 return -EEXIST;
3075 }
3076
3077 return 0;
3078}
3079
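/* Commit a validated configuration to the device: select Ethernet or raw
 * (GPE) mode, inherit limits from the lower device and reserve headroom for
 * the encapsulation.
 */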
3080static void vxlan_config_apply(struct net_device *dev,
3081 struct vxlan_config *conf,
3082 struct net_device *lowerdev,
3083 struct net *src_net,
3084 bool changelink)
3085{
3086 struct vxlan_dev *vxlan = netdev_priv(dev);
3087 struct vxlan_rdst *dst = &vxlan->default_dst;
3088 unsigned short needed_headroom = ETH_HLEN;
3089 bool use_ipv6 = !!(conf->flags & VXLAN_F_IPV6);
3090 int max_mtu = ETH_MAX_MTU;
3091
3092 if (!changelink) {
3093 if (conf->flags & VXLAN_F_GPE)
3094 vxlan_raw_setup(dev);
3095 else
3096 vxlan_ether_setup(dev);
3097
3098 if (conf->mtu)
3099 dev->mtu = conf->mtu;
3100
3101 vxlan->net = src_net;
3102 }
3103
3104 dst->remote_vni = conf->vni;
3105
3106 memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
3107
3108 if (lowerdev) {
3109 dst->remote_ifindex = conf->remote_ifindex;
3110
3111 dev->gso_max_size = lowerdev->gso_max_size;
3112 dev->gso_max_segs = lowerdev->gso_max_segs;
3113
3114 needed_headroom = lowerdev->hard_header_len;
3115
3116 max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
3117 VXLAN_HEADROOM);
3118 if (max_mtu < ETH_MIN_MTU)
3119 max_mtu = ETH_MIN_MTU;
3120
3121 if (!changelink && !conf->mtu)
3122 dev->mtu = max_mtu;
3123 }
3124
3125 if (dev->mtu > max_mtu)
3126 dev->mtu = max_mtu;
3127
3128 if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
3129 needed_headroom += VXLAN6_HEADROOM;
3130 else
3131 needed_headroom += VXLAN_HEADROOM;
3132 dev->needed_headroom = needed_headroom;
3133
3134 memcpy(&vxlan->cfg, conf, sizeof(*conf));
3135}
3136
3137static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
3138 struct vxlan_config *conf, bool changelink,
3139 struct netlink_ext_ack *extack)
3140{
3141 struct vxlan_dev *vxlan = netdev_priv(dev);
3142 struct net_device *lowerdev;
3143 int ret;
3144
3145 ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan, extack);
3146 if (ret)
3147 return ret;
3148
3149 vxlan_config_apply(dev, conf, lowerdev, src_net, changelink);
3150
3151 return 0;
3152}
3153
3154static int __vxlan_dev_create(struct net *net, struct net_device *dev,
3155 struct vxlan_config *conf,
3156 struct netlink_ext_ack *extack)
3157{
3158 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3159 struct vxlan_dev *vxlan = netdev_priv(dev);
3160 int err;
3161
3162 err = vxlan_dev_configure(net, dev, conf, false, extack);
3163 if (err)
3164 return err;
3165
3166 dev->ethtool_ops = &vxlan_ethtool_ops;
3167
3168 /* create an fdb entry for a valid default destination */
3169 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
3170 err = vxlan_fdb_create(vxlan, all_zeros_mac,
3171 &vxlan->default_dst.remote_ip,
3172 NUD_REACHABLE | NUD_PERMANENT,
3173 NLM_F_EXCL | NLM_F_CREATE,
3174 vxlan->cfg.dst_port,
3175 vxlan->default_dst.remote_vni,
3176 vxlan->default_dst.remote_vni,
3177 vxlan->default_dst.remote_ifindex,
3178 NTF_SELF);
3179 if (err)
3180 return err;
3181 }
3182
3183 err = register_netdevice(dev);
3184 if (err) {
3185 vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
3186 return err;
3187 }
3188
3189 list_add(&vxlan->next, &vn->vxlan_list);
3190 return 0;
3191}
3192
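/* Translate netlink attributes into a vxlan_config. For changelink the
 * existing configuration is the starting point, and attributes that cannot
 * be changed on an existing device return -EOPNOTSUPP.
 */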
3193static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
3194 struct net_device *dev, struct vxlan_config *conf,
3195 bool changelink)
3196{
3197 struct vxlan_dev *vxlan = netdev_priv(dev);
3198
3199 memset(conf, 0, sizeof(*conf));
3200
3201	/* for a changelink operation, start from the existing cfg */
3202 if (changelink)
3203 memcpy(conf, &vxlan->cfg, sizeof(*conf));
3204
3205 if (data[IFLA_VXLAN_ID]) {
3206 __be32 vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
3207
3208 if (changelink && (vni != conf->vni))
3209 return -EOPNOTSUPP;
3210		conf->vni = vni;
3211 }
3212
3213 if (data[IFLA_VXLAN_GROUP]) {
3214 if (changelink && (conf->remote_ip.sa.sa_family != AF_INET))
3215 return -EOPNOTSUPP;
3216
3217 conf->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
3218 conf->remote_ip.sa.sa_family = AF_INET;
3219 } else if (data[IFLA_VXLAN_GROUP6]) {
3220 if (!IS_ENABLED(CONFIG_IPV6))
3221 return -EPFNOSUPPORT;
3222
3223 if (changelink && (conf->remote_ip.sa.sa_family != AF_INET6))
3224 return -EOPNOTSUPP;
3225
3226 conf->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
3227 conf->remote_ip.sa.sa_family = AF_INET6;
3228 }
3229
3230 if (data[IFLA_VXLAN_LOCAL]) {
3231 if (changelink && (conf->saddr.sa.sa_family != AF_INET))
3232 return -EOPNOTSUPP;
3233
3234 conf->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
3235 conf->saddr.sa.sa_family = AF_INET;
3236 } else if (data[IFLA_VXLAN_LOCAL6]) {
3237 if (!IS_ENABLED(CONFIG_IPV6))
3238 return -EPFNOSUPPORT;
3239
3240 if (changelink && (conf->saddr.sa.sa_family != AF_INET6))
3241 return -EOPNOTSUPP;
3242
3243 /* TODO: respect scope id */
3244 conf->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
3245 conf->saddr.sa.sa_family = AF_INET6;
3246 }
3247
3248 if (data[IFLA_VXLAN_LINK])
3249 conf->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);
3250
3251 if (data[IFLA_VXLAN_TOS])
3252 conf->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
3253
3254 if (data[IFLA_VXLAN_TTL])
3255 conf->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
3256
3257 if (data[IFLA_VXLAN_LABEL])
3258 conf->label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
3259 IPV6_FLOWLABEL_MASK;
3260
3261 if (data[IFLA_VXLAN_LEARNING]) {
3262 if (nla_get_u8(data[IFLA_VXLAN_LEARNING]))
3263 conf->flags |= VXLAN_F_LEARN;
3264 else
3265 conf->flags &= ~VXLAN_F_LEARN;
3266 } else if (!changelink) {
3267 /* default to learn on a new device */
3268 conf->flags |= VXLAN_F_LEARN;
3269 }
3270
3271 if (data[IFLA_VXLAN_AGEING]) {
3272 if (changelink)
3273 return -EOPNOTSUPP;
3274 conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
3275 }
3276
3277 if (data[IFLA_VXLAN_PROXY]) {
3278 if (changelink)
3279 return -EOPNOTSUPP;
3280 if (nla_get_u8(data[IFLA_VXLAN_PROXY]))
3281 conf->flags |= VXLAN_F_PROXY;
3282 }
3283
3284 if (data[IFLA_VXLAN_RSC]) {
3285 if (changelink)
3286 return -EOPNOTSUPP;
3287 if (nla_get_u8(data[IFLA_VXLAN_RSC]))
3288 conf->flags |= VXLAN_F_RSC;
3289 }
3290
3291 if (data[IFLA_VXLAN_L2MISS]) {
3292 if (changelink)
3293 return -EOPNOTSUPP;
3294 if (nla_get_u8(data[IFLA_VXLAN_L2MISS]))
3295 conf->flags |= VXLAN_F_L2MISS;
3296 }
3297
3298 if (data[IFLA_VXLAN_L3MISS]) {
3299 if (changelink)
3300 return -EOPNOTSUPP;
3301 if (nla_get_u8(data[IFLA_VXLAN_L3MISS]))
3302 conf->flags |= VXLAN_F_L3MISS;
3303 }
3304
3305 if (data[IFLA_VXLAN_LIMIT]) {
3306 if (changelink)
3307 return -EOPNOTSUPP;
3308 conf->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
3309 }
3310
3311 if (data[IFLA_VXLAN_COLLECT_METADATA]) {
3312 if (changelink)
3313 return -EOPNOTSUPP;
3314 if (nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
3315 conf->flags |= VXLAN_F_COLLECT_METADATA;
3316 }
3317
3318 if (data[IFLA_VXLAN_PORT_RANGE]) {
3319 if (!changelink) {
3320 const struct ifla_vxlan_port_range *p
3321 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
3322 conf->port_min = ntohs(p->low);
3323 conf->port_max = ntohs(p->high);
3324 } else {
3325 return -EOPNOTSUPP;
3326 }
3327 }
3328
3329 if (data[IFLA_VXLAN_PORT]) {
3330 if (changelink)
3331 return -EOPNOTSUPP;
3332 conf->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
3333 }
3334
3335 if (data[IFLA_VXLAN_UDP_CSUM]) {
3336 if (changelink)
3337 return -EOPNOTSUPP;
3338 if (!nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
3339 conf->flags |= VXLAN_F_UDP_ZERO_CSUM_TX;
3340 }
3341
3342 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]) {
3343 if (changelink)
3344 return -EOPNOTSUPP;
3345 if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
3346 conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
3347 }
3348
3349 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]) {
3350 if (changelink)
3351 return -EOPNOTSUPP;
3352 if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
3353 conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
3354 }
3355
3356 if (data[IFLA_VXLAN_REMCSUM_TX]) {
3357 if (changelink)
3358 return -EOPNOTSUPP;
3359 if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
3360 conf->flags |= VXLAN_F_REMCSUM_TX;
3361 }
3362
3363 if (data[IFLA_VXLAN_REMCSUM_RX]) {
3364 if (changelink)
3365 return -EOPNOTSUPP;
3366 if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
3367 conf->flags |= VXLAN_F_REMCSUM_RX;
3368 }
3369
3370 if (data[IFLA_VXLAN_GBP]) {
3371 if (changelink)
3372 return -EOPNOTSUPP;
3373 conf->flags |= VXLAN_F_GBP;
3374 }
3375
3376 if (data[IFLA_VXLAN_GPE]) {
3377 if (changelink)
3378 return -EOPNOTSUPP;
3379 conf->flags |= VXLAN_F_GPE;
3380 }
3381
3382 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) {
3383 if (changelink)
3384 return -EOPNOTSUPP;
3385 conf->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
3386 }
3387
3388 if (tb[IFLA_MTU]) {
3389 if (changelink)
3390 return -EOPNOTSUPP;
3391 conf->mtu = nla_get_u32(tb[IFLA_MTU]);
3392 }
3393
3394 return 0;
3395}
3396
3397static int vxlan_newlink(struct net *src_net, struct net_device *dev,
3398 struct nlattr *tb[], struct nlattr *data[],
3399 struct netlink_ext_ack *extack)
3400{
3401 struct vxlan_config conf;
3402 int err;
3403
3404 err = vxlan_nl2conf(tb, data, dev, &conf, false);
3405 if (err)
3406 return err;
3407
3408 return __vxlan_dev_create(src_net, dev, &conf, extack);
3409}
3410
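/* Reconfigure an existing device and, if the default remote address changed,
 * replace the all-zeros-MAC default FDB entry accordingly.
 */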
3411static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
3412 struct nlattr *data[],
3413 struct netlink_ext_ack *extack)
3414{
3415 struct vxlan_dev *vxlan = netdev_priv(dev);
3416 struct vxlan_rdst *dst = &vxlan->default_dst;
3417 struct vxlan_rdst old_dst;
3418 struct vxlan_config conf;
3419 int err;
3420
3421 err = vxlan_nl2conf(tb, data,
3422 dev, &conf, true);
3423 if (err)
3424 return err;
3425
3426 memcpy(&old_dst, dst, sizeof(struct vxlan_rdst));
3427
3428 err = vxlan_dev_configure(vxlan->net, dev, &conf, true, extack);
3429 if (err)
3430 return err;
3431
3432 /* handle default dst entry */
3433 if (!vxlan_addr_equal(&dst->remote_ip, &old_dst.remote_ip)) {
3434 spin_lock_bh(&vxlan->hash_lock);
3435 if (!vxlan_addr_any(&old_dst.remote_ip))
3436 __vxlan_fdb_delete(vxlan, all_zeros_mac,
3437 old_dst.remote_ip,
3438 vxlan->cfg.dst_port,
3439 old_dst.remote_vni,
3440 old_dst.remote_vni,
3441 old_dst.remote_ifindex, 0);
3442
3443 if (!vxlan_addr_any(&dst->remote_ip)) {
3444 err = vxlan_fdb_create(vxlan, all_zeros_mac,
3445 &dst->remote_ip,
3446 NUD_REACHABLE | NUD_PERMANENT,
3447 NLM_F_CREATE | NLM_F_APPEND,
3448 vxlan->cfg.dst_port,
3449 dst->remote_vni,
3450 dst->remote_vni,
3451 dst->remote_ifindex,
3452 NTF_SELF);
3453 if (err) {
3454 spin_unlock_bh(&vxlan->hash_lock);
3455 return err;
3456 }
3457 }
3458 spin_unlock_bh(&vxlan->hash_lock);
3459 }
3460
3461 return 0;
3462}
3463
3464static void vxlan_dellink(struct net_device *dev, struct list_head *head)
3465{
3466 struct vxlan_dev *vxlan = netdev_priv(dev);
3467
3468 vxlan_flush(vxlan, true);
3469
3470 gro_cells_destroy(&vxlan->gro_cells);
3471 list_del(&vxlan->next);
3472 unregister_netdevice_queue(dev, head);
3473}
3474
3475static size_t vxlan_get_size(const struct net_device *dev)
3476{
3477
3478 return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
3479 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
3480 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
3481 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
3482 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
3483 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
3484 nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
3485 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
3486 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
3487 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
3488 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
3489 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
3490 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */
3491 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
3492 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
3493 nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
3494 nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
3495 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
3496 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
3497 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
3498 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
3499 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
3500 0;
3501}
3502
3503static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
3504{
3505 const struct vxlan_dev *vxlan = netdev_priv(dev);
3506 const struct vxlan_rdst *dst = &vxlan->default_dst;
3507 struct ifla_vxlan_port_range ports = {
3508 .low = htons(vxlan->cfg.port_min),
3509 .high = htons(vxlan->cfg.port_max),
3510 };
3511
3512 if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
3513 goto nla_put_failure;
3514
3515 if (!vxlan_addr_any(&dst->remote_ip)) {
3516 if (dst->remote_ip.sa.sa_family == AF_INET) {
3517 if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
3518 dst->remote_ip.sin.sin_addr.s_addr))
3519 goto nla_put_failure;
3520#if IS_ENABLED(CONFIG_IPV6)
3521 } else {
3522 if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
3523 &dst->remote_ip.sin6.sin6_addr))
3524 goto nla_put_failure;
3525#endif
3526 }
3527 }
3528
3529 if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
3530 goto nla_put_failure;
3531
3532 if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
3533 if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
3534 if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
3535 vxlan->cfg.saddr.sin.sin_addr.s_addr))
3536 goto nla_put_failure;
3537#if IS_ENABLED(CONFIG_IPV6)
3538 } else {
3539 if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
3540 &vxlan->cfg.saddr.sin6.sin6_addr))
3541 goto nla_put_failure;
3542#endif
3543 }
3544 }
3545
3546 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
3547 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
3548 nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
3549 nla_put_u8(skb, IFLA_VXLAN_LEARNING,
3550 !!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
3551 nla_put_u8(skb, IFLA_VXLAN_PROXY,
3552 !!(vxlan->cfg.flags & VXLAN_F_PROXY)) ||
3553 nla_put_u8(skb, IFLA_VXLAN_RSC,
3554 !!(vxlan->cfg.flags & VXLAN_F_RSC)) ||
3555 nla_put_u8(skb, IFLA_VXLAN_L2MISS,
3556 !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) ||
3557 nla_put_u8(skb, IFLA_VXLAN_L3MISS,
3558 !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) ||
3559 nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
3560 !!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) ||
3561 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
3562 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
3563 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
3564 nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
3565 !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
3566 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
3567 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
3568 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
3569 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
3570 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
3571 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) ||
3572 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
3573 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)))
3574 goto nla_put_failure;
3575
3576 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
3577 goto nla_put_failure;
3578
3579 if (vxlan->cfg.flags & VXLAN_F_GBP &&
3580 nla_put_flag(skb, IFLA_VXLAN_GBP))
3581 goto nla_put_failure;
3582
3583 if (vxlan->cfg.flags & VXLAN_F_GPE &&
3584 nla_put_flag(skb, IFLA_VXLAN_GPE))
3585 goto nla_put_failure;
3586
3587 if (vxlan->cfg.flags & VXLAN_F_REMCSUM_NOPARTIAL &&
3588 nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
3589 goto nla_put_failure;
3590
3591 return 0;
3592
3593nla_put_failure:
3594 return -EMSGSIZE;
3595}
3596
3597static struct net *vxlan_get_link_net(const struct net_device *dev)
3598{
3599 struct vxlan_dev *vxlan = netdev_priv(dev);
3600
3601 return vxlan->net;
3602}
3603
3604static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
3605 .kind = "vxlan",
3606 .maxtype = IFLA_VXLAN_MAX,
3607 .policy = vxlan_policy,
3608 .priv_size = sizeof(struct vxlan_dev),
3609 .setup = vxlan_setup,
3610 .validate = vxlan_validate,
3611 .newlink = vxlan_newlink,
3612 .changelink = vxlan_changelink,
3613 .dellink = vxlan_dellink,
3614 .get_size = vxlan_get_size,
3615 .fill_info = vxlan_fill_info,
3616 .get_link_net = vxlan_get_link_net,
3617};
3618
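/* Exported helper for creating and registering a VXLAN device directly from
 * an in-kernel vxlan_config, bypassing netlink attribute parsing.
 */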
3619struct net_device *vxlan_dev_create(struct net *net, const char *name,
3620 u8 name_assign_type,
3621 struct vxlan_config *conf)
3622{
3623 struct nlattr *tb[IFLA_MAX + 1];
3624 struct net_device *dev;
3625 int err;
3626
3627 memset(&tb, 0, sizeof(tb));
3628
3629 dev = rtnl_create_link(net, name, name_assign_type,
3630 &vxlan_link_ops, tb);
3631 if (IS_ERR(dev))
3632 return dev;
3633
3634 err = __vxlan_dev_create(net, dev, conf, NULL);
3635 if (err < 0) {
3636 free_netdev(dev);
3637 return ERR_PTR(err);
3638 }
3639
3640 err = rtnl_configure_link(dev, NULL);
3641 if (err < 0) {
3642 LIST_HEAD(list_kill);
3643
3644 vxlan_dellink(dev, &list_kill);
3645 unregister_netdevice_many(&list_kill);
3646 return ERR_PTR(err);
3647 }
3648
3649 return dev;
3650}
3651EXPORT_SYMBOL_GPL(vxlan_dev_create);
3652
3653static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
3654 struct net_device *dev)
3655{
3656 struct vxlan_dev *vxlan, *next;
3657 LIST_HEAD(list_kill);
3658
3659 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
3660 struct vxlan_rdst *dst = &vxlan->default_dst;
3661
3662		/* If we created the vxlan device with a carrier and we
3663		 * lose that carrier due to module unload, the vxlan
3664		 * device must be removed as well. In other cases this is
3665		 * not necessary: remote_ifindex is 0 here, so nothing
3666		 * matches.
3667		 */
3668 if (dst->remote_ifindex == dev->ifindex)
3669 vxlan_dellink(vxlan->dev, &list_kill);
3670 }
3671
3672 unregister_netdevice_many(&list_kill);
3673}
3674
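/* React to lower-device events: push or drop offloaded UDP ports and tear
 * down VXLAN devices whose lower device is being unregistered.
 */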
3675static int vxlan_netdevice_event(struct notifier_block *unused,
3676 unsigned long event, void *ptr)
3677{
3678 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3679 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
3680
3681 if (event == NETDEV_UNREGISTER) {
3682 vxlan_offload_rx_ports(dev, false);
3683 vxlan_handle_lowerdev_unregister(vn, dev);
3684 } else if (event == NETDEV_REGISTER) {
3685 vxlan_offload_rx_ports(dev, true);
3686 } else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO ||
3687 event == NETDEV_UDP_TUNNEL_DROP_INFO) {
3688 vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO);
3689 }
3690
3691 return NOTIFY_DONE;
3692}
3693
3694static struct notifier_block vxlan_notifier_block __read_mostly = {
3695 .notifier_call = vxlan_netdevice_event,
3696};
3697
3698static __net_init int vxlan_init_net(struct net *net)
3699{
3700 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3701 unsigned int h;
3702
3703 INIT_LIST_HEAD(&vn->vxlan_list);
3704 spin_lock_init(&vn->sock_lock);
3705
3706 for (h = 0; h < PORT_HASH_SIZE; ++h)
3707 INIT_HLIST_HEAD(&vn->sock_list[h]);
3708
3709 return 0;
3710}
3711
3712static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
3713{
3714 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3715 struct vxlan_dev *vxlan, *next;
3716 struct net_device *dev, *aux;
3717 unsigned int h;
3718
3719 for_each_netdev_safe(net, dev, aux)
3720 if (dev->rtnl_link_ops == &vxlan_link_ops)
3721 unregister_netdevice_queue(dev, head);
3722
3723 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
3724 /* If vxlan->dev is in the same netns, it has already been added
3725 * to the list by the previous loop.
3726 */
3727 if (!net_eq(dev_net(vxlan->dev), net)) {
3728 gro_cells_destroy(&vxlan->gro_cells);
3729 unregister_netdevice_queue(vxlan->dev, head);
3730 }
3731 }
3732
3733 for (h = 0; h < PORT_HASH_SIZE; ++h)
3734 WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
3735}
3736
3737static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
3738{
3739 struct net *net;
3740 LIST_HEAD(list);
3741
3742 rtnl_lock();
3743 list_for_each_entry(net, net_list, exit_list)
3744 vxlan_destroy_tunnels(net, &list);
3745
3746 unregister_netdevice_many(&list);
3747 rtnl_unlock();
3748}
3749
3750static struct pernet_operations vxlan_net_ops = {
3751 .init = vxlan_init_net,
3752 .exit_batch = vxlan_exit_batch_net,
3753 .id = &vxlan_net_id,
3754 .size = sizeof(struct vxlan_net),
3755};
3756
3757static int __init vxlan_init_module(void)
3758{
3759 int rc;
3760
3761 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
3762
3763 rc = register_pernet_subsys(&vxlan_net_ops);
3764 if (rc)
3765 goto out1;
3766
3767 rc = register_netdevice_notifier(&vxlan_notifier_block);
3768 if (rc)
3769 goto out2;
3770
3771 rc = rtnl_link_register(&vxlan_link_ops);
3772 if (rc)
3773 goto out3;
3774
3775 return 0;
3776out3:
3777 unregister_netdevice_notifier(&vxlan_notifier_block);
3778out2:
3779 unregister_pernet_subsys(&vxlan_net_ops);
3780out1:
3781 return rc;
3782}
3783late_initcall(vxlan_init_module);
3784
3785static void __exit vxlan_cleanup_module(void)
3786{
3787 rtnl_link_unregister(&vxlan_link_ops);
3788 unregister_netdevice_notifier(&vxlan_notifier_block);
3789 unregister_pernet_subsys(&vxlan_net_ops);
3790 /* rcu_barrier() is called by netns */
3791}
3792module_exit(vxlan_cleanup_module);
3793
3794MODULE_LICENSE("GPL");
3795MODULE_VERSION(VXLAN_VERSION);
3796MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
3797MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
3798MODULE_ALIAS_RTNL_LINK("vxlan");
1/*
2 * VXLAN: Virtual eXtensible Local Area Network
3 *
4 * Copyright (c) 2012-2013 Vyatta Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/errno.h>
16#include <linux/slab.h>
17#include <linux/udp.h>
18#include <linux/igmp.h>
19#include <linux/if_ether.h>
20#include <linux/ethtool.h>
21#include <net/arp.h>
22#include <net/ndisc.h>
23#include <net/ip.h>
24#include <net/icmp.h>
25#include <net/rtnetlink.h>
26#include <net/inet_ecn.h>
27#include <net/net_namespace.h>
28#include <net/netns/generic.h>
29#include <net/vxlan.h>
30
31#if IS_ENABLED(CONFIG_IPV6)
32#include <net/ip6_tunnel.h>
33#include <net/ip6_checksum.h>
34#endif
35
36#define VXLAN_VERSION "0.1"
37
38#define PORT_HASH_BITS 8
39#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
40#define FDB_AGE_DEFAULT 300 /* 5 min */
41#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
42
43/* UDP port for VXLAN traffic.
44 * The IANA assigned port is 4789, but the Linux default is 8472
45 * for compatibility with early adopters.
46 */
47static unsigned short vxlan_port __read_mostly = 8472;
48module_param_named(udp_port, vxlan_port, ushort, 0444);
49MODULE_PARM_DESC(udp_port, "Destination UDP port");
50
51static bool log_ecn_error = true;
52module_param(log_ecn_error, bool, 0644);
53MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
54
55static unsigned int vxlan_net_id;
56static struct rtnl_link_ops vxlan_link_ops;
57
58static const u8 all_zeros_mac[ETH_ALEN + 2];
59
60static int vxlan_sock_add(struct vxlan_dev *vxlan);
61
62/* per-network namespace private data for this module */
63struct vxlan_net {
64 struct list_head vxlan_list;
65 struct hlist_head sock_list[PORT_HASH_SIZE];
66 spinlock_t sock_lock;
67};
68
69/* Forwarding table entry */
70struct vxlan_fdb {
71 struct hlist_node hlist; /* linked list of entries */
72 struct rcu_head rcu;
73 unsigned long updated; /* jiffies */
74 unsigned long used;
75 struct list_head remotes;
76 u8 eth_addr[ETH_ALEN];
77 u16 state; /* see ndm_state */
78 u8 flags; /* see ndm_flags */
79};
80
81/* salt for hash table */
82static u32 vxlan_salt __read_mostly;
83
84static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
85{
86 return vs->flags & VXLAN_F_COLLECT_METADATA ||
87 ip_tunnel_collect_metadata();
88}
89
90#if IS_ENABLED(CONFIG_IPV6)
91static inline
92bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
93{
94 if (a->sa.sa_family != b->sa.sa_family)
95 return false;
96 if (a->sa.sa_family == AF_INET6)
97 return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
98 else
99 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
100}
101
102static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
103{
104 if (ipa->sa.sa_family == AF_INET6)
105 return ipv6_addr_any(&ipa->sin6.sin6_addr);
106 else
107 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
108}
109
110static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
111{
112 if (ipa->sa.sa_family == AF_INET6)
113 return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
114 else
115 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
116}
117
118static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
119{
120 if (nla_len(nla) >= sizeof(struct in6_addr)) {
121 ip->sin6.sin6_addr = nla_get_in6_addr(nla);
122 ip->sa.sa_family = AF_INET6;
123 return 0;
124 } else if (nla_len(nla) >= sizeof(__be32)) {
125 ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
126 ip->sa.sa_family = AF_INET;
127 return 0;
128 } else {
129 return -EAFNOSUPPORT;
130 }
131}
132
133static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
134 const union vxlan_addr *ip)
135{
136 if (ip->sa.sa_family == AF_INET6)
137 return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
138 else
139 return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
140}
141
142#else /* !CONFIG_IPV6 */
143
144static inline
145bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
146{
147 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
148}
149
150static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
151{
152 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
153}
154
155static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
156{
157 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
158}
159
160static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
161{
162 if (nla_len(nla) >= sizeof(struct in6_addr)) {
163 return -EAFNOSUPPORT;
164 } else if (nla_len(nla) >= sizeof(__be32)) {
165 ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
166 ip->sa.sa_family = AF_INET;
167 return 0;
168 } else {
169 return -EAFNOSUPPORT;
170 }
171}
172
173static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
174 const union vxlan_addr *ip)
175{
176 return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
177}
178#endif
179
180/* Virtual Network hash table head */
181static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
182{
183 return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
184}
185
186/* Socket hash table head */
187static inline struct hlist_head *vs_head(struct net *net, __be16 port)
188{
189 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
190
191 return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
192}
193
194/* First remote destination for a forwarding entry.
195 * Guaranteed to be non-NULL because remotes are never deleted.
196 */
197static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
198{
199 return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
200}
201
202static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
203{
204 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
205}
206
207/* Find VXLAN socket based on network namespace, address family, UDP port
208 * and enabled unshareable flags.
209 */
210static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
211 __be16 port, u32 flags)
212{
213 struct vxlan_sock *vs;
214
215 flags &= VXLAN_F_RCV_FLAGS;
216
217 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
218 if (inet_sk(vs->sock->sk)->inet_sport == port &&
219 vxlan_get_sk_family(vs) == family &&
220 vs->flags == flags)
221 return vs;
222 }
223 return NULL;
224}
225
226static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
227{
228 struct vxlan_dev *vxlan;
229
230 /* For flow based devices, map all packets to VNI 0 */
231 if (vs->flags & VXLAN_F_COLLECT_METADATA)
232 vni = 0;
233
234 hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
235 if (vxlan->default_dst.remote_vni == vni)
236 return vxlan;
237 }
238
239 return NULL;
240}
241
242/* Look up VNI in a per net namespace table */
243static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni,
244 sa_family_t family, __be16 port,
245 u32 flags)
246{
247 struct vxlan_sock *vs;
248
249 vs = vxlan_find_sock(net, family, port, flags);
250 if (!vs)
251 return NULL;
252
253 return vxlan_vs_find_vni(vs, vni);
254}
255
256/* Fill in neighbour message in skbuff. */
257static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
258 const struct vxlan_fdb *fdb,
259 u32 portid, u32 seq, int type, unsigned int flags,
260 const struct vxlan_rdst *rdst)
261{
262 unsigned long now = jiffies;
263 struct nda_cacheinfo ci;
264 struct nlmsghdr *nlh;
265 struct ndmsg *ndm;
266 bool send_ip, send_eth;
267
268 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
269 if (nlh == NULL)
270 return -EMSGSIZE;
271
272 ndm = nlmsg_data(nlh);
273 memset(ndm, 0, sizeof(*ndm));
274
275 send_eth = send_ip = true;
276
277 if (type == RTM_GETNEIGH) {
278 ndm->ndm_family = AF_INET;
279 send_ip = !vxlan_addr_any(&rdst->remote_ip);
280 send_eth = !is_zero_ether_addr(fdb->eth_addr);
281 } else
282 ndm->ndm_family = AF_BRIDGE;
283 ndm->ndm_state = fdb->state;
284 ndm->ndm_ifindex = vxlan->dev->ifindex;
285 ndm->ndm_flags = fdb->flags;
286 ndm->ndm_type = RTN_UNICAST;
287
288 if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
289 nla_put_s32(skb, NDA_LINK_NETNSID,
290 peernet2id(dev_net(vxlan->dev), vxlan->net)))
291 goto nla_put_failure;
292
293 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
294 goto nla_put_failure;
295
296 if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
297 goto nla_put_failure;
298
299 if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
300 nla_put_be16(skb, NDA_PORT, rdst->remote_port))
301 goto nla_put_failure;
302 if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
303 nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
304 goto nla_put_failure;
305 if (rdst->remote_ifindex &&
306 nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
307 goto nla_put_failure;
308
309 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
310 ci.ndm_confirmed = 0;
311 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
312 ci.ndm_refcnt = 0;
313
314 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
315 goto nla_put_failure;
316
317 nlmsg_end(skb, nlh);
318 return 0;
319
320nla_put_failure:
321 nlmsg_cancel(skb, nlh);
322 return -EMSGSIZE;
323}
324
325static inline size_t vxlan_nlmsg_size(void)
326{
327 return NLMSG_ALIGN(sizeof(struct ndmsg))
328 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
329 + nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
330 + nla_total_size(sizeof(__be16)) /* NDA_PORT */
331 + nla_total_size(sizeof(__be32)) /* NDA_VNI */
332 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
333 + nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
334 + nla_total_size(sizeof(struct nda_cacheinfo));
335}
336
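/* Send a neighbour netlink notification for an FDB change to
 * RTNLGRP_NEIGH listeners.  Uses GFP_ATOMIC since it can be called
 * from the packet processing path.
 */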
337static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
338 struct vxlan_rdst *rd, int type)
339{
340 struct net *net = dev_net(vxlan->dev);
341 struct sk_buff *skb;
342 int err = -ENOBUFS;
343
344 skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
345 if (skb == NULL)
346 goto errout;
347
348 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
349 if (err < 0) {
350 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
351 WARN_ON(err == -EMSGSIZE);
352 kfree_skb(skb);
353 goto errout;
354 }
355
356 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
357 return;
358errout:
359 if (err < 0)
360 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
361}
362
363static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
364{
365 struct vxlan_dev *vxlan = netdev_priv(dev);
366 struct vxlan_fdb f = {
367 .state = NUD_STALE,
368 };
369 struct vxlan_rdst remote = {
370 .remote_ip = *ipa, /* goes to NDA_DST */
371 .remote_vni = cpu_to_be32(VXLAN_N_VID),
372 };
373
374 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
375}
376
377static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
378{
379 struct vxlan_fdb f = {
380 .state = NUD_STALE,
381 };
382 struct vxlan_rdst remote = { };
383
384 memcpy(f.eth_addr, eth_addr, ETH_ALEN);
385
386 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
387}
388
389/* Hash Ethernet address */
390static u32 eth_hash(const unsigned char *addr)
391{
392 u64 value = get_unaligned((u64 *)addr);
393
394 /* only want 6 bytes */
395#ifdef __BIG_ENDIAN
396 value >>= 16;
397#else
398 value <<= 16;
399#endif
400 return hash_64(value, FDB_HASH_BITS);
401}
402
403/* Hash chain to use given mac address */
404static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
405 const u8 *mac)
406{
407 return &vxlan->fdb_head[eth_hash(mac)];
408}
409
410/* Look up Ethernet address in forwarding table */
411static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
412 const u8 *mac)
413{
414 struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
415 struct vxlan_fdb *f;
416
417 hlist_for_each_entry_rcu(f, head, hlist) {
418 if (ether_addr_equal(mac, f->eth_addr))
419 return f;
420 }
421
422 return NULL;
423}
424
425static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
426 const u8 *mac)
427{
428 struct vxlan_fdb *f;
429
430 f = __vxlan_find_mac(vxlan, mac);
431 if (f)
432 f->used = jiffies;
433
434 return f;
435}
436
437/* caller should hold vxlan->hash_lock */
438static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
439 union vxlan_addr *ip, __be16 port,
440 __be32 vni, __u32 ifindex)
441{
442 struct vxlan_rdst *rd;
443
444 list_for_each_entry(rd, &f->remotes, list) {
445 if (vxlan_addr_equal(&rd->remote_ip, ip) &&
446 rd->remote_port == port &&
447 rd->remote_vni == vni &&
448 rd->remote_ifindex == ifindex)
449 return rd;
450 }
451
452 return NULL;
453}
454
455/* Replace destination of unicast mac */
456static int vxlan_fdb_replace(struct vxlan_fdb *f,
457 union vxlan_addr *ip, __be16 port, __be32 vni,
458 __u32 ifindex)
459{
460 struct vxlan_rdst *rd;
461
462 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
463 if (rd)
464 return 0;
465
466 rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
467 if (!rd)
468 return 0;
469
470 dst_cache_reset(&rd->dst_cache);
471 rd->remote_ip = *ip;
472 rd->remote_port = port;
473 rd->remote_vni = vni;
474 rd->remote_ifindex = ifindex;
475 return 1;
476}
477
478/* Add/update destinations for multicast */
479static int vxlan_fdb_append(struct vxlan_fdb *f,
480 union vxlan_addr *ip, __be16 port, __be32 vni,
481 __u32 ifindex, struct vxlan_rdst **rdp)
482{
483 struct vxlan_rdst *rd;
484
485 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
486 if (rd)
487 return 0;
488
489 rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
490 if (rd == NULL)
491 return -ENOBUFS;
492
493 if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
494 kfree(rd);
495 return -ENOBUFS;
496 }
497
498 rd->remote_ip = *ip;
499 rd->remote_port = port;
500 rd->remote_vni = vni;
501 rd->remote_ifindex = ifindex;
502
503 list_add_tail_rcu(&rd->list, &f->remotes);
504
505 *rdp = rd;
506 return 1;
507}
508
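/* Handle the remote checksum offload (RCO) extension during GRO.
 * Returns the (possibly relocated) VXLAN header on success, or NULL
 * if the outer checksum has not been validated.
 */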
509static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
510 unsigned int off,
511 struct vxlanhdr *vh, size_t hdrlen,
512 __be32 vni_field,
513 struct gro_remcsum *grc,
514 bool nopartial)
515{
516 size_t start, offset;
517
518 if (skb->remcsum_offload)
519 return vh;
520
521 if (!NAPI_GRO_CB(skb)->csum_valid)
522 return NULL;
523
524 start = vxlan_rco_start(vni_field);
525 offset = start + vxlan_rco_offset(vni_field);
526
527 vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
528 start, offset, grc, nopartial);
529
530 skb->remcsum_offload = 1;
531
532 return vh;
533}
534
535static struct sk_buff **vxlan_gro_receive(struct sock *sk,
536 struct sk_buff **head,
537 struct sk_buff *skb)
538{
539 struct sk_buff *p, **pp = NULL;
540 struct vxlanhdr *vh, *vh2;
541 unsigned int hlen, off_vx;
542 int flush = 1;
543 struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
544 __be32 flags;
545 struct gro_remcsum grc;
546
547 skb_gro_remcsum_init(&grc);
548
549 off_vx = skb_gro_offset(skb);
550 hlen = off_vx + sizeof(*vh);
551 vh = skb_gro_header_fast(skb, off_vx);
552 if (skb_gro_header_hard(skb, hlen)) {
553 vh = skb_gro_header_slow(skb, hlen, off_vx);
554 if (unlikely(!vh))
555 goto out;
556 }
557
558 skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
559
560 flags = vh->vx_flags;
561
562 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
563 vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
564 vh->vx_vni, &grc,
565 !!(vs->flags &
566 VXLAN_F_REMCSUM_NOPARTIAL));
567
568 if (!vh)
569 goto out;
570 }
571
572 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
573
574 for (p = *head; p; p = p->next) {
575 if (!NAPI_GRO_CB(p)->same_flow)
576 continue;
577
578 vh2 = (struct vxlanhdr *)(p->data + off_vx);
579 if (vh->vx_flags != vh2->vx_flags ||
580 vh->vx_vni != vh2->vx_vni) {
581 NAPI_GRO_CB(p)->same_flow = 0;
582 continue;
583 }
584 }
585
586 pp = call_gro_receive(eth_gro_receive, head, skb);
587 flush = 0;
588
589out:
590 skb_gro_remcsum_cleanup(skb, &grc);
591 NAPI_GRO_CB(skb)->flush |= flush;
592
593 return pp;
594}
595
596static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
597{
598 /* Sets 'skb->inner_mac_header' since we are always called with
599 * 'skb->encapsulation' set.
600 */
601 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
602}
603
604/* Add new entry to forwarding table -- assumes lock held */
605static int vxlan_fdb_create(struct vxlan_dev *vxlan,
606 const u8 *mac, union vxlan_addr *ip,
607 __u16 state, __u16 flags,
608 __be16 port, __be32 vni, __u32 ifindex,
609 __u8 ndm_flags)
610{
611 struct vxlan_rdst *rd = NULL;
612 struct vxlan_fdb *f;
613 int notify = 0;
614 int rc;
615
616 f = __vxlan_find_mac(vxlan, mac);
617 if (f) {
618 if (flags & NLM_F_EXCL) {
619 netdev_dbg(vxlan->dev,
620 "lost race to create %pM\n", mac);
621 return -EEXIST;
622 }
623 if (f->state != state) {
624 f->state = state;
625 f->updated = jiffies;
626 notify = 1;
627 }
628 if (f->flags != ndm_flags) {
629 f->flags = ndm_flags;
630 f->updated = jiffies;
631 notify = 1;
632 }
633 if ((flags & NLM_F_REPLACE)) {
634 /* Only change unicasts */
635 if (!(is_multicast_ether_addr(f->eth_addr) ||
636 is_zero_ether_addr(f->eth_addr))) {
637 notify |= vxlan_fdb_replace(f, ip, port, vni,
638 ifindex);
639 } else
640 return -EOPNOTSUPP;
641 }
642 if ((flags & NLM_F_APPEND) &&
643 (is_multicast_ether_addr(f->eth_addr) ||
644 is_zero_ether_addr(f->eth_addr))) {
645 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
646
647 if (rc < 0)
648 return rc;
649 notify |= rc;
650 }
651 } else {
652 if (!(flags & NLM_F_CREATE))
653 return -ENOENT;
654
655 if (vxlan->cfg.addrmax &&
656 vxlan->addrcnt >= vxlan->cfg.addrmax)
657 return -ENOSPC;
658
659 /* Disallow replace to add a multicast entry */
660 if ((flags & NLM_F_REPLACE) &&
661 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
662 return -EOPNOTSUPP;
663
664 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
665 f = kmalloc(sizeof(*f), GFP_ATOMIC);
666 if (!f)
667 return -ENOMEM;
668
669 notify = 1;
670 f->state = state;
671 f->flags = ndm_flags;
672 f->updated = f->used = jiffies;
673 INIT_LIST_HEAD(&f->remotes);
674 memcpy(f->eth_addr, mac, ETH_ALEN);
675
676 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
677 if (rc < 0) {
678 kfree(f);
679 return rc;
680 }
681
682 ++vxlan->addrcnt;
683 hlist_add_head_rcu(&f->hlist,
684 vxlan_fdb_head(vxlan, mac));
685 }
686
687 if (notify) {
688 if (rd == NULL)
689 rd = first_remote_rtnl(f);
690 vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
691 }
692
693 return 0;
694}
695
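/* RCU callback: free an FDB entry together with all of its remote
 * destinations once readers have finished with it.
 */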
696static void vxlan_fdb_free(struct rcu_head *head)
697{
698 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
699 struct vxlan_rdst *rd, *nd;
700
701 list_for_each_entry_safe(rd, nd, &f->remotes, list) {
702 dst_cache_destroy(&rd->dst_cache);
703 kfree(rd);
704 }
705 kfree(f);
706}
707
708static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
709{
710 netdev_dbg(vxlan->dev,
711 "delete %pM\n", f->eth_addr);
712
713 --vxlan->addrcnt;
714 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
715
716 hlist_del_rcu(&f->hlist);
717 call_rcu(&f->rcu, vxlan_fdb_free);
718}
719
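/* Parse the optional netlink attributes of an FDB request (NDA_DST,
 * NDA_PORT, NDA_VNI, NDA_IFINDEX), falling back to the device
 * defaults for attributes that are not supplied.
 */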
720static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
721 union vxlan_addr *ip, __be16 *port, __be32 *vni,
722 u32 *ifindex)
723{
724 struct net *net = dev_net(vxlan->dev);
725 int err;
726
727 if (tb[NDA_DST]) {
728 err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
729 if (err)
730 return err;
731 } else {
732 union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
733 if (remote->sa.sa_family == AF_INET) {
734 ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
735 ip->sa.sa_family = AF_INET;
736#if IS_ENABLED(CONFIG_IPV6)
737 } else {
738 ip->sin6.sin6_addr = in6addr_any;
739 ip->sa.sa_family = AF_INET6;
740#endif
741 }
742 }
743
744 if (tb[NDA_PORT]) {
745 if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
746 return -EINVAL;
747 *port = nla_get_be16(tb[NDA_PORT]);
748 } else {
749 *port = vxlan->cfg.dst_port;
750 }
751
752 if (tb[NDA_VNI]) {
753 if (nla_len(tb[NDA_VNI]) != sizeof(u32))
754 return -EINVAL;
755 *vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
756 } else {
757 *vni = vxlan->default_dst.remote_vni;
758 }
759
760 if (tb[NDA_IFINDEX]) {
761 struct net_device *tdev;
762
763 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
764 return -EINVAL;
765 *ifindex = nla_get_u32(tb[NDA_IFINDEX]);
766 tdev = __dev_get_by_index(net, *ifindex);
767 if (!tdev)
768 return -EADDRNOTAVAIL;
769 } else {
770 *ifindex = 0;
771 }
772
773 return 0;
774}
775
776/* Add static entry (via netlink) */
777static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
778 struct net_device *dev,
779 const unsigned char *addr, u16 vid, u16 flags)
780{
781 struct vxlan_dev *vxlan = netdev_priv(dev);
782 /* struct net *net = dev_net(vxlan->dev); */
783 union vxlan_addr ip;
784 __be16 port;
785 __be32 vni;
786 u32 ifindex;
787 int err;
788
789 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
790 pr_info("RTM_NEWNEIGH with invalid state %#x\n",
791 ndm->ndm_state);
792 return -EINVAL;
793 }
794
795 if (tb[NDA_DST] == NULL)
796 return -EINVAL;
797
798 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
799 if (err)
800 return err;
801
802 if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
803 return -EAFNOSUPPORT;
804
805 spin_lock_bh(&vxlan->hash_lock);
806 err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
807 port, vni, ifindex, ndm->ndm_flags);
808 spin_unlock_bh(&vxlan->hash_lock);
809
810 return err;
811}
812
813/* Delete entry (via netlink) */
814static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
815 struct net_device *dev,
816 const unsigned char *addr, u16 vid)
817{
818 struct vxlan_dev *vxlan = netdev_priv(dev);
819 struct vxlan_fdb *f;
820 struct vxlan_rdst *rd = NULL;
821 union vxlan_addr ip;
822 __be16 port;
823 __be32 vni;
824 u32 ifindex;
825 int err;
826
827 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
828 if (err)
829 return err;
830
831 err = -ENOENT;
832
833 spin_lock_bh(&vxlan->hash_lock);
834 f = vxlan_find_mac(vxlan, addr);
835 if (!f)
836 goto out;
837
838 if (!vxlan_addr_any(&ip)) {
839 rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
840 if (!rd)
841 goto out;
842 }
843
844 err = 0;
845
846 /* remove a destination if it's not the only one on the list,
847 * otherwise destroy the fdb entry
848 */
849 if (rd && !list_is_singular(&f->remotes)) {
850 list_del_rcu(&rd->list);
851 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
852 kfree_rcu(rd, rcu);
853 goto out;
854 }
855
856 vxlan_fdb_destroy(vxlan, f);
857
858out:
859 spin_unlock_bh(&vxlan->hash_lock);
860
861 return err;
862}
863
864/* Dump forwarding table */
865static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
866 struct net_device *dev,
867 struct net_device *filter_dev, int *idx)
868{
869 struct vxlan_dev *vxlan = netdev_priv(dev);
870 unsigned int h;
871 int err = 0;
872
873 for (h = 0; h < FDB_HASH_SIZE; ++h) {
874 struct vxlan_fdb *f;
875
876 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
877 struct vxlan_rdst *rd;
878
879 list_for_each_entry_rcu(rd, &f->remotes, list) {
880 if (*idx < cb->args[2])
881 goto skip;
882
883 err = vxlan_fdb_info(skb, vxlan, f,
884 NETLINK_CB(cb->skb).portid,
885 cb->nlh->nlmsg_seq,
886 RTM_NEWNEIGH,
887 NLM_F_MULTI, rd);
888 if (err < 0)
889 goto out;
890skip:
891 *idx += 1;
892 }
893 }
894 }
895out:
896 return err;
897}
898
899/* Watch incoming packets to learn mapping between Ethernet address
900 * and Tunnel endpoint.
901 * Return true if packet is bogus and should be dropped.
902 */
903static bool vxlan_snoop(struct net_device *dev,
904 union vxlan_addr *src_ip, const u8 *src_mac)
905{
906 struct vxlan_dev *vxlan = netdev_priv(dev);
907 struct vxlan_fdb *f;
908
909 f = vxlan_find_mac(vxlan, src_mac);
910 if (likely(f)) {
911 struct vxlan_rdst *rdst = first_remote_rcu(f);
912
913 if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
914 return false;
915
916 /* Don't migrate static entries, drop packets */
917 if (f->state & NUD_NOARP)
918 return true;
919
920 if (net_ratelimit())
921 netdev_info(dev,
922 "%pM migrated from %pIS to %pIS\n",
923 src_mac, &rdst->remote_ip.sa, &src_ip->sa);
924
925 rdst->remote_ip = *src_ip;
926 f->updated = jiffies;
927 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
928 } else {
929 /* learned new entry */
930 spin_lock(&vxlan->hash_lock);
931
932 /* close off race between vxlan_flush and incoming packets */
933 if (netif_running(dev))
934 vxlan_fdb_create(vxlan, src_mac, src_ip,
935 NUD_REACHABLE,
936 NLM_F_EXCL|NLM_F_CREATE,
937 vxlan->cfg.dst_port,
938 vxlan->default_dst.remote_vni,
939 0, NTF_SELF);
940 spin_unlock(&vxlan->hash_lock);
941 }
942
943 return false;
944}
945
946/* See if multicast group is already in use by other ID */
947static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
948{
949 struct vxlan_dev *vxlan;
950 struct vxlan_sock *sock4;
951#if IS_ENABLED(CONFIG_IPV6)
952 struct vxlan_sock *sock6;
953#endif
954 unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
955
956 sock4 = rtnl_dereference(dev->vn4_sock);
957
958 /* The vxlan_sock is only used by dev, leaving group has
959 * no effect on other vxlan devices.
960 */
961 if (family == AF_INET && sock4 && atomic_read(&sock4->refcnt) == 1)
962 return false;
963#if IS_ENABLED(CONFIG_IPV6)
964 sock6 = rtnl_dereference(dev->vn6_sock);
965 if (family == AF_INET6 && sock6 && atomic_read(&sock6->refcnt) == 1)
966 return false;
967#endif
968
969 list_for_each_entry(vxlan, &vn->vxlan_list, next) {
970 if (!netif_running(vxlan->dev) || vxlan == dev)
971 continue;
972
973 if (family == AF_INET &&
974 rtnl_dereference(vxlan->vn4_sock) != sock4)
975 continue;
976#if IS_ENABLED(CONFIG_IPV6)
977 if (family == AF_INET6 &&
978 rtnl_dereference(vxlan->vn6_sock) != sock6)
979 continue;
980#endif
981
982 if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
983 &dev->default_dst.remote_ip))
984 continue;
985
986 if (vxlan->default_dst.remote_ifindex !=
987 dev->default_dst.remote_ifindex)
988 continue;
989
990 return true;
991 }
992
993 return false;
994}
995
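/* Drop one reference on a VXLAN socket.  If it was the last one,
 * unlink the socket from the per-netns hash table and withdraw the
 * UDP port from NIC offload; returns true when the caller must then
 * release and free the socket.
 */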
996static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
997{
998 struct vxlan_net *vn;
999
1000 if (!vs)
1001 return false;
1002 if (!atomic_dec_and_test(&vs->refcnt))
1003 return false;
1004
1005 vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
1006 spin_lock(&vn->sock_lock);
1007 hlist_del_rcu(&vs->hlist);
1008 udp_tunnel_notify_del_rx_port(vs->sock,
1009 (vs->flags & VXLAN_F_GPE) ?
1010 UDP_TUNNEL_TYPE_VXLAN_GPE :
1011 UDP_TUNNEL_TYPE_VXLAN);
1012 spin_unlock(&vn->sock_lock);
1013
1014 return true;
1015}
1016
1017static void vxlan_sock_release(struct vxlan_dev *vxlan)
1018{
1019 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
1020#if IS_ENABLED(CONFIG_IPV6)
1021 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
1022
1023 rcu_assign_pointer(vxlan->vn6_sock, NULL);
1024#endif
1025
1026 rcu_assign_pointer(vxlan->vn4_sock, NULL);
1027 synchronize_net();
1028
1029 if (__vxlan_sock_release_prep(sock4)) {
1030 udp_tunnel_sock_release(sock4->sock);
1031 kfree(sock4);
1032 }
1033
1034#if IS_ENABLED(CONFIG_IPV6)
1035 if (__vxlan_sock_release_prep(sock6)) {
1036 udp_tunnel_sock_release(sock6->sock);
1037 kfree(sock6);
1038 }
1039#endif
1040}
1041
1042/* Update multicast group membership when the first VNI on a
1043 * multicast address is brought up.
1044 */
1045static int vxlan_igmp_join(struct vxlan_dev *vxlan)
1046{
1047 struct sock *sk;
1048 union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
1049 int ifindex = vxlan->default_dst.remote_ifindex;
1050 int ret = -EINVAL;
1051
1052 if (ip->sa.sa_family == AF_INET) {
1053 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
1054 struct ip_mreqn mreq = {
1055 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1056 .imr_ifindex = ifindex,
1057 };
1058
1059 sk = sock4->sock->sk;
1060 lock_sock(sk);
1061 ret = ip_mc_join_group(sk, &mreq);
1062 release_sock(sk);
1063#if IS_ENABLED(CONFIG_IPV6)
1064 } else {
1065 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
1066
1067 sk = sock6->sock->sk;
1068 lock_sock(sk);
1069 ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
1070 &ip->sin6.sin6_addr);
1071 release_sock(sk);
1072#endif
1073 }
1074
1075 return ret;
1076}
1077
1078/* Inverse of vxlan_igmp_join() when the last VNI is brought down */
1079static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
1080{
1081 struct sock *sk;
1082 union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
1083 int ifindex = vxlan->default_dst.remote_ifindex;
1084 int ret = -EINVAL;
1085
1086 if (ip->sa.sa_family == AF_INET) {
1087 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
1088 struct ip_mreqn mreq = {
1089 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1090 .imr_ifindex = ifindex,
1091 };
1092
1093 sk = sock4->sock->sk;
1094 lock_sock(sk);
1095 ret = ip_mc_leave_group(sk, &mreq);
1096 release_sock(sk);
1097#if IS_ENABLED(CONFIG_IPV6)
1098 } else {
1099 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
1100
1101 sk = sock6->sock->sk;
1102 lock_sock(sk);
1103 ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
1104 &ip->sin6.sin6_addr);
1105 release_sock(sk);
1106#endif
1107 }
1108
1109 return ret;
1110}
1111
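/* Non-GRO receive handling of the remote checksum offload extension:
 * fold the checksum with skb_remcsum_process() and clear the RCO bits
 * from the local copy of the header.
 */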
1112static bool vxlan_remcsum(struct vxlanhdr *unparsed,
1113 struct sk_buff *skb, u32 vxflags)
1114{
1115 size_t start, offset;
1116
1117 if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
1118 goto out;
1119
1120 start = vxlan_rco_start(unparsed->vx_vni);
1121 offset = start + vxlan_rco_offset(unparsed->vx_vni);
1122
1123 if (!pskb_may_pull(skb, offset + sizeof(u16)))
1124 return false;
1125
1126 skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
1127 !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
1128out:
1129 unparsed->vx_flags &= ~VXLAN_HF_RCO;
1130 unparsed->vx_vni &= VXLAN_VNI_MASK;
1131 return true;
1132}
1133
1134static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
1135 struct sk_buff *skb, u32 vxflags,
1136 struct vxlan_metadata *md)
1137{
1138 struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
1139 struct metadata_dst *tun_dst;
1140
1141 if (!(unparsed->vx_flags & VXLAN_HF_GBP))
1142 goto out;
1143
1144 md->gbp = ntohs(gbp->policy_id);
1145
1146 tun_dst = (struct metadata_dst *)skb_dst(skb);
1147 if (tun_dst) {
1148 tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
1149 tun_dst->u.tun_info.options_len = sizeof(*md);
1150 }
1151 if (gbp->dont_learn)
1152 md->gbp |= VXLAN_GBP_DONT_LEARN;
1153
1154 if (gbp->policy_applied)
1155 md->gbp |= VXLAN_GBP_POLICY_APPLIED;
1156
1157 /* In flow-based mode, GBP is carried in dst_metadata */
1158 if (!(vxflags & VXLAN_F_COLLECT_METADATA))
1159 skb->mark = md->gbp;
1160out:
1161 unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
1162}
1163
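/* Validate the VXLAN-GPE extension and translate its Next Protocol
 * field into an Ethernet protocol value.  Returns false (drop) for
 * unsupported versions, OAM packets or unknown protocols.
 */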
1164static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
1165 __be16 *protocol,
1166 struct sk_buff *skb, u32 vxflags)
1167{
1168 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;
1169
1170 /* Need to have Next Protocol set for interfaces in GPE mode. */
1171 if (!gpe->np_applied)
1172 return false;
1173 /* "The initial version is 0. If a receiver does not support the
1174 * version indicated it MUST drop the packet."
1175 */
1176 if (gpe->version != 0)
1177 return false;
1178 /* "When the O bit is set to 1, the packet is an OAM packet and OAM
1179 * processing MUST occur." However, we don't implement OAM
1180 * processing, thus drop the packet.
1181 */
1182 if (gpe->oam_flag)
1183 return false;
1184
1185 switch (gpe->next_protocol) {
1186 case VXLAN_GPE_NP_IPV4:
1187 *protocol = htons(ETH_P_IP);
1188 break;
1189 case VXLAN_GPE_NP_IPV6:
1190 *protocol = htons(ETH_P_IPV6);
1191 break;
1192 case VXLAN_GPE_NP_ETHERNET:
1193 *protocol = htons(ETH_P_TEB);
1194 break;
1195 default:
1196 return false;
1197 }
1198
1199 unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
1200 return true;
1201}
1202
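/* Set up the inner Ethernet frame on receive: drop frames looped back
 * from our own MAC address and, if learning is enabled, record the
 * source MAC to outer source IP mapping.  Returns false if the packet
 * should be dropped.
 */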
1203static bool vxlan_set_mac(struct vxlan_dev *vxlan,
1204 struct vxlan_sock *vs,
1205 struct sk_buff *skb)
1206{
1207 union vxlan_addr saddr;
1208
1209 skb_reset_mac_header(skb);
1210 skb->protocol = eth_type_trans(skb, vxlan->dev);
1211 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1212
1213 /* Ignore packet loops (and multicast echo) */
1214 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
1215 return false;
1216
1217 /* Get address from the outer IP header */
1218 if (vxlan_get_sk_family(vs) == AF_INET) {
1219 saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
1220 saddr.sa.sa_family = AF_INET;
1221#if IS_ENABLED(CONFIG_IPV6)
1222 } else {
1223 saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
1224 saddr.sa.sa_family = AF_INET6;
1225#endif
1226 }
1227
1228 if ((vxlan->flags & VXLAN_F_LEARN) &&
1229 vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
1230 return false;
1231
1232 return true;
1233}
1234
1235static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
1236 struct sk_buff *skb)
1237{
1238 int err = 0;
1239
1240 if (vxlan_get_sk_family(vs) == AF_INET)
1241 err = IP_ECN_decapsulate(oiph, skb);
1242#if IS_ENABLED(CONFIG_IPV6)
1243 else
1244 err = IP6_ECN_decapsulate(oiph, skb);
1245#endif
1246
1247 if (unlikely(err) && log_ecn_error) {
1248 if (vxlan_get_sk_family(vs) == AF_INET)
1249 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
1250 &((struct iphdr *)oiph)->saddr,
1251 ((struct iphdr *)oiph)->tos);
1252 else
1253 net_info_ratelimited("non-ECT from %pI6\n",
1254 &((struct ipv6hdr *)oiph)->saddr);
1255 }
1256 return err <= 1;
1257}
1258
1259/* Callback from net/ipv4/udp.c to receive packets */
1260static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
1261{
1262 struct pcpu_sw_netstats *stats;
1263 struct vxlan_dev *vxlan;
1264 struct vxlan_sock *vs;
1265 struct vxlanhdr unparsed;
1266 struct vxlan_metadata _md;
1267 struct vxlan_metadata *md = &_md;
1268 __be16 protocol = htons(ETH_P_TEB);
1269 bool raw_proto = false;
1270 void *oiph;
1271
1272 /* Need UDP and VXLAN header to be present */
1273 if (!pskb_may_pull(skb, VXLAN_HLEN))
1274 goto drop;
1275
1276 unparsed = *vxlan_hdr(skb);
1277 /* VNI flag always required to be set */
1278 if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
1279 netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
1280 ntohl(vxlan_hdr(skb)->vx_flags),
1281 ntohl(vxlan_hdr(skb)->vx_vni));
1282 /* Return non vxlan pkt */
1283 goto drop;
1284 }
1285 unparsed.vx_flags &= ~VXLAN_HF_VNI;
1286 unparsed.vx_vni &= ~VXLAN_VNI_MASK;
1287
1288 vs = rcu_dereference_sk_user_data(sk);
1289 if (!vs)
1290 goto drop;
1291
1292 vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni));
1293 if (!vxlan)
1294 goto drop;
1295
1296 /* For backwards compatibility, only allow reserved fields to be
1297 * used by VXLAN extensions if explicitly requested.
1298 */
1299 if (vs->flags & VXLAN_F_GPE) {
1300 if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
1301 goto drop;
1302 raw_proto = true;
1303 }
1304
1305 if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
1306 !net_eq(vxlan->net, dev_net(vxlan->dev))))
1307 goto drop;
1308
1309 if (vxlan_collect_metadata(vs)) {
1310 __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
1311 struct metadata_dst *tun_dst;
1312
1313 tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
1314 key32_to_tunnel_id(vni), sizeof(*md));
1315
1316 if (!tun_dst)
1317 goto drop;
1318
1319 md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
1320
1321 skb_dst_set(skb, (struct dst_entry *)tun_dst);
1322 } else {
1323 memset(md, 0, sizeof(*md));
1324 }
1325
1326 if (vs->flags & VXLAN_F_REMCSUM_RX)
1327 if (!vxlan_remcsum(&unparsed, skb, vs->flags))
1328 goto drop;
1329 if (vs->flags & VXLAN_F_GBP)
1330 vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
1331 /* Note that GBP and GPE can never be active together. This is
1332 * ensured in vxlan_dev_configure.
1333 */
1334
1335 if (unparsed.vx_flags || unparsed.vx_vni) {
1336 /* If there are any unprocessed flags remaining treat
1337 * this as a malformed packet. This behavior diverges from
1338 * VXLAN RFC (RFC7348), which stipulates that bits set in
1339 * reserved fields are to be ignored. The approach here
1340 * maintains compatibility with previous stack code, and also
1341 * is more robust and provides a little more security in
1342 * adding extensions to VXLAN.
1343 */
1344 goto drop;
1345 }
1346
1347 if (!raw_proto) {
1348 if (!vxlan_set_mac(vxlan, vs, skb))
1349 goto drop;
1350 } else {
1351 skb_reset_mac_header(skb);
1352 skb->dev = vxlan->dev;
1353 skb->pkt_type = PACKET_HOST;
1354 }
1355
1356 oiph = skb_network_header(skb);
1357 skb_reset_network_header(skb);
1358
1359 if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
1360 ++vxlan->dev->stats.rx_frame_errors;
1361 ++vxlan->dev->stats.rx_errors;
1362 goto drop;
1363 }
1364
1365 stats = this_cpu_ptr(vxlan->dev->tstats);
1366 u64_stats_update_begin(&stats->syncp);
1367 stats->rx_packets++;
1368 stats->rx_bytes += skb->len;
1369 u64_stats_update_end(&stats->syncp);
1370
1371 gro_cells_receive(&vxlan->gro_cells, skb);
1372 return 0;
1373
1374drop:
1375 /* Consume bad packet */
1376 kfree_skb(skb);
1377 return 0;
1378}
1379
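/* ARP proxying (VXLAN_F_PROXY): answer ARP requests from the local
 * neighbour cache instead of flooding them across the tunnel, and
 * signal an L3 miss for unknown targets.
 */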
1380static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
1381{
1382 struct vxlan_dev *vxlan = netdev_priv(dev);
1383 struct arphdr *parp;
1384 u8 *arpptr, *sha;
1385 __be32 sip, tip;
1386 struct neighbour *n;
1387
1388 if (dev->flags & IFF_NOARP)
1389 goto out;
1390
1391 if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
1392 dev->stats.tx_dropped++;
1393 goto out;
1394 }
1395 parp = arp_hdr(skb);
1396
1397 if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
1398 parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
1399 parp->ar_pro != htons(ETH_P_IP) ||
1400 parp->ar_op != htons(ARPOP_REQUEST) ||
1401 parp->ar_hln != dev->addr_len ||
1402 parp->ar_pln != 4)
1403 goto out;
1404 arpptr = (u8 *)parp + sizeof(struct arphdr);
1405 sha = arpptr;
1406 arpptr += dev->addr_len; /* sha */
1407 memcpy(&sip, arpptr, sizeof(sip));
1408 arpptr += sizeof(sip);
1409 arpptr += dev->addr_len; /* tha */
1410 memcpy(&tip, arpptr, sizeof(tip));
1411
1412 if (ipv4_is_loopback(tip) ||
1413 ipv4_is_multicast(tip))
1414 goto out;
1415
1416 n = neigh_lookup(&arp_tbl, &tip, dev);
1417
1418 if (n) {
1419 struct vxlan_fdb *f;
1420 struct sk_buff *reply;
1421
1422 if (!(n->nud_state & NUD_CONNECTED)) {
1423 neigh_release(n);
1424 goto out;
1425 }
1426
1427 f = vxlan_find_mac(vxlan, n->ha);
1428 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
1429 /* bridge-local neighbor */
1430 neigh_release(n);
1431 goto out;
1432 }
1433
1434 reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1435 n->ha, sha);
1436
1437 neigh_release(n);
1438
1439 if (reply == NULL)
1440 goto out;
1441
1442 skb_reset_mac_header(reply);
1443 __skb_pull(reply, skb_network_offset(reply));
1444 reply->ip_summed = CHECKSUM_UNNECESSARY;
1445 reply->pkt_type = PACKET_HOST;
1446
1447 if (netif_rx_ni(reply) == NET_RX_DROP)
1448 dev->stats.rx_dropped++;
1449 } else if (vxlan->flags & VXLAN_F_L3MISS) {
1450 union vxlan_addr ipa = {
1451 .sin.sin_addr.s_addr = tip,
1452 .sin.sin_family = AF_INET,
1453 };
1454
1455 vxlan_ip_miss(dev, &ipa);
1456 }
1457out:
1458 consume_skb(skb);
1459 return NETDEV_TX_OK;
1460}
1461
1462#if IS_ENABLED(CONFIG_IPV6)
1463static struct sk_buff *vxlan_na_create(struct sk_buff *request,
1464 struct neighbour *n, bool isrouter)
1465{
1466 struct net_device *dev = request->dev;
1467 struct sk_buff *reply;
1468 struct nd_msg *ns, *na;
1469 struct ipv6hdr *pip6;
1470 u8 *daddr;
1471 int na_olen = 8; /* opt hdr + ETH_ALEN for target */
1472 int ns_olen;
1473 int i, len;
1474
1475 if (dev == NULL)
1476 return NULL;
1477
1478 len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
1479 sizeof(*na) + na_olen + dev->needed_tailroom;
1480 reply = alloc_skb(len, GFP_ATOMIC);
1481 if (reply == NULL)
1482 return NULL;
1483
1484 reply->protocol = htons(ETH_P_IPV6);
1485 reply->dev = dev;
1486 skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
1487 skb_push(reply, sizeof(struct ethhdr));
1488 skb_reset_mac_header(reply);
1489
1490 ns = (struct nd_msg *)skb_transport_header(request);
1491
1492 daddr = eth_hdr(request)->h_source;
1493 ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
1494 for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
1495 if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
1496 daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
1497 break;
1498 }
1499 }
1500
1501 /* Ethernet header */
1502 ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
1503 ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
1504 eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
1505 reply->protocol = htons(ETH_P_IPV6);
1506
1507 skb_pull(reply, sizeof(struct ethhdr));
1508 skb_reset_network_header(reply);
1509 skb_put(reply, sizeof(struct ipv6hdr));
1510
1511 /* IPv6 header */
1512
1513 pip6 = ipv6_hdr(reply);
1514 memset(pip6, 0, sizeof(struct ipv6hdr));
1515 pip6->version = 6;
1516 pip6->priority = ipv6_hdr(request)->priority;
1517 pip6->nexthdr = IPPROTO_ICMPV6;
1518 pip6->hop_limit = 255;
1519 pip6->daddr = ipv6_hdr(request)->saddr;
1520 pip6->saddr = *(struct in6_addr *)n->primary_key;
1521
1522 skb_pull(reply, sizeof(struct ipv6hdr));
1523 skb_reset_transport_header(reply);
1524
1525 na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
1526
1527 /* Neighbor Advertisement */
1528 memset(na, 0, sizeof(*na)+na_olen);
1529 na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
1530 na->icmph.icmp6_router = isrouter;
1531 na->icmph.icmp6_override = 1;
1532 na->icmph.icmp6_solicited = 1;
1533 na->target = ns->target;
1534 ether_addr_copy(&na->opt[2], n->ha);
1535 na->opt[0] = ND_OPT_TARGET_LL_ADDR;
1536 na->opt[1] = na_olen >> 3;
1537
1538 na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
1539 &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
1540 csum_partial(na, sizeof(*na)+na_olen, 0));
1541
1542 pip6->payload_len = htons(sizeof(*na)+na_olen);
1543
1544 skb_push(reply, sizeof(struct ipv6hdr));
1545
1546 reply->ip_summed = CHECKSUM_UNNECESSARY;
1547
1548 return reply;
1549}
1550
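/* IPv6 counterpart of arp_reduce(): answer neighbour solicitations
 * from the local neighbour cache, or signal an L3 miss for unknown
 * targets.
 */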
1551static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
1552{
1553 struct vxlan_dev *vxlan = netdev_priv(dev);
1554 struct nd_msg *msg;
1555 const struct ipv6hdr *iphdr;
1556 const struct in6_addr *saddr, *daddr;
1557 struct neighbour *n;
1558 struct inet6_dev *in6_dev;
1559
1560 in6_dev = __in6_dev_get(dev);
1561 if (!in6_dev)
1562 goto out;
1563
1564 iphdr = ipv6_hdr(skb);
1565 saddr = &iphdr->saddr;
1566 daddr = &iphdr->daddr;
1567
1568 msg = (struct nd_msg *)skb_transport_header(skb);
1569 if (msg->icmph.icmp6_code != 0 ||
1570 msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
1571 goto out;
1572
1573 if (ipv6_addr_loopback(daddr) ||
1574 ipv6_addr_is_multicast(&msg->target))
1575 goto out;
1576
1577 n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
1578
1579 if (n) {
1580 struct vxlan_fdb *f;
1581 struct sk_buff *reply;
1582
1583 if (!(n->nud_state & NUD_CONNECTED)) {
1584 neigh_release(n);
1585 goto out;
1586 }
1587
1588 f = vxlan_find_mac(vxlan, n->ha);
1589 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
1590 /* bridge-local neighbor */
1591 neigh_release(n);
1592 goto out;
1593 }
1594
1595 reply = vxlan_na_create(skb, n,
1596 !!(f ? f->flags & NTF_ROUTER : 0));
1597
1598 neigh_release(n);
1599
1600 if (reply == NULL)
1601 goto out;
1602
1603 if (netif_rx_ni(reply) == NET_RX_DROP)
1604 dev->stats.rx_dropped++;
1605
1606 } else if (vxlan->flags & VXLAN_F_L3MISS) {
1607 union vxlan_addr ipa = {
1608 .sin6.sin6_addr = msg->target,
1609 .sin6.sin6_family = AF_INET6,
1610 };
1611
1612 vxlan_ip_miss(dev, &ipa);
1613 }
1614
1615out:
1616 consume_skb(skb);
1617 return NETDEV_TX_OK;
1618}
1619#endif
1620
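/* Dynamic route short-circuiting: when the destination MAC belongs to
 * a router, look up the inner IP destination in the neighbour table
 * and rewrite the destination MAC to that of the final host.  Returns
 * true if the Ethernet header was rewritten.
 */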
1621static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
1622{
1623 struct vxlan_dev *vxlan = netdev_priv(dev);
1624 struct neighbour *n;
1625
1626 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
1627 return false;
1628
1629 n = NULL;
1630 switch (ntohs(eth_hdr(skb)->h_proto)) {
1631 case ETH_P_IP:
1632 {
1633 struct iphdr *pip;
1634
1635 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
1636 return false;
1637 pip = ip_hdr(skb);
1638 n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
1639 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
1640 union vxlan_addr ipa = {
1641 .sin.sin_addr.s_addr = pip->daddr,
1642 .sin.sin_family = AF_INET,
1643 };
1644
1645 vxlan_ip_miss(dev, &ipa);
1646 return false;
1647 }
1648
1649 break;
1650 }
1651#if IS_ENABLED(CONFIG_IPV6)
1652 case ETH_P_IPV6:
1653 {
1654 struct ipv6hdr *pip6;
1655
1656 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
1657 return false;
1658 pip6 = ipv6_hdr(skb);
1659 n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
1660 if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
1661 union vxlan_addr ipa = {
1662 .sin6.sin6_addr = pip6->daddr,
1663 .sin6.sin6_family = AF_INET6,
1664 };
1665
1666 vxlan_ip_miss(dev, &ipa);
1667 return false;
1668 }
1669
1670 break;
1671 }
1672#endif
1673 default:
1674 return false;
1675 }
1676
1677 if (n) {
1678 bool diff;
1679
1680 diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
1681 if (diff) {
1682 memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
1683 dev->addr_len);
1684 memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
1685 }
1686 neigh_release(n);
1687 return diff;
1688 }
1689
1690 return false;
1691}
1692
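/* Encode Group Based Policy (GBP) metadata into the VXLAN header on
 * transmit.
 */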
1693static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
1694 struct vxlan_metadata *md)
1695{
1696 struct vxlanhdr_gbp *gbp;
1697
1698 if (!md->gbp)
1699 return;
1700
1701 gbp = (struct vxlanhdr_gbp *)vxh;
1702 vxh->vx_flags |= VXLAN_HF_GBP;
1703
1704 if (md->gbp & VXLAN_GBP_DONT_LEARN)
1705 gbp->dont_learn = 1;
1706
1707 if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
1708 gbp->policy_applied = 1;
1709
1710 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
1711}
1712
1713static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags,
1714 __be16 protocol)
1715{
1716 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;
1717
1718 gpe->np_applied = 1;
1719
1720 switch (protocol) {
1721 case htons(ETH_P_IP):
1722 gpe->next_protocol = VXLAN_GPE_NP_IPV4;
1723 return 0;
1724 case htons(ETH_P_IPV6):
1725 gpe->next_protocol = VXLAN_GPE_NP_IPV6;
1726 return 0;
1727 case htons(ETH_P_TEB):
1728 gpe->next_protocol = VXLAN_GPE_NP_ETHERNET;
1729 return 0;
1730 }
1731 return -EPFNOSUPPORT;
1732}
1733
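/* Push the VXLAN header onto the skb, fill in the VNI and any enabled
 * extensions (RCO, GBP, GPE), and set up tunnel offload state for the
 * outer UDP encapsulation.
 */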
1734static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
1735 int iphdr_len, __be32 vni,
1736 struct vxlan_metadata *md, u32 vxflags,
1737 bool udp_sum)
1738{
1739 struct vxlanhdr *vxh;
1740 int min_headroom;
1741 int err;
1742 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
1743 __be16 inner_protocol = htons(ETH_P_TEB);
1744
1745 if ((vxflags & VXLAN_F_REMCSUM_TX) &&
1746 skb->ip_summed == CHECKSUM_PARTIAL) {
1747 int csum_start = skb_checksum_start_offset(skb);
1748
1749 if (csum_start <= VXLAN_MAX_REMCSUM_START &&
1750 !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
1751 (skb->csum_offset == offsetof(struct udphdr, check) ||
1752 skb->csum_offset == offsetof(struct tcphdr, check)))
1753 type |= SKB_GSO_TUNNEL_REMCSUM;
1754 }
1755
1756 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
1757 + VXLAN_HLEN + iphdr_len;
1758
1759 /* Need space for new headers (invalidates iph ptr) */
1760 err = skb_cow_head(skb, min_headroom);
1761 if (unlikely(err))
1762 return err;
1763
1764 err = iptunnel_handle_offloads(skb, type);
1765 if (err)
1766 return err;
1767
1768 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
1769 vxh->vx_flags = VXLAN_HF_VNI;
1770 vxh->vx_vni = vxlan_vni_field(vni);
1771
1772 if (type & SKB_GSO_TUNNEL_REMCSUM) {
1773 unsigned int start;
1774
1775 start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
1776 vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
1777 vxh->vx_flags |= VXLAN_HF_RCO;
1778
1779 if (!skb_is_gso(skb)) {
1780 skb->ip_summed = CHECKSUM_NONE;
1781 skb->encapsulation = 0;
1782 }
1783 }
1784
1785 if (vxflags & VXLAN_F_GBP)
1786 vxlan_build_gbp_hdr(vxh, vxflags, md);
1787 if (vxflags & VXLAN_F_GPE) {
1788 err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
1789 if (err < 0)
1790 return err;
1791 inner_protocol = skb->protocol;
1792 }
1793
1794 skb_set_inner_protocol(skb, inner_protocol);
1795 return 0;
1796}
1797
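/* Resolve the IPv4 route to the remote endpoint, preferring the
 * per-destination dst cache when usable.  Routes that loop back
 * through the VXLAN device itself are rejected with -ELOOP.
 */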
1798static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
1799 struct vxlan_sock *sock4,
1800 struct sk_buff *skb, int oif, u8 tos,
1801 __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
1802 struct dst_cache *dst_cache,
1803 const struct ip_tunnel_info *info)
1804{
1805 bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
1806 struct rtable *rt = NULL;
1807 struct flowi4 fl4;
1808
1809 if (!sock4)
1810 return ERR_PTR(-EIO);
1811
1812 if (tos && !info)
1813 use_cache = false;
1814 if (use_cache) {
1815 rt = dst_cache_get_ip4(dst_cache, saddr);
1816 if (rt)
1817 return rt;
1818 }
1819
1820 memset(&fl4, 0, sizeof(fl4));
1821 fl4.flowi4_oif = oif;
1822 fl4.flowi4_tos = RT_TOS(tos);
1823 fl4.flowi4_mark = skb->mark;
1824 fl4.flowi4_proto = IPPROTO_UDP;
1825 fl4.daddr = daddr;
1826 fl4.saddr = *saddr;
1827 fl4.fl4_dport = dport;
1828 fl4.fl4_sport = sport;
1829
1830 rt = ip_route_output_key(vxlan->net, &fl4);
1831 if (likely(!IS_ERR(rt))) {
1832 if (rt->dst.dev == dev) {
1833 netdev_dbg(dev, "circular route to %pI4\n", &daddr);
1834 ip_rt_put(rt);
1835 return ERR_PTR(-ELOOP);
1836 }
1837
1838 *saddr = fl4.saddr;
1839 if (use_cache)
1840 dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
1841 } else {
1842 netdev_dbg(dev, "no route to %pI4\n", &daddr);
1843 return ERR_PTR(-ENETUNREACH);
1844 }
1845 return rt;
1846}
1847
1848#if IS_ENABLED(CONFIG_IPV6)
1849static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
1850 struct net_device *dev,
1851 struct vxlan_sock *sock6,
1852 struct sk_buff *skb, int oif, u8 tos,
1853 __be32 label,
1854 const struct in6_addr *daddr,
1855 struct in6_addr *saddr,
1856 __be16 dport, __be16 sport,
1857 struct dst_cache *dst_cache,
1858 const struct ip_tunnel_info *info)
1859{
1860 bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
1861 struct dst_entry *ndst;
1862 struct flowi6 fl6;
1863 int err;
1864
1865 if (!sock6)
1866 return ERR_PTR(-EIO);
1867
1868 if (tos && !info)
1869 use_cache = false;
1870 if (use_cache) {
1871 ndst = dst_cache_get_ip6(dst_cache, saddr);
1872 if (ndst)
1873 return ndst;
1874 }
1875
1876 memset(&fl6, 0, sizeof(fl6));
1877 fl6.flowi6_oif = oif;
1878 fl6.daddr = *daddr;
1879 fl6.saddr = *saddr;
1880 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
1881 fl6.flowi6_mark = skb->mark;
1882 fl6.flowi6_proto = IPPROTO_UDP;
1883 fl6.fl6_dport = dport;
1884 fl6.fl6_sport = sport;
1885
1886 err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
1887 sock6->sock->sk,
1888 &ndst, &fl6);
1889 if (unlikely(err < 0)) {
1890 netdev_dbg(dev, "no route to %pI6\n", daddr);
1891 return ERR_PTR(-ENETUNREACH);
1892 }
1893
1894 if (unlikely(ndst->dev == dev)) {
1895 netdev_dbg(dev, "circular route to %pI6\n", daddr);
1896 dst_release(ndst);
1897 return ERR_PTR(-ELOOP);
1898 }
1899
1900 *saddr = fl6.saddr;
1901 if (use_cache)
1902 dst_cache_set_ip6(dst_cache, ndst, saddr);
1903 return ndst;
1904}
1905#endif
1906
1907/* Bypass encapsulation if the destination is local */
1908static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
1909 struct vxlan_dev *dst_vxlan)
1910{
1911 struct pcpu_sw_netstats *tx_stats, *rx_stats;
1912 union vxlan_addr loopback;
1913 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
1914 struct net_device *dev = skb->dev;
1915 int len = skb->len;
1916
1917 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
1918 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
1919 skb->pkt_type = PACKET_HOST;
1920 skb->encapsulation = 0;
1921 skb->dev = dst_vxlan->dev;
1922 __skb_pull(skb, skb_network_offset(skb));
1923
1924 if (remote_ip->sa.sa_family == AF_INET) {
1925 loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
1926 loopback.sa.sa_family = AF_INET;
1927#if IS_ENABLED(CONFIG_IPV6)
1928 } else {
1929 loopback.sin6.sin6_addr = in6addr_loopback;
1930 loopback.sa.sa_family = AF_INET6;
1931#endif
1932 }
1933
1934 if (dst_vxlan->flags & VXLAN_F_LEARN)
1935 vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);
1936
1937 u64_stats_update_begin(&tx_stats->syncp);
1938 tx_stats->tx_packets++;
1939 tx_stats->tx_bytes += len;
1940 u64_stats_update_end(&tx_stats->syncp);
1941
1942 if (netif_rx(skb) == NET_RX_SUCCESS) {
1943 u64_stats_update_begin(&rx_stats->syncp);
1944 rx_stats->rx_packets++;
1945 rx_stats->rx_bytes += len;
1946 u64_stats_update_end(&rx_stats->syncp);
1947 } else {
1948 dev->stats.rx_dropped++;
1949 }
1950}
1951
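/* If the route says the destination is local to this host, hand the
 * packet directly to the target VXLAN device instead of encapsulating
 * it.  Returns 1 if the packet was consumed, 0 to continue with
 * encapsulation, or a negative error.
 */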
1952static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
1953 struct vxlan_dev *vxlan, union vxlan_addr *daddr,
1954 __be16 dst_port, __be32 vni, struct dst_entry *dst,
1955 u32 rt_flags)
1956{
1957#if IS_ENABLED(CONFIG_IPV6)
1958 /* IPv6 rt-flags are checked against RTF_LOCAL, but the value of
1959 * RTF_LOCAL is equal to RTCF_LOCAL. So to keep code simple
1960 * we can use RTCF_LOCAL which works for ipv4 and ipv6 route entry.
1961 */
1962 BUILD_BUG_ON(RTCF_LOCAL != RTF_LOCAL);
1963#endif
1964 /* Bypass encapsulation if the destination is local */
1965 if (rt_flags & RTCF_LOCAL &&
1966 !(rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
1967 struct vxlan_dev *dst_vxlan;
1968
1969 dst_release(dst);
1970 dst_vxlan = vxlan_find_vni(vxlan->net, vni,
1971 daddr->sa.sa_family, dst_port,
1972 vxlan->flags);
1973 if (!dst_vxlan) {
1974 dev->stats.tx_errors++;
1975 kfree_skb(skb);
1976
1977 return -ENOENT;
1978 }
1979 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
1980 return 1;
1981 }
1982
1983 return 0;
1984}
1985
1986static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1987 struct vxlan_rdst *rdst, bool did_rsc)
1988{
1989 struct dst_cache *dst_cache;
1990 struct ip_tunnel_info *info;
1991 struct vxlan_dev *vxlan = netdev_priv(dev);
1992 const struct iphdr *old_iph = ip_hdr(skb);
1993 union vxlan_addr *dst;
1994 union vxlan_addr remote_ip, local_ip;
1995 struct vxlan_metadata _md;
1996 struct vxlan_metadata *md = &_md;
1997 __be16 src_port = 0, dst_port;
1998 struct dst_entry *ndst = NULL;
1999 __be32 vni, label;
2000 __u8 tos, ttl;
2001 int err;
2002 u32 flags = vxlan->flags;
2003 bool udp_sum = false;
2004 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
2005
2006 info = skb_tunnel_info(skb);
2007
2008 if (rdst) {
2009 dst = &rdst->remote_ip;
2010 if (vxlan_addr_any(dst)) {
2011 if (did_rsc) {
2012 /* short-circuited back to local bridge */
2013 vxlan_encap_bypass(skb, vxlan, vxlan);
2014 return;
2015 }
2016 goto drop;
2017 }
2018
2019 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
2020 vni = rdst->remote_vni;
2021 local_ip = vxlan->cfg.saddr;
2022 dst_cache = &rdst->dst_cache;
2023 md->gbp = skb->mark;
2024 ttl = vxlan->cfg.ttl;
2025 if (!ttl && vxlan_addr_multicast(dst))
2026 ttl = 1;
2027
2028 tos = vxlan->cfg.tos;
2029 if (tos == 1)
2030 tos = ip_tunnel_get_dsfield(old_iph, skb);
2031
2032 if (dst->sa.sa_family == AF_INET)
2033 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
2034 else
2035 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
2036 label = vxlan->cfg.label;
2037 } else {
2038 if (!info) {
2039 WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
2040 dev->name);
2041 goto drop;
2042 }
2043 remote_ip.sa.sa_family = ip_tunnel_info_af(info);
2044 if (remote_ip.sa.sa_family == AF_INET) {
2045 remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
2046 local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
2047 } else {
2048 remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
2049 local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
2050 }
2051 dst = &remote_ip;
2052 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
2053 vni = tunnel_id_to_key32(info->key.tun_id);
2054 dst_cache = &info->dst_cache;
2055 if (info->options_len)
2056 md = ip_tunnel_info_opts(info);
2057 ttl = info->key.ttl;
2058 tos = info->key.tos;
2059 label = info->key.label;
2060 udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
2061 }
2062 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
2063 vxlan->cfg.port_max, true);
2064
2065 rcu_read_lock();
2066 if (dst->sa.sa_family == AF_INET) {
2067 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
2068 struct rtable *rt;
2069 __be16 df = 0;
2070
2071 rt = vxlan_get_route(vxlan, dev, sock4, skb,
2072 rdst ? rdst->remote_ifindex : 0, tos,
2073 dst->sin.sin_addr.s_addr,
2074 &local_ip.sin.sin_addr.s_addr,
2075 dst_port, src_port,
2076 dst_cache, info);
2077 if (IS_ERR(rt)) {
2078 err = PTR_ERR(rt);
2079 goto tx_error;
2080 }
2081
2082 /* Bypass encapsulation if the destination is local */
2083 if (!info) {
2084 err = encap_bypass_if_local(skb, dev, vxlan, dst,
2085 dst_port, vni, &rt->dst,
2086 rt->rt_flags);
2087 if (err)
2088 goto out_unlock;
2089 } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
2090 df = htons(IP_DF);
2091 }
2092
2093 ndst = &rt->dst;
2094 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2095 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
2096 err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
2097 vni, md, flags, udp_sum);
2098 if (err < 0)
2099 goto tx_error;
2100
2101 udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr,
2102 dst->sin.sin_addr.s_addr, tos, ttl, df,
2103 src_port, dst_port, xnet, !udp_sum);
2104#if IS_ENABLED(CONFIG_IPV6)
2105 } else {
2106 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
2107
2108 ndst = vxlan6_get_route(vxlan, dev, sock6, skb,
2109 rdst ? rdst->remote_ifindex : 0, tos,
2110 label, &dst->sin6.sin6_addr,
2111 &local_ip.sin6.sin6_addr,
2112 dst_port, src_port,
2113 dst_cache, info);
2114 if (IS_ERR(ndst)) {
2115 err = PTR_ERR(ndst);
2116 ndst = NULL;
2117 goto tx_error;
2118 }
2119
2120 if (!info) {
2121 u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
2122
2123 err = encap_bypass_if_local(skb, dev, vxlan, dst,
2124 dst_port, vni, ndst,
2125 rt6i_flags);
2126 if (err)
2127 goto out_unlock;
2128 }
2129
2130 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2131 ttl = ttl ? : ip6_dst_hoplimit(ndst);
2132 skb_scrub_packet(skb, xnet);
2133 err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
2134 vni, md, flags, udp_sum);
2135 if (err < 0)
2136 goto tx_error;
2137
2138 udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
2139 &local_ip.sin6.sin6_addr,
2140 &dst->sin6.sin6_addr, tos, ttl,
2141 label, src_port, dst_port, !udp_sum);
2142#endif
2143 }
2144out_unlock:
2145 rcu_read_unlock();
2146 return;
2147
2148drop:
2149 dev->stats.tx_dropped++;
2150 dev_kfree_skb(skb);
2151 return;
2152
2153tx_error:
2154 rcu_read_unlock();
2155 if (err == -ELOOP)
2156 dev->stats.collisions++;
2157 else if (err == -ENETUNREACH)
2158 dev->stats.tx_carrier_errors++;
2159 dst_release(ndst);
2160 dev->stats.tx_errors++;
2161 kfree_skb(skb);
2162}
2163
2164/* Transmit local packets over VXLAN
2165 *
2166 * Outer IP header inherits ECN and DF from inner header.
2167 * Outer UDP destination is the VXLAN assigned port.
2168 * The source port is based on a hash of the flow.
2169 */
2170static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
2171{
2172 struct vxlan_dev *vxlan = netdev_priv(dev);
2173 const struct ip_tunnel_info *info;
2174 struct ethhdr *eth;
2175 bool did_rsc = false;
2176 struct vxlan_rdst *rdst, *fdst = NULL;
2177 struct vxlan_fdb *f;
2178
2179 info = skb_tunnel_info(skb);
2180
2181 skb_reset_mac_header(skb);
2182
2183 if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
2184 if (info && info->mode & IP_TUNNEL_INFO_TX)
2185 vxlan_xmit_one(skb, dev, NULL, false);
2186 else
2187 kfree_skb(skb);
2188 return NETDEV_TX_OK;
2189 }
2190
2191 if (vxlan->flags & VXLAN_F_PROXY) {
2192 eth = eth_hdr(skb);
2193 if (ntohs(eth->h_proto) == ETH_P_ARP)
2194 return arp_reduce(dev, skb);
2195#if IS_ENABLED(CONFIG_IPV6)
2196 else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
2197 pskb_may_pull(skb, sizeof(struct ipv6hdr)
2198 + sizeof(struct nd_msg)) &&
2199 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
2200 struct nd_msg *msg;
2201
2202 msg = (struct nd_msg *)skb_transport_header(skb);
2203 if (msg->icmph.icmp6_code == 0 &&
2204 msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
2205 return neigh_reduce(dev, skb);
2206 }
2207#endif
2208 }
2209
2210 eth = eth_hdr(skb);
2211 f = vxlan_find_mac(vxlan, eth->h_dest);
2212 did_rsc = false;
2213
2214 if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
2215 (ntohs(eth->h_proto) == ETH_P_IP ||
2216 ntohs(eth->h_proto) == ETH_P_IPV6)) {
2217 did_rsc = route_shortcircuit(dev, skb);
2218 if (did_rsc)
2219 f = vxlan_find_mac(vxlan, eth->h_dest);
2220 }
2221
2222 if (f == NULL) {
2223 f = vxlan_find_mac(vxlan, all_zeros_mac);
2224 if (f == NULL) {
2225 if ((vxlan->flags & VXLAN_F_L2MISS) &&
2226 !is_multicast_ether_addr(eth->h_dest))
2227 vxlan_fdb_miss(vxlan, eth->h_dest);
2228
2229 dev->stats.tx_dropped++;
2230 kfree_skb(skb);
2231 return NETDEV_TX_OK;
2232 }
2233 }
2234
2235 list_for_each_entry_rcu(rdst, &f->remotes, list) {
2236 struct sk_buff *skb1;
2237
2238 if (!fdst) {
2239 fdst = rdst;
2240 continue;
2241 }
2242 skb1 = skb_clone(skb, GFP_ATOMIC);
2243 if (skb1)
2244 vxlan_xmit_one(skb1, dev, rdst, did_rsc);
2245 }
2246
2247 if (fdst)
2248 vxlan_xmit_one(skb, dev, fdst, did_rsc);
2249 else
2250 kfree_skb(skb);
2251 return NETDEV_TX_OK;
2252}
2253
2254/* Walk the forwarding table and purge stale entries */
2255static void vxlan_cleanup(unsigned long arg)
2256{
2257 struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
2258 unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
2259 unsigned int h;
2260
2261 if (!netif_running(vxlan->dev))
2262 return;
2263
2264 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2265 struct hlist_node *p, *n;
2266
2267 spin_lock_bh(&vxlan->hash_lock);
2268 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2269 struct vxlan_fdb *f
2270 = container_of(p, struct vxlan_fdb, hlist);
2271 unsigned long timeout;
2272
2273 if (f->state & (NUD_PERMANENT | NUD_NOARP))
2274 continue;
2275
2276 timeout = f->used + vxlan->cfg.age_interval * HZ;
2277 if (time_before_eq(timeout, jiffies)) {
2278 netdev_dbg(vxlan->dev,
2279 "garbage collect %pM\n",
2280 f->eth_addr);
2281 f->state = NUD_STALE;
2282 vxlan_fdb_destroy(vxlan, f);
2283 } else if (time_before(timeout, next_timer))
2284 next_timer = timeout;
2285 }
2286 spin_unlock_bh(&vxlan->hash_lock);
2287 }
2288
2289 mod_timer(&vxlan->age_timer, next_timer);
2290}
2291
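/* Hash the device onto the per-VNI list of its vxlan socket */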
2292static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
2293{
2294 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2295 __be32 vni = vxlan->default_dst.remote_vni;
2296
2297 spin_lock(&vn->sock_lock);
2298 hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
2299 spin_unlock(&vn->sock_lock);
2300}
2301
2302/* Set up per-CPU stats when the device is created */
2303static int vxlan_init(struct net_device *dev)
2304{
2305 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2306 if (!dev->tstats)
2307 return -ENOMEM;
2308
2309 return 0;
2310}
2311
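/* Remove the all-zeros MAC (default destination) entry from the forwarding table */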
2312static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
2313{
2314 struct vxlan_fdb *f;
2315
2316 spin_lock_bh(&vxlan->hash_lock);
2317 f = __vxlan_find_mac(vxlan, all_zeros_mac);
2318 if (f)
2319 vxlan_fdb_destroy(vxlan, f);
2320 spin_unlock_bh(&vxlan->hash_lock);
2321}
2322
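/* Undo vxlan_init(): drop the default FDB entry and free the per-CPU stats */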
2323static void vxlan_uninit(struct net_device *dev)
2324{
2325 struct vxlan_dev *vxlan = netdev_priv(dev);
2326
2327 vxlan_fdb_delete_default(vxlan);
2328
2329 free_percpu(dev->tstats);
2330}
2331
2332/* Start ageing timer and join group when device is brought up */
2333static int vxlan_open(struct net_device *dev)
2334{
2335 struct vxlan_dev *vxlan = netdev_priv(dev);
2336 int ret;
2337
2338 ret = vxlan_sock_add(vxlan);
2339 if (ret < 0)
2340 return ret;
2341
2342 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
2343 ret = vxlan_igmp_join(vxlan);
2344 if (ret == -EADDRINUSE)
2345 ret = 0;
2346 if (ret) {
2347 vxlan_sock_release(vxlan);
2348 return ret;
2349 }
2350 }
2351
2352 if (vxlan->cfg.age_interval)
2353 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
2354
2355 return ret;
2356}
2357
2358/* Purge the forwarding table */
2359static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
2360{
2361 unsigned int h;
2362
2363 spin_lock_bh(&vxlan->hash_lock);
2364 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2365 struct hlist_node *p, *n;
2366 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2367 struct vxlan_fdb *f
2368 = container_of(p, struct vxlan_fdb, hlist);
2369 if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
2370 continue;
2371			/* the all_zeros_mac entry is deleted in vxlan_uninit() */
2372 if (!is_zero_ether_addr(f->eth_addr))
2373 vxlan_fdb_destroy(vxlan, f);
2374 }
2375 }
2376 spin_unlock_bh(&vxlan->hash_lock);
2377}
2378
2379/* Clean up the ageing timer and forwarding table on shutdown */
2380static int vxlan_stop(struct net_device *dev)
2381{
2382 struct vxlan_dev *vxlan = netdev_priv(dev);
2383 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2384 int ret = 0;
2385
2386 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
2387 !vxlan_group_used(vn, vxlan))
2388 ret = vxlan_igmp_leave(vxlan);
2389
2390 del_timer_sync(&vxlan->age_timer);
2391
2392 vxlan_flush(vxlan, false);
2393 vxlan_sock_release(vxlan);
2394
2395 return ret;
2396}
2397
2398/* Stub, nothing needs to be done. */
2399static void vxlan_set_multicast_list(struct net_device *dev)
2400{
2401}
2402
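/* Validate the requested MTU against the lower device before applying it */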
2403static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2404{
2405 struct vxlan_dev *vxlan = netdev_priv(dev);
2406 struct vxlan_rdst *dst = &vxlan->default_dst;
2407 struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
2408 dst->remote_ifindex);
2409 bool use_ipv6 = false;
2410
2411 if (dst->remote_ip.sa.sa_family == AF_INET6)
2412 use_ipv6 = true;
2413
2414	/* This check differs from the dev->max_mtu check because it looks
2415	 * at the lowerdev->mtu rather than the static dev->max_mtu.
2416	 */
2417 if (lowerdev) {
2418 int max_mtu = lowerdev->mtu -
2419 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2420 if (new_mtu > max_mtu)
2421 return -EINVAL;
2422 }
2423
2424 dev->mtu = new_mtu;
2425 return 0;
2426}
2427
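/* Resolve the outer route for an externally controlled (metadata based)
 * tunnel and report back, in the tunnel info, the UDP ports that would
 * be used for transmission.
 */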
2428static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2429{
2430 struct vxlan_dev *vxlan = netdev_priv(dev);
2431 struct ip_tunnel_info *info = skb_tunnel_info(skb);
2432 __be16 sport, dport;
2433
2434 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
2435 vxlan->cfg.port_max, true);
2436 dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
2437
2438 if (ip_tunnel_info_af(info) == AF_INET) {
2439 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
2440 struct rtable *rt;
2441
2442 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
2443 info->key.u.ipv4.dst,
2444 &info->key.u.ipv4.src, dport, sport,
2445 &info->dst_cache, info);
2446 if (IS_ERR(rt))
2447 return PTR_ERR(rt);
2448 ip_rt_put(rt);
2449 } else {
2450#if IS_ENABLED(CONFIG_IPV6)
2451 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
2452 struct dst_entry *ndst;
2453
2454 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
2455 info->key.label, &info->key.u.ipv6.dst,
2456 &info->key.u.ipv6.src, dport, sport,
2457 &info->dst_cache, info);
2458 if (IS_ERR(ndst))
2459 return PTR_ERR(ndst);
2460 dst_release(ndst);
2461#else /* !CONFIG_IPV6 */
2462 return -EPFNOSUPPORT;
2463#endif
2464 }
2465 info->key.tp_src = sport;
2466 info->key.tp_dst = dport;
2467 return 0;
2468}
2469
2470static const struct net_device_ops vxlan_netdev_ether_ops = {
2471 .ndo_init = vxlan_init,
2472 .ndo_uninit = vxlan_uninit,
2473 .ndo_open = vxlan_open,
2474 .ndo_stop = vxlan_stop,
2475 .ndo_start_xmit = vxlan_xmit,
2476 .ndo_get_stats64 = ip_tunnel_get_stats64,
2477 .ndo_set_rx_mode = vxlan_set_multicast_list,
2478 .ndo_change_mtu = vxlan_change_mtu,
2479 .ndo_validate_addr = eth_validate_addr,
2480 .ndo_set_mac_address = eth_mac_addr,
2481 .ndo_fdb_add = vxlan_fdb_add,
2482 .ndo_fdb_del = vxlan_fdb_delete,
2483 .ndo_fdb_dump = vxlan_fdb_dump,
2484 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
2485};
2486
2487static const struct net_device_ops vxlan_netdev_raw_ops = {
2488 .ndo_init = vxlan_init,
2489 .ndo_uninit = vxlan_uninit,
2490 .ndo_open = vxlan_open,
2491 .ndo_stop = vxlan_stop,
2492 .ndo_start_xmit = vxlan_xmit,
2493 .ndo_get_stats64 = ip_tunnel_get_stats64,
2494 .ndo_change_mtu = vxlan_change_mtu,
2495 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
2496};
2497
2498/* Tell udev that this is a virtual tunnel endpoint */
2499static struct device_type vxlan_type = {
2500 .name = "vxlan",
2501};
2502
2503/* Call the caller's ndo_udp_tunnel_add callback to supply the
2504 * listening VXLAN UDP ports. Callers are expected to implement
2505 * ndo_udp_tunnel_add.
2506 */
2507static void vxlan_push_rx_ports(struct net_device *dev)
2508{
2509 struct vxlan_sock *vs;
2510 struct net *net = dev_net(dev);
2511 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2512 unsigned int i;
2513
2514 spin_lock(&vn->sock_lock);
2515 for (i = 0; i < PORT_HASH_SIZE; ++i) {
2516 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist)
2517 udp_tunnel_push_rx_port(dev, vs->sock,
2518 (vs->flags & VXLAN_F_GPE) ?
2519 UDP_TUNNEL_TYPE_VXLAN_GPE :
2520 UDP_TUNNEL_TYPE_VXLAN);
2521 }
2522 spin_unlock(&vn->sock_lock);
2523}
2524
2525/* Initialize the device structure. */
2526static void vxlan_setup(struct net_device *dev)
2527{
2528 struct vxlan_dev *vxlan = netdev_priv(dev);
2529 unsigned int h;
2530
2531 eth_hw_addr_random(dev);
2532 ether_setup(dev);
2533
2534 dev->destructor = free_netdev;
2535 SET_NETDEV_DEVTYPE(dev, &vxlan_type);
2536
2537 dev->features |= NETIF_F_LLTX;
2538 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2539 dev->features |= NETIF_F_RXCSUM;
2540 dev->features |= NETIF_F_GSO_SOFTWARE;
2541
2542 dev->vlan_features = dev->features;
2543 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2544 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2545 netif_keep_dst(dev);
2546 dev->priv_flags |= IFF_NO_QUEUE;
2547
2548 INIT_LIST_HEAD(&vxlan->next);
2549 spin_lock_init(&vxlan->hash_lock);
2550
2551 init_timer_deferrable(&vxlan->age_timer);
2552 vxlan->age_timer.function = vxlan_cleanup;
2553 vxlan->age_timer.data = (unsigned long) vxlan;
2554
2555 vxlan->cfg.dst_port = htons(vxlan_port);
2556
2557 vxlan->dev = dev;
2558
2559 gro_cells_init(&vxlan->gro_cells, dev);
2560
2561 for (h = 0; h < FDB_HASH_SIZE; ++h)
2562 INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
2563}
2564
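/* Finish setup for an Ethernet-mode device */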
2565static void vxlan_ether_setup(struct net_device *dev)
2566{
2567 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2568 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2569 dev->netdev_ops = &vxlan_netdev_ether_ops;
2570}
2571
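/* Finish setup for a raw, header-less (ARPHRD_NONE) device, as used by VXLAN-GPE */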
2572static void vxlan_raw_setup(struct net_device *dev)
2573{
2574 dev->header_ops = NULL;
2575 dev->type = ARPHRD_NONE;
2576 dev->hard_header_len = 0;
2577 dev->addr_len = 0;
2578 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
2579 dev->netdev_ops = &vxlan_netdev_raw_ops;
2580}
2581
2582static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
2583 [IFLA_VXLAN_ID] = { .type = NLA_U32 },
2584 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
2585 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
2586 [IFLA_VXLAN_LINK] = { .type = NLA_U32 },
2587 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
2588 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
2589 [IFLA_VXLAN_TOS] = { .type = NLA_U8 },
2590 [IFLA_VXLAN_TTL] = { .type = NLA_U8 },
2591 [IFLA_VXLAN_LABEL] = { .type = NLA_U32 },
2592 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
2593 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
2594 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
2595 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
2596 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
2597 [IFLA_VXLAN_RSC] = { .type = NLA_U8 },
2598 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
2599 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
2600 [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 },
2601 [IFLA_VXLAN_PORT] = { .type = NLA_U16 },
2602 [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
2603 [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
2604 [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
2605 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
2606 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
2607 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
2608 [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, },
2609 [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
2610};
2611
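/* Validate netlink attributes before a new vxlan link is created */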
2612static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
2613{
2614 if (tb[IFLA_ADDRESS]) {
2615 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
2616 pr_debug("invalid link address (not ethernet)\n");
2617 return -EINVAL;
2618 }
2619
2620 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
2621 pr_debug("invalid all zero ethernet address\n");
2622 return -EADDRNOTAVAIL;
2623 }
2624 }
2625
2626 if (!data)
2627 return -EINVAL;
2628
2629 if (data[IFLA_VXLAN_ID]) {
2630 __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
2631 if (id >= VXLAN_N_VID)
2632 return -ERANGE;
2633 }
2634
2635 if (data[IFLA_VXLAN_PORT_RANGE]) {
2636 const struct ifla_vxlan_port_range *p
2637 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
2638
2639 if (ntohs(p->high) < ntohs(p->low)) {
2640 pr_debug("port range %u .. %u not valid\n",
2641 ntohs(p->low), ntohs(p->high));
2642 return -EINVAL;
2643 }
2644 }
2645
2646 return 0;
2647}
2648
2649static void vxlan_get_drvinfo(struct net_device *netdev,
2650 struct ethtool_drvinfo *drvinfo)
2651{
2652 strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
2653 strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
2654}
2655
2656static const struct ethtool_ops vxlan_ethtool_ops = {
2657 .get_drvinfo = vxlan_get_drvinfo,
2658 .get_link = ethtool_op_get_link,
2659};
2660
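/* Create and bind the kernel UDP socket used for encapsulation */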
2661static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
2662 __be16 port, u32 flags)
2663{
2664 struct socket *sock;
2665 struct udp_port_cfg udp_conf;
2666 int err;
2667
2668 memset(&udp_conf, 0, sizeof(udp_conf));
2669
2670 if (ipv6) {
2671 udp_conf.family = AF_INET6;
2672 udp_conf.use_udp6_rx_checksums =
2673 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
2674 udp_conf.ipv6_v6only = 1;
2675 } else {
2676 udp_conf.family = AF_INET;
2677 }
2678
2679 udp_conf.local_udp_port = port;
2680
2681 /* Open UDP socket */
2682 err = udp_sock_create(net, &udp_conf, &sock);
2683 if (err < 0)
2684 return ERR_PTR(err);
2685
2686 return sock;
2687}
2688
2689/* Create a new listening socket if needed */
2690static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
2691 __be16 port, u32 flags)
2692{
2693 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2694 struct vxlan_sock *vs;
2695 struct socket *sock;
2696 unsigned int h;
2697 struct udp_tunnel_sock_cfg tunnel_cfg;
2698
2699 vs = kzalloc(sizeof(*vs), GFP_KERNEL);
2700 if (!vs)
2701 return ERR_PTR(-ENOMEM);
2702
2703 for (h = 0; h < VNI_HASH_SIZE; ++h)
2704 INIT_HLIST_HEAD(&vs->vni_list[h]);
2705
2706 sock = vxlan_create_sock(net, ipv6, port, flags);
2707 if (IS_ERR(sock)) {
2708 pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
2709 PTR_ERR(sock));
2710 kfree(vs);
2711 return ERR_CAST(sock);
2712 }
2713
2714 vs->sock = sock;
2715 atomic_set(&vs->refcnt, 1);
2716 vs->flags = (flags & VXLAN_F_RCV_FLAGS);
2717
2718 spin_lock(&vn->sock_lock);
2719 hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
2720 udp_tunnel_notify_add_rx_port(sock,
2721 (vs->flags & VXLAN_F_GPE) ?
2722 UDP_TUNNEL_TYPE_VXLAN_GPE :
2723 UDP_TUNNEL_TYPE_VXLAN);
2724 spin_unlock(&vn->sock_lock);
2725
2726 /* Mark socket as an encapsulation socket. */
2727 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
2728 tunnel_cfg.sk_user_data = vs;
2729 tunnel_cfg.encap_type = 1;
2730 tunnel_cfg.encap_rcv = vxlan_rcv;
2731 tunnel_cfg.encap_destroy = NULL;
2732 tunnel_cfg.gro_receive = vxlan_gro_receive;
2733 tunnel_cfg.gro_complete = vxlan_gro_complete;
2734
2735 setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
2736
2737 return vs;
2738}
2739
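/* Attach the device to a vxlan socket of the given address family,
 * reusing a compatible existing socket unless sharing is disabled.
 */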
2740static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
2741{
2742 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2743 struct vxlan_sock *vs = NULL;
2744
2745 if (!vxlan->cfg.no_share) {
2746 spin_lock(&vn->sock_lock);
2747 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
2748 vxlan->cfg.dst_port, vxlan->flags);
2749 if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) {
2750 spin_unlock(&vn->sock_lock);
2751 return -EBUSY;
2752 }
2753 spin_unlock(&vn->sock_lock);
2754 }
2755 if (!vs)
2756 vs = vxlan_socket_create(vxlan->net, ipv6,
2757 vxlan->cfg.dst_port, vxlan->flags);
2758 if (IS_ERR(vs))
2759 return PTR_ERR(vs);
2760#if IS_ENABLED(CONFIG_IPV6)
2761 if (ipv6)
2762 rcu_assign_pointer(vxlan->vn6_sock, vs);
2763 else
2764#endif
2765 rcu_assign_pointer(vxlan->vn4_sock, vs);
2766 vxlan_vs_add_dev(vs, vxlan);
2767 return 0;
2768}
2769
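/* Open the IPv4 and/or IPv6 sockets this device needs */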
2770static int vxlan_sock_add(struct vxlan_dev *vxlan)
2771{
2772 bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
2773 bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
2774 int ret = 0;
2775
2776 RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
2777#if IS_ENABLED(CONFIG_IPV6)
2778 RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
2779 if (ipv6 || metadata)
2780 ret = __vxlan_sock_add(vxlan, true);
2781#endif
2782 if (!ret && (!ipv6 || metadata))
2783 ret = __vxlan_sock_add(vxlan, false);
2784 if (ret < 0)
2785 vxlan_sock_release(vxlan);
2786 return ret;
2787}
2788
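/* Apply a vxlan_config to the device, create the default FDB entry
 * and register the netdevice.
 */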
2789static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2790 struct vxlan_config *conf)
2791{
2792 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
2793 struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
2794 struct vxlan_rdst *dst = &vxlan->default_dst;
2795 unsigned short needed_headroom = ETH_HLEN;
2796 int err;
2797 bool use_ipv6 = false;
2798 __be16 default_port = vxlan->cfg.dst_port;
2799 struct net_device *lowerdev = NULL;
2800
2801 if (conf->flags & VXLAN_F_GPE) {
2802		/* For now, allow GPE only together with COLLECT_METADATA.
2803		 * This can be relaxed later; in that case, the other side
2804		 * of the point-to-point link will have to be provided.
2805		 */
2806 if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
2807 !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
2808 pr_info("unsupported combination of extensions\n");
2809 return -EINVAL;
2810 }
2811
2812 vxlan_raw_setup(dev);
2813 } else {
2814 vxlan_ether_setup(dev);
2815 }
2816
2817 /* MTU range: 68 - 65535 */
2818 dev->min_mtu = ETH_MIN_MTU;
2819 dev->max_mtu = ETH_MAX_MTU;
2820
2821 vxlan->net = src_net;
2822
2823 dst->remote_vni = conf->vni;
2824
2825 memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
2826
2827 /* Unless IPv6 is explicitly requested, assume IPv4 */
2828 if (!dst->remote_ip.sa.sa_family)
2829 dst->remote_ip.sa.sa_family = AF_INET;
2830
2831 if (dst->remote_ip.sa.sa_family == AF_INET6 ||
2832 vxlan->cfg.saddr.sa.sa_family == AF_INET6) {
2833 if (!IS_ENABLED(CONFIG_IPV6))
2834 return -EPFNOSUPPORT;
2835 use_ipv6 = true;
2836 vxlan->flags |= VXLAN_F_IPV6;
2837 }
2838
2839 if (conf->label && !use_ipv6) {
2840		pr_info("label only supported with IPv6\n");
2841 return -EINVAL;
2842 }
2843
2844 if (conf->remote_ifindex) {
2845 lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
2846 dst->remote_ifindex = conf->remote_ifindex;
2847
2848 if (!lowerdev) {
2849 pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
2850 return -ENODEV;
2851 }
2852
2853#if IS_ENABLED(CONFIG_IPV6)
2854 if (use_ipv6) {
2855 struct inet6_dev *idev = __in6_dev_get(lowerdev);
2856 if (idev && idev->cnf.disable_ipv6) {
2857 pr_info("IPv6 is disabled via sysctl\n");
2858 return -EPERM;
2859 }
2860 }
2861#endif
2862
2863 if (!conf->mtu)
2864 dev->mtu = lowerdev->mtu -
2865 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2866
2867 needed_headroom = lowerdev->hard_header_len;
2868 } else if (vxlan_addr_multicast(&dst->remote_ip)) {
2869 pr_info("multicast destination requires interface to be specified\n");
2870 return -EINVAL;
2871 }
2872
2873 if (conf->mtu) {
2874 int max_mtu = ETH_MAX_MTU;
2875
2876 if (lowerdev)
2877 max_mtu = lowerdev->mtu;
2878
2879 max_mtu -= (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2880
2881 if (conf->mtu < dev->min_mtu || conf->mtu > dev->max_mtu)
2882 return -EINVAL;
2883
2884 dev->mtu = conf->mtu;
2885
2886 if (conf->mtu > max_mtu)
2887 dev->mtu = max_mtu;
2888 }
2889
2890 if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
2891 needed_headroom += VXLAN6_HEADROOM;
2892 else
2893 needed_headroom += VXLAN_HEADROOM;
2894 dev->needed_headroom = needed_headroom;
2895
2896 memcpy(&vxlan->cfg, conf, sizeof(*conf));
2897 if (!vxlan->cfg.dst_port) {
2898 if (conf->flags & VXLAN_F_GPE)
2899 vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
2900 else
2901 vxlan->cfg.dst_port = default_port;
2902 }
2903 vxlan->flags |= conf->flags;
2904
2905 if (!vxlan->cfg.age_interval)
2906 vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
2907
2908 list_for_each_entry(tmp, &vn->vxlan_list, next) {
2909 if (tmp->cfg.vni == conf->vni &&
2910 (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
2911 tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
2912 tmp->cfg.dst_port == vxlan->cfg.dst_port &&
2913 (tmp->flags & VXLAN_F_RCV_FLAGS) ==
2914 (vxlan->flags & VXLAN_F_RCV_FLAGS)) {
2915 pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni));
2916 return -EEXIST;
2917 }
2918 }
2919
2920 dev->ethtool_ops = &vxlan_ethtool_ops;
2921
2922 /* create an fdb entry for a valid default destination */
2923 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
2924 err = vxlan_fdb_create(vxlan, all_zeros_mac,
2925 &vxlan->default_dst.remote_ip,
2926 NUD_REACHABLE|NUD_PERMANENT,
2927 NLM_F_EXCL|NLM_F_CREATE,
2928 vxlan->cfg.dst_port,
2929 vxlan->default_dst.remote_vni,
2930 vxlan->default_dst.remote_ifindex,
2931 NTF_SELF);
2932 if (err)
2933 return err;
2934 }
2935
2936 err = register_netdevice(dev);
2937 if (err) {
2938 vxlan_fdb_delete_default(vxlan);
2939 return err;
2940 }
2941
2942 list_add(&vxlan->next, &vn->vxlan_list);
2943
2944 return 0;
2945}
2946
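/* Translate rtnetlink attributes into a vxlan_config and configure the
 * device. This is what backs link creation from userspace, e.g. with
 * iproute2:
 *   ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth0 dstport 4789
 */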
2947static int vxlan_newlink(struct net *src_net, struct net_device *dev,
2948 struct nlattr *tb[], struct nlattr *data[])
2949{
2950 struct vxlan_config conf;
2951
2952 memset(&conf, 0, sizeof(conf));
2953
2954 if (data[IFLA_VXLAN_ID])
2955 conf.vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
2956
2957 if (data[IFLA_VXLAN_GROUP]) {
2958 conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
2959 } else if (data[IFLA_VXLAN_GROUP6]) {
2960 if (!IS_ENABLED(CONFIG_IPV6))
2961 return -EPFNOSUPPORT;
2962
2963 conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
2964 conf.remote_ip.sa.sa_family = AF_INET6;
2965 }
2966
2967 if (data[IFLA_VXLAN_LOCAL]) {
2968 conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
2969 conf.saddr.sa.sa_family = AF_INET;
2970 } else if (data[IFLA_VXLAN_LOCAL6]) {
2971 if (!IS_ENABLED(CONFIG_IPV6))
2972 return -EPFNOSUPPORT;
2973
2974 /* TODO: respect scope id */
2975 conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
2976 conf.saddr.sa.sa_family = AF_INET6;
2977 }
2978
2979 if (data[IFLA_VXLAN_LINK])
2980 conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);
2981
2982 if (data[IFLA_VXLAN_TOS])
2983 conf.tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
2984
2985 if (data[IFLA_VXLAN_TTL])
2986 conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
2987
2988 if (data[IFLA_VXLAN_LABEL])
2989 conf.label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
2990 IPV6_FLOWLABEL_MASK;
2991
2992 if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
2993 conf.flags |= VXLAN_F_LEARN;
2994
2995 if (data[IFLA_VXLAN_AGEING])
2996 conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
2997
2998 if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
2999 conf.flags |= VXLAN_F_PROXY;
3000
3001 if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
3002 conf.flags |= VXLAN_F_RSC;
3003
3004 if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
3005 conf.flags |= VXLAN_F_L2MISS;
3006
3007 if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
3008 conf.flags |= VXLAN_F_L3MISS;
3009
3010 if (data[IFLA_VXLAN_LIMIT])
3011 conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
3012
3013 if (data[IFLA_VXLAN_COLLECT_METADATA] &&
3014 nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
3015 conf.flags |= VXLAN_F_COLLECT_METADATA;
3016
3017 if (data[IFLA_VXLAN_PORT_RANGE]) {
3018 const struct ifla_vxlan_port_range *p
3019 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
3020 conf.port_min = ntohs(p->low);
3021 conf.port_max = ntohs(p->high);
3022 }
3023
3024 if (data[IFLA_VXLAN_PORT])
3025 conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
3026
3027 if (data[IFLA_VXLAN_UDP_CSUM] &&
3028 !nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
3029 conf.flags |= VXLAN_F_UDP_ZERO_CSUM_TX;
3030
3031 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
3032 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
3033 conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
3034
3035 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
3036 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
3037 conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
3038
3039 if (data[IFLA_VXLAN_REMCSUM_TX] &&
3040 nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
3041 conf.flags |= VXLAN_F_REMCSUM_TX;
3042
3043 if (data[IFLA_VXLAN_REMCSUM_RX] &&
3044 nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
3045 conf.flags |= VXLAN_F_REMCSUM_RX;
3046
3047 if (data[IFLA_VXLAN_GBP])
3048 conf.flags |= VXLAN_F_GBP;
3049
3050 if (data[IFLA_VXLAN_GPE])
3051 conf.flags |= VXLAN_F_GPE;
3052
3053 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
3054 conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
3055
3056 if (tb[IFLA_MTU])
3057 conf.mtu = nla_get_u32(tb[IFLA_MTU]);
3058
3059 return vxlan_dev_configure(src_net, dev, &conf);
3060}
3061
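/* Tear the device down: flush the FDB, unhash it from its socket and
 * queue it for unregistration.
 */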
3062static void vxlan_dellink(struct net_device *dev, struct list_head *head)
3063{
3064 struct vxlan_dev *vxlan = netdev_priv(dev);
3065 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
3066
3067 vxlan_flush(vxlan, true);
3068
3069 spin_lock(&vn->sock_lock);
3070 if (!hlist_unhashed(&vxlan->hlist))
3071 hlist_del_rcu(&vxlan->hlist);
3072 spin_unlock(&vn->sock_lock);
3073
3074 gro_cells_destroy(&vxlan->gro_cells);
3075 list_del(&vxlan->next);
3076 unregister_netdevice_queue(dev, head);
3077}
3078
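/* Worst-case netlink attribute space needed by vxlan_fill_info() */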
3079static size_t vxlan_get_size(const struct net_device *dev)
3080{
3081
3082 return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
3083 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
3084 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
3085 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
3086 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
3087 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
3088 nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
3089 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
3090 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
3091 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
3092 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
3093 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
3094 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */
3095 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
3096 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
3097 nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
3098 nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
3099 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
3100 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
3101 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
3102 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
3103 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
3104 0;
3105}
3106
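/* Dump the current device configuration as netlink attributes */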
3107static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
3108{
3109 const struct vxlan_dev *vxlan = netdev_priv(dev);
3110 const struct vxlan_rdst *dst = &vxlan->default_dst;
3111 struct ifla_vxlan_port_range ports = {
3112 .low = htons(vxlan->cfg.port_min),
3113 .high = htons(vxlan->cfg.port_max),
3114 };
3115
3116 if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
3117 goto nla_put_failure;
3118
3119 if (!vxlan_addr_any(&dst->remote_ip)) {
3120 if (dst->remote_ip.sa.sa_family == AF_INET) {
3121 if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
3122 dst->remote_ip.sin.sin_addr.s_addr))
3123 goto nla_put_failure;
3124#if IS_ENABLED(CONFIG_IPV6)
3125 } else {
3126 if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
3127 &dst->remote_ip.sin6.sin6_addr))
3128 goto nla_put_failure;
3129#endif
3130 }
3131 }
3132
3133 if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
3134 goto nla_put_failure;
3135
3136 if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
3137 if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
3138 if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
3139 vxlan->cfg.saddr.sin.sin_addr.s_addr))
3140 goto nla_put_failure;
3141#if IS_ENABLED(CONFIG_IPV6)
3142 } else {
3143 if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
3144 &vxlan->cfg.saddr.sin6.sin6_addr))
3145 goto nla_put_failure;
3146#endif
3147 }
3148 }
3149
3150 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
3151 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
3152 nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
3153 nla_put_u8(skb, IFLA_VXLAN_LEARNING,
3154 !!(vxlan->flags & VXLAN_F_LEARN)) ||
3155 nla_put_u8(skb, IFLA_VXLAN_PROXY,
3156 !!(vxlan->flags & VXLAN_F_PROXY)) ||
3157 nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
3158 nla_put_u8(skb, IFLA_VXLAN_L2MISS,
3159 !!(vxlan->flags & VXLAN_F_L2MISS)) ||
3160 nla_put_u8(skb, IFLA_VXLAN_L3MISS,
3161 !!(vxlan->flags & VXLAN_F_L3MISS)) ||
3162 nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
3163 !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
3164 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
3165 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
3166 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
3167 nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
3168 !(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
3169 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
3170 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
3171 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
3172 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
3173 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
3174 !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
3175 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
3176 !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
3177 goto nla_put_failure;
3178
3179 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
3180 goto nla_put_failure;
3181
3182 if (vxlan->flags & VXLAN_F_GBP &&
3183 nla_put_flag(skb, IFLA_VXLAN_GBP))
3184 goto nla_put_failure;
3185
3186 if (vxlan->flags & VXLAN_F_GPE &&
3187 nla_put_flag(skb, IFLA_VXLAN_GPE))
3188 goto nla_put_failure;
3189
3190 if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
3191 nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
3192 goto nla_put_failure;
3193
3194 return 0;
3195
3196nla_put_failure:
3197 return -EMSGSIZE;
3198}
3199
3200static struct net *vxlan_get_link_net(const struct net_device *dev)
3201{
3202 struct vxlan_dev *vxlan = netdev_priv(dev);
3203
3204 return vxlan->net;
3205}
3206
3207static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
3208 .kind = "vxlan",
3209 .maxtype = IFLA_VXLAN_MAX,
3210 .policy = vxlan_policy,
3211 .priv_size = sizeof(struct vxlan_dev),
3212 .setup = vxlan_setup,
3213 .validate = vxlan_validate,
3214 .newlink = vxlan_newlink,
3215 .dellink = vxlan_dellink,
3216 .get_size = vxlan_get_size,
3217 .fill_info = vxlan_fill_info,
3218 .get_link_net = vxlan_get_link_net,
3219};
3220
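/* Create a vxlan device from within the kernel, without going through
 * rtnetlink. A minimal, hypothetical caller sketch for a metadata based
 * device; names and error handling are illustrative only:
 *
 *	struct vxlan_config conf = {
 *		.dst_port = htons(4789),
 *		.flags	  = VXLAN_F_COLLECT_METADATA,
 *	};
 *	struct net_device *dev;
 *
 *	dev = vxlan_dev_create(net, "vxlan_md0", NET_NAME_USER, &conf);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 */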
3221struct net_device *vxlan_dev_create(struct net *net, const char *name,
3222 u8 name_assign_type,
3223 struct vxlan_config *conf)
3224{
3225 struct nlattr *tb[IFLA_MAX + 1];
3226 struct net_device *dev;
3227 int err;
3228
3229 memset(&tb, 0, sizeof(tb));
3230
3231 dev = rtnl_create_link(net, name, name_assign_type,
3232 &vxlan_link_ops, tb);
3233 if (IS_ERR(dev))
3234 return dev;
3235
3236 err = vxlan_dev_configure(net, dev, conf);
3237 if (err < 0) {
3238 free_netdev(dev);
3239 return ERR_PTR(err);
3240 }
3241
3242 err = rtnl_configure_link(dev, NULL);
3243 if (err < 0) {
3244 LIST_HEAD(list_kill);
3245
3246 vxlan_dellink(dev, &list_kill);
3247 unregister_netdevice_many(&list_kill);
3248 return ERR_PTR(err);
3249 }
3250
3251 return dev;
3252}
3253EXPORT_SYMBOL_GPL(vxlan_dev_create);
3254
3255static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
3256 struct net_device *dev)
3257{
3258 struct vxlan_dev *vxlan, *next;
3259 LIST_HEAD(list_kill);
3260
3261 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
3262 struct vxlan_rdst *dst = &vxlan->default_dst;
3263
3264		/* In case we created the vxlan device with a carrier
3265		 * and we lose that carrier due to module unload,
3266		 * we also need to remove the vxlan device. In other
3267		 * cases this is not necessary: remote_ifindex
3268		 * is 0 there, so nothing matches.
3269		 */
3270 if (dst->remote_ifindex == dev->ifindex)
3271 vxlan_dellink(vxlan->dev, &list_kill);
3272 }
3273
3274 unregister_netdevice_many(&list_kill);
3275}
3276
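/* React to lower device unregistration and to UDP tunnel port push requests */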
3277static int vxlan_netdevice_event(struct notifier_block *unused,
3278 unsigned long event, void *ptr)
3279{
3280 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3281 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
3282
3283 if (event == NETDEV_UNREGISTER)
3284 vxlan_handle_lowerdev_unregister(vn, dev);
3285 else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
3286 vxlan_push_rx_ports(dev);
3287
3288 return NOTIFY_DONE;
3289}
3290
3291static struct notifier_block vxlan_notifier_block __read_mostly = {
3292 .notifier_call = vxlan_netdevice_event,
3293};
3294
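/* Per-network-namespace initialization: device list and socket hash table */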
3295static __net_init int vxlan_init_net(struct net *net)
3296{
3297 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3298 unsigned int h;
3299
3300 INIT_LIST_HEAD(&vn->vxlan_list);
3301 spin_lock_init(&vn->sock_lock);
3302
3303 for (h = 0; h < PORT_HASH_SIZE; ++h)
3304 INIT_HLIST_HEAD(&vn->sock_list[h]);
3305
3306 return 0;
3307}
3308
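/* Tear down every vxlan device tied to the exiting network namespace */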
3309static void __net_exit vxlan_exit_net(struct net *net)
3310{
3311 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3312 struct vxlan_dev *vxlan, *next;
3313 struct net_device *dev, *aux;
3314 LIST_HEAD(list);
3315
3316 rtnl_lock();
3317 for_each_netdev_safe(net, dev, aux)
3318 if (dev->rtnl_link_ops == &vxlan_link_ops)
3319 unregister_netdevice_queue(dev, &list);
3320
3321 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
3322 /* If vxlan->dev is in the same netns, it has already been added
3323 * to the list by the previous loop.
3324 */
3325 if (!net_eq(dev_net(vxlan->dev), net)) {
3326 gro_cells_destroy(&vxlan->gro_cells);
3327 unregister_netdevice_queue(vxlan->dev, &list);
3328 }
3329 }
3330
3331 unregister_netdevice_many(&list);
3332 rtnl_unlock();
3333}
3334
3335static struct pernet_operations vxlan_net_ops = {
3336 .init = vxlan_init_net,
3337 .exit = vxlan_exit_net,
3338 .id = &vxlan_net_id,
3339 .size = sizeof(struct vxlan_net),
3340};
3341
3342static int __init vxlan_init_module(void)
3343{
3344 int rc;
3345
3346 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
3347
3348 rc = register_pernet_subsys(&vxlan_net_ops);
3349 if (rc)
3350 goto out1;
3351
3352 rc = register_netdevice_notifier(&vxlan_notifier_block);
3353 if (rc)
3354 goto out2;
3355
3356 rc = rtnl_link_register(&vxlan_link_ops);
3357 if (rc)
3358 goto out3;
3359
3360 return 0;
3361out3:
3362 unregister_netdevice_notifier(&vxlan_notifier_block);
3363out2:
3364 unregister_pernet_subsys(&vxlan_net_ops);
3365out1:
3366 return rc;
3367}
3368late_initcall(vxlan_init_module);
3369
3370static void __exit vxlan_cleanup_module(void)
3371{
3372 rtnl_link_unregister(&vxlan_link_ops);
3373 unregister_netdevice_notifier(&vxlan_notifier_block);
3374 unregister_pernet_subsys(&vxlan_net_ops);
3375 /* rcu_barrier() is called by netns */
3376}
3377module_exit(vxlan_cleanup_module);
3378
3379MODULE_LICENSE("GPL");
3380MODULE_VERSION(VXLAN_VERSION);
3381MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
3382MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
3383MODULE_ALIAS_RTNL_LINK("vxlan");