Linux Audio

Check our new training course

Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
/* Bareudp: UDP tunnel encapsulation for different payload types like
  3 * MPLS, NSH, IP, etc.
  4 * Copyright (c) 2019 Nokia, Inc.
  5 * Authors:  Martin Varghese, <martin.varghese@nokia.com>
  6 */
  7
  8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  9
 10#include <linux/kernel.h>
 11#include <linux/module.h>
 12#include <linux/etherdevice.h>
 13#include <linux/hash.h>
 14#include <net/dst_metadata.h>
 15#include <net/gro_cells.h>
 16#include <net/rtnetlink.h>
 17#include <net/protocol.h>
 18#include <net/ip6_tunnel.h>
 19#include <net/ip_tunnels.h>
 20#include <net/udp_tunnel.h>
 21#include <net/bareudp.h>
 22
 23#define BAREUDP_BASE_HLEN sizeof(struct udphdr)
 24#define BAREUDP_IPV4_HLEN (sizeof(struct iphdr) + \
 25			   sizeof(struct udphdr))
 26#define BAREUDP_IPV6_HLEN (sizeof(struct ipv6hdr) + \
 27			   sizeof(struct udphdr))
 28
 29static bool log_ecn_error = true;
 30module_param(log_ecn_error, bool, 0644);
 31MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 32
 33/* per-network namespace private data for this module */
 34
 35static unsigned int bareudp_net_id;
 36
 37struct bareudp_net {
 38	struct list_head        bareudp_list;
 39};
 40
 41struct bareudp_conf {
 42	__be16 ethertype;
 43	__be16 port;
 44	u16 sport_min;
 45	bool multi_proto_mode;
 46};
 47
 48/* Pseudo network device */
 49struct bareudp_dev {
 50	struct net         *net;        /* netns for packet i/o */
 51	struct net_device  *dev;        /* netdev for bareudp tunnel */
 52	__be16		   ethertype;
 53	__be16             port;
 54	u16	           sport_min;
 55	bool               multi_proto_mode;
 56	struct socket      __rcu *sock;
 57	struct list_head   next;        /* bareudp node  on namespace list */
 58	struct gro_cells   gro_cells;
 59};
 60
 61static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 62{
 63	struct metadata_dst *tun_dst = NULL;
 64	IP_TUNNEL_DECLARE_FLAGS(key) = { };
 65	struct bareudp_dev *bareudp;
 66	unsigned short family;
 67	unsigned int len;
 68	__be16 proto;
 69	void *oiph;
 70	int err;
 71	int nh;
 72
 73	bareudp = rcu_dereference_sk_user_data(sk);
 74	if (!bareudp)
 75		goto drop;
 76
 77	if (skb->protocol ==  htons(ETH_P_IP))
 78		family = AF_INET;
 79	else
 80		family = AF_INET6;
 81
 82	if (bareudp->ethertype == htons(ETH_P_IP)) {
 83		__u8 ipversion;
 84
 85		if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
 86				  sizeof(ipversion))) {
 87			dev_core_stats_rx_dropped_inc(bareudp->dev);
 88			goto drop;
 89		}
 90		ipversion >>= 4;
 91
 92		if (ipversion == 4) {
 93			proto = htons(ETH_P_IP);
 94		} else if (ipversion == 6 && bareudp->multi_proto_mode) {
 95			proto = htons(ETH_P_IPV6);
 96		} else {
 97			dev_core_stats_rx_dropped_inc(bareudp->dev);
 98			goto drop;
 99		}
100	} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
101		struct iphdr *tunnel_hdr;
102
103		tunnel_hdr = (struct iphdr *)skb_network_header(skb);
104		if (tunnel_hdr->version == 4) {
105			if (!ipv4_is_multicast(tunnel_hdr->daddr)) {
106				proto = bareudp->ethertype;
107			} else if (bareudp->multi_proto_mode &&
108				   ipv4_is_multicast(tunnel_hdr->daddr)) {
109				proto = htons(ETH_P_MPLS_MC);
110			} else {
111				dev_core_stats_rx_dropped_inc(bareudp->dev);
112				goto drop;
113			}
114		} else {
115			int addr_type;
116			struct ipv6hdr *tunnel_hdr_v6;
117
118			tunnel_hdr_v6 = (struct ipv6hdr *)skb_network_header(skb);
119			addr_type =
120			ipv6_addr_type((struct in6_addr *)&tunnel_hdr_v6->daddr);
121			if (!(addr_type & IPV6_ADDR_MULTICAST)) {
122				proto = bareudp->ethertype;
123			} else if (bareudp->multi_proto_mode &&
124				   (addr_type & IPV6_ADDR_MULTICAST)) {
125				proto = htons(ETH_P_MPLS_MC);
126			} else {
127				dev_core_stats_rx_dropped_inc(bareudp->dev);
128				goto drop;
129			}
130		}
131	} else {
132		proto = bareudp->ethertype;
133	}
134
135	if (iptunnel_pull_header(skb, BAREUDP_BASE_HLEN,
136				 proto,
137				 !net_eq(bareudp->net,
138				 dev_net(bareudp->dev)))) {
139		dev_core_stats_rx_dropped_inc(bareudp->dev);
140		goto drop;
141	}
142
143	__set_bit(IP_TUNNEL_KEY_BIT, key);
144
145	tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0);
146	if (!tun_dst) {
147		dev_core_stats_rx_dropped_inc(bareudp->dev);
148		goto drop;
149	}
150	skb_dst_set(skb, &tun_dst->dst);
151	skb->dev = bareudp->dev;
152	skb_reset_mac_header(skb);
153
154	/* Save offset of outer header relative to skb->head,
155	 * because we are going to reset the network header to the inner header
156	 * and might change skb->head.
157	 */
158	nh = skb_network_header(skb) - skb->head;
159
160	skb_reset_network_header(skb);
161
162	if (!pskb_inet_may_pull(skb)) {
163		DEV_STATS_INC(bareudp->dev, rx_length_errors);
164		DEV_STATS_INC(bareudp->dev, rx_errors);
165		goto drop;
166	}
167
168	/* Get the outer header. */
169	oiph = skb->head + nh;
170
171	if (!ipv6_mod_enabled() || family == AF_INET)
172		err = IP_ECN_decapsulate(oiph, skb);
173	else
174		err = IP6_ECN_decapsulate(oiph, skb);
175
176	if (unlikely(err)) {
177		if (log_ecn_error) {
178			if  (!ipv6_mod_enabled() || family == AF_INET)
179				net_info_ratelimited("non-ECT from %pI4 "
180						     "with TOS=%#x\n",
181						     &((struct iphdr *)oiph)->saddr,
182						     ((struct iphdr *)oiph)->tos);
183			else
184				net_info_ratelimited("non-ECT from %pI6\n",
185						     &((struct ipv6hdr *)oiph)->saddr);
186		}
187		if (err > 1) {
188			DEV_STATS_INC(bareudp->dev, rx_frame_errors);
189			DEV_STATS_INC(bareudp->dev, rx_errors);
190			goto drop;
191		}
192	}
193
194	len = skb->len;
195	err = gro_cells_receive(&bareudp->gro_cells, skb);
196	if (likely(err == NET_RX_SUCCESS))
197		dev_sw_netstats_rx_add(bareudp->dev, len);
198
199	return 0;
200drop:
201	/* Consume bad packet */
202	kfree_skb(skb);
203
204	return 0;
205}
206
207static int bareudp_err_lookup(struct sock *sk, struct sk_buff *skb)
208{
209	return 0;
210}
211
212static int bareudp_init(struct net_device *dev)
213{
214	struct bareudp_dev *bareudp = netdev_priv(dev);
215	int err;
216
217	err = gro_cells_init(&bareudp->gro_cells, dev);
218	if (err)
219		return err;
220
221	return 0;
222}
223
224static void bareudp_uninit(struct net_device *dev)
225{
226	struct bareudp_dev *bareudp = netdev_priv(dev);
227
228	gro_cells_destroy(&bareudp->gro_cells);
229}
230
231static struct socket *bareudp_create_sock(struct net *net, __be16 port)
232{
233	struct udp_port_cfg udp_conf;
234	struct socket *sock;
235	int err;
236
237	memset(&udp_conf, 0, sizeof(udp_conf));
238
239	if (ipv6_mod_enabled())
240		udp_conf.family = AF_INET6;
241	else
242		udp_conf.family = AF_INET;
243
244	udp_conf.local_udp_port = port;
245	/* Open UDP socket */
246	err = udp_sock_create(net, &udp_conf, &sock);
247	if (err < 0)
248		return ERR_PTR(err);
249
250	udp_allow_gso(sock->sk);
251	return sock;
252}
253
254/* Create new listen socket if needed */
255static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port)
256{
257	struct udp_tunnel_sock_cfg tunnel_cfg;
258	struct socket *sock;
259
260	sock = bareudp_create_sock(bareudp->net, port);
261	if (IS_ERR(sock))
262		return PTR_ERR(sock);
263
264	/* Mark socket as an encapsulation socket */
265	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
266	tunnel_cfg.sk_user_data = bareudp;
267	tunnel_cfg.encap_type = 1;
268	tunnel_cfg.encap_rcv = bareudp_udp_encap_recv;
269	tunnel_cfg.encap_err_lookup = bareudp_err_lookup;
270	tunnel_cfg.encap_destroy = NULL;
271	setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg);
272
273	rcu_assign_pointer(bareudp->sock, sock);
274	return 0;
275}
276
277static int bareudp_open(struct net_device *dev)
278{
279	struct bareudp_dev *bareudp = netdev_priv(dev);
280	int ret = 0;
281
282	ret =  bareudp_socket_create(bareudp, bareudp->port);
283	return ret;
284}
285
286static void bareudp_sock_release(struct bareudp_dev *bareudp)
287{
288	struct socket *sock;
289
290	sock = bareudp->sock;
291	rcu_assign_pointer(bareudp->sock, NULL);
292	synchronize_net();
293	udp_tunnel_sock_release(sock);
294}
295
296static int bareudp_stop(struct net_device *dev)
297{
298	struct bareudp_dev *bareudp = netdev_priv(dev);
299
300	bareudp_sock_release(bareudp);
301	return 0;
302}
303
304static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
305			    struct bareudp_dev *bareudp,
306			    const struct ip_tunnel_info *info)
307{
308	bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
309	bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
310	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
311	struct socket *sock = rcu_dereference(bareudp->sock);
 
312	const struct ip_tunnel_key *key = &info->key;
313	struct rtable *rt;
314	__be16 sport, df;
315	int min_headroom;
316	__u8 tos, ttl;
317	__be32 saddr;
318	int err;
319
320	if (skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
321		return -EINVAL;
322
323	if (!sock)
324		return -ESHUTDOWN;
325
326	sport = udp_flow_src_port(bareudp->net, skb,
327				  bareudp->sport_min, USHRT_MAX,
328				  true);
329	rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr, &info->key,
330				   sport, bareudp->port, key->tos,
331				   use_cache ?
332				   (struct dst_cache *)&info->dst_cache : NULL);
333
334	if (IS_ERR(rt))
335		return PTR_ERR(rt);
336
337	skb_tunnel_check_pmtu(skb, &rt->dst,
338			      BAREUDP_IPV4_HLEN + info->options_len, false);
339
340	tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
341	ttl = key->ttl;
342	df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ?
343	     htons(IP_DF) : 0;
344	skb_scrub_packet(skb, xnet);
345
346	err = -ENOSPC;
347	if (!skb_pull(skb, skb_network_offset(skb)))
348		goto free_dst;
349
350	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len +
351		BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);
352
353	err = skb_cow_head(skb, min_headroom);
354	if (unlikely(err))
355		goto free_dst;
356
357	err = udp_tunnel_handle_offloads(skb, udp_sum);
358	if (err)
359		goto free_dst;
360
361	skb_set_inner_protocol(skb, bareudp->ethertype);
362	udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
363			    tos, ttl, df, sport, bareudp->port,
364			    !net_eq(bareudp->net, dev_net(bareudp->dev)),
365			    !test_bit(IP_TUNNEL_CSUM_BIT,
366				      info->key.tun_flags));
367	return 0;
368
369free_dst:
370	dst_release(&rt->dst);
371	return err;
372}
373
374static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
375			     struct bareudp_dev *bareudp,
376			     const struct ip_tunnel_info *info)
377{
378	bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
379	bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
380	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
381	struct socket *sock  = rcu_dereference(bareudp->sock);
 
382	const struct ip_tunnel_key *key = &info->key;
383	struct dst_entry *dst = NULL;
384	struct in6_addr saddr, daddr;
385	int min_headroom;
386	__u8 prio, ttl;
387	__be16 sport;
388	int err;
389
390	if (skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
391		return -EINVAL;
392
393	if (!sock)
394		return -ESHUTDOWN;
395
396	sport = udp_flow_src_port(bareudp->net, skb,
397				  bareudp->sport_min, USHRT_MAX,
398				  true);
399	dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock, 0, &saddr,
400				     key, sport, bareudp->port, key->tos,
401				     use_cache ?
402				     (struct dst_cache *) &info->dst_cache : NULL);
403	if (IS_ERR(dst))
404		return PTR_ERR(dst);
405
406	skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len,
407			      false);
408
409	prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
410	ttl = key->ttl;
411
412	skb_scrub_packet(skb, xnet);
413
414	err = -ENOSPC;
415	if (!skb_pull(skb, skb_network_offset(skb)))
416		goto free_dst;
417
418	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
419		BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);
420
421	err = skb_cow_head(skb, min_headroom);
422	if (unlikely(err))
423		goto free_dst;
424
425	err = udp_tunnel_handle_offloads(skb, udp_sum);
426	if (err)
427		goto free_dst;
428
429	daddr = info->key.u.ipv6.dst;
430	udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
431			     &saddr, &daddr, prio, ttl,
432			     info->key.label, sport, bareudp->port,
433			     !test_bit(IP_TUNNEL_CSUM_BIT,
434				       info->key.tun_flags));
435	return 0;
436
437free_dst:
438	dst_release(dst);
439	return err;
440}
441
442static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
443{
444	if (bareudp->ethertype == proto)
445		return true;
446
447	if (!bareudp->multi_proto_mode)
448		return false;
449
450	if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
451	    proto == htons(ETH_P_MPLS_MC))
452		return true;
453
454	if (bareudp->ethertype == htons(ETH_P_IP) &&
455	    proto == htons(ETH_P_IPV6))
456		return true;
457
458	return false;
459}
460
461static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
462{
463	struct bareudp_dev *bareudp = netdev_priv(dev);
464	struct ip_tunnel_info *info = NULL;
465	int err;
466
467	if (!bareudp_proto_valid(bareudp, skb->protocol)) {
468		err = -EINVAL;
469		goto tx_error;
470	}
471
472	info = skb_tunnel_info(skb);
473	if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
474		err = -EINVAL;
475		goto tx_error;
476	}
477
478	rcu_read_lock();
479	if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6)
480		err = bareudp6_xmit_skb(skb, dev, bareudp, info);
481	else
482		err = bareudp_xmit_skb(skb, dev, bareudp, info);
483
484	rcu_read_unlock();
485
486	if (likely(!err))
487		return NETDEV_TX_OK;
488tx_error:
489	dev_kfree_skb(skb);
490
491	if (err == -ELOOP)
492		DEV_STATS_INC(dev, collisions);
493	else if (err == -ENETUNREACH)
494		DEV_STATS_INC(dev, tx_carrier_errors);
495
496	DEV_STATS_INC(dev, tx_errors);
497	return NETDEV_TX_OK;
498}
499
500static int bareudp_fill_metadata_dst(struct net_device *dev,
501				     struct sk_buff *skb)
502{
503	struct ip_tunnel_info *info = skb_tunnel_info(skb);
504	struct bareudp_dev *bareudp = netdev_priv(dev);
505	bool use_cache;
506	__be16 sport;
507
508	use_cache = ip_tunnel_dst_cache_usable(skb, info);
509	sport = udp_flow_src_port(bareudp->net, skb,
510				  bareudp->sport_min, USHRT_MAX,
511				  true);
512
513	if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) {
514		struct rtable *rt;
515		__be32 saddr;
516
517		rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr,
518					   &info->key, sport, bareudp->port,
519					   info->key.tos,
520					   use_cache ? &info->dst_cache : NULL);
521		if (IS_ERR(rt))
522			return PTR_ERR(rt);
523
524		ip_rt_put(rt);
525		info->key.u.ipv4.src = saddr;
526	} else if (ip_tunnel_info_af(info) == AF_INET6) {
527		struct dst_entry *dst;
528		struct in6_addr saddr;
529		struct socket *sock = rcu_dereference(bareudp->sock);
530
531		dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock,
532					     0, &saddr, &info->key,
533					     sport, bareudp->port, info->key.tos,
534					     use_cache ? &info->dst_cache : NULL);
535		if (IS_ERR(dst))
536			return PTR_ERR(dst);
537
538		dst_release(dst);
539		info->key.u.ipv6.src = saddr;
540	} else {
541		return -EINVAL;
542	}
543
544	info->key.tp_src = sport;
545	info->key.tp_dst = bareudp->port;
546	return 0;
547}
548
549static const struct net_device_ops bareudp_netdev_ops = {
550	.ndo_init               = bareudp_init,
551	.ndo_uninit             = bareudp_uninit,
552	.ndo_open               = bareudp_open,
553	.ndo_stop               = bareudp_stop,
554	.ndo_start_xmit         = bareudp_xmit,
555	.ndo_fill_metadata_dst  = bareudp_fill_metadata_dst,
556};
557
558static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = {
559	[IFLA_BAREUDP_PORT]                = { .type = NLA_U16 },
560	[IFLA_BAREUDP_ETHERTYPE]	   = { .type = NLA_U16 },
561	[IFLA_BAREUDP_SRCPORT_MIN]         = { .type = NLA_U16 },
562	[IFLA_BAREUDP_MULTIPROTO_MODE]     = { .type = NLA_FLAG },
563};
564
565/* Info for udev, that this is a virtual tunnel endpoint */
566static const struct device_type bareudp_type = {
567	.name = "bareudp",
568};
569
570/* Initialize the device structure. */
571static void bareudp_setup(struct net_device *dev)
572{
573	dev->netdev_ops = &bareudp_netdev_ops;
574	dev->needs_free_netdev = true;
575	SET_NETDEV_DEVTYPE(dev, &bareudp_type);
576	dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
577	dev->features    |= NETIF_F_RXCSUM;
 
578	dev->features    |= NETIF_F_GSO_SOFTWARE;
579	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
580	dev->hw_features |= NETIF_F_RXCSUM;
581	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
582	dev->hard_header_len = 0;
583	dev->addr_len = 0;
584	dev->mtu = ETH_DATA_LEN;
585	dev->min_mtu = IPV4_MIN_MTU;
586	dev->max_mtu = IP_MAX_MTU - BAREUDP_BASE_HLEN;
587	dev->type = ARPHRD_NONE;
588	netif_keep_dst(dev);
589	dev->priv_flags |= IFF_NO_QUEUE;
590	dev->lltx = true;
591	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
592	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
593}
594
595static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
596			    struct netlink_ext_ack *extack)
597{
598	if (!data) {
599		NL_SET_ERR_MSG(extack,
600			       "Not enough attributes provided to perform the operation");
601		return -EINVAL;
602	}
603	return 0;
604}
605
606static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
607			struct netlink_ext_ack *extack)
608{
609	memset(conf, 0, sizeof(*conf));
610
611	if (!data[IFLA_BAREUDP_PORT]) {
612		NL_SET_ERR_MSG(extack, "port not specified");
613		return -EINVAL;
614	}
615	if (!data[IFLA_BAREUDP_ETHERTYPE]) {
616		NL_SET_ERR_MSG(extack, "ethertype not specified");
617		return -EINVAL;
618	}
619
620	conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
621	conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);
622
623	if (data[IFLA_BAREUDP_SRCPORT_MIN])
624		conf->sport_min =  nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);
625
626	if (data[IFLA_BAREUDP_MULTIPROTO_MODE])
627		conf->multi_proto_mode = true;
628
629	return 0;
630}
631
632static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn,
633					    const struct bareudp_conf *conf)
634{
635	struct bareudp_dev *bareudp, *t = NULL;
636
637	list_for_each_entry(bareudp, &bn->bareudp_list, next) {
638		if (conf->port == bareudp->port)
639			t = bareudp;
640	}
641	return t;
642}
643
644static int bareudp_configure(struct net *net, struct net_device *dev,
645			     struct bareudp_conf *conf,
646			     struct netlink_ext_ack *extack)
647{
648	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
649	struct bareudp_dev *t, *bareudp = netdev_priv(dev);
650	int err;
651
652	bareudp->net = net;
653	bareudp->dev = dev;
654	t = bareudp_find_dev(bn, conf);
655	if (t) {
656		NL_SET_ERR_MSG(extack, "Another bareudp device using the same port already exists");
657		return -EBUSY;
658	}
659
660	if (conf->multi_proto_mode &&
661	    (conf->ethertype != htons(ETH_P_MPLS_UC) &&
662	     conf->ethertype != htons(ETH_P_IP))) {
663		NL_SET_ERR_MSG(extack, "Cannot set multiproto mode for this ethertype (only IPv4 and unicast MPLS are supported)");
664		return -EINVAL;
665	}
666
667	bareudp->port = conf->port;
668	bareudp->ethertype = conf->ethertype;
669	bareudp->sport_min = conf->sport_min;
670	bareudp->multi_proto_mode = conf->multi_proto_mode;
671
672	err = register_netdevice(dev);
673	if (err)
674		return err;
675
676	list_add(&bareudp->next, &bn->bareudp_list);
677	return 0;
678}
679
680static int bareudp_link_config(struct net_device *dev,
681			       struct nlattr *tb[])
682{
683	int err;
684
685	if (tb[IFLA_MTU]) {
686		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
687		if (err)
688			return err;
689	}
690	return 0;
691}
692
693static void bareudp_dellink(struct net_device *dev, struct list_head *head)
694{
695	struct bareudp_dev *bareudp = netdev_priv(dev);
696
697	list_del(&bareudp->next);
698	unregister_netdevice_queue(dev, head);
699}
700
701static int bareudp_newlink(struct net *net, struct net_device *dev,
702			   struct nlattr *tb[], struct nlattr *data[],
703			   struct netlink_ext_ack *extack)
704{
705	struct bareudp_conf conf;
706	int err;
707
708	err = bareudp2info(data, &conf, extack);
709	if (err)
710		return err;
711
712	err = bareudp_configure(net, dev, &conf, extack);
713	if (err)
714		return err;
715
716	err = bareudp_link_config(dev, tb);
717	if (err)
718		goto err_unconfig;
719
720	return 0;
721
722err_unconfig:
723	bareudp_dellink(dev, NULL);
724	return err;
725}
726
727static size_t bareudp_get_size(const struct net_device *dev)
728{
729	return  nla_total_size(sizeof(__be16)) +  /* IFLA_BAREUDP_PORT */
730		nla_total_size(sizeof(__be16)) +  /* IFLA_BAREUDP_ETHERTYPE */
731		nla_total_size(sizeof(__u16))  +  /* IFLA_BAREUDP_SRCPORT_MIN */
732		nla_total_size(0)              +  /* IFLA_BAREUDP_MULTIPROTO_MODE */
733		0;
734}
735
736static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev)
737{
738	struct bareudp_dev *bareudp = netdev_priv(dev);
739
740	if (nla_put_be16(skb, IFLA_BAREUDP_PORT, bareudp->port))
741		goto nla_put_failure;
742	if (nla_put_be16(skb, IFLA_BAREUDP_ETHERTYPE, bareudp->ethertype))
743		goto nla_put_failure;
744	if (nla_put_u16(skb, IFLA_BAREUDP_SRCPORT_MIN, bareudp->sport_min))
745		goto nla_put_failure;
746	if (bareudp->multi_proto_mode &&
747	    nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE))
748		goto nla_put_failure;
749
750	return 0;
751
752nla_put_failure:
753	return -EMSGSIZE;
754}
755
756static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
757	.kind           = "bareudp",
758	.maxtype        = IFLA_BAREUDP_MAX,
759	.policy         = bareudp_policy,
760	.priv_size      = sizeof(struct bareudp_dev),
761	.setup          = bareudp_setup,
762	.validate       = bareudp_validate,
763	.newlink        = bareudp_newlink,
764	.dellink        = bareudp_dellink,
765	.get_size       = bareudp_get_size,
766	.fill_info      = bareudp_fill_info,
767};
768
769static __net_init int bareudp_init_net(struct net *net)
770{
771	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
772
773	INIT_LIST_HEAD(&bn->bareudp_list);
774	return 0;
775}
776
777static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
778{
779	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
780	struct bareudp_dev *bareudp, *next;
781
782	list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
783		unregister_netdevice_queue(bareudp->dev, head);
784}
785
786static void __net_exit bareudp_exit_batch_rtnl(struct list_head *net_list,
787					       struct list_head *dev_kill_list)
788{
789	struct net *net;
790
791	list_for_each_entry(net, net_list, exit_list)
792		bareudp_destroy_tunnels(net, dev_kill_list);
793}
794
795static struct pernet_operations bareudp_net_ops = {
796	.init = bareudp_init_net,
797	.exit_batch_rtnl = bareudp_exit_batch_rtnl,
798	.id   = &bareudp_net_id,
799	.size = sizeof(struct bareudp_net),
800};
801
802static int __init bareudp_init_module(void)
803{
804	int rc;
805
806	rc = register_pernet_subsys(&bareudp_net_ops);
807	if (rc)
808		goto out1;
809
810	rc = rtnl_link_register(&bareudp_link_ops);
811	if (rc)
812		goto out2;
813
814	return 0;
815out2:
816	unregister_pernet_subsys(&bareudp_net_ops);
817out1:
818	return rc;
819}
820late_initcall(bareudp_init_module);
821
822static void __exit bareudp_cleanup_module(void)
823{
824	rtnl_link_unregister(&bareudp_link_ops);
825	unregister_pernet_subsys(&bareudp_net_ops);
826}
827module_exit(bareudp_cleanup_module);
828
829MODULE_ALIAS_RTNL_LINK("bareudp");
830MODULE_LICENSE("GPL");
831MODULE_AUTHOR("Martin Varghese <martin.varghese@nokia.com>");
832MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic");
v6.9.4
  1// SPDX-License-Identifier: GPL-2.0
/* Bareudp: UDP tunnel encapsulation for different payload types like
  3 * MPLS, NSH, IP, etc.
  4 * Copyright (c) 2019 Nokia, Inc.
  5 * Authors:  Martin Varghese, <martin.varghese@nokia.com>
  6 */
  7
  8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  9
 10#include <linux/kernel.h>
 11#include <linux/module.h>
 12#include <linux/etherdevice.h>
 13#include <linux/hash.h>
 14#include <net/dst_metadata.h>
 15#include <net/gro_cells.h>
 16#include <net/rtnetlink.h>
 17#include <net/protocol.h>
 18#include <net/ip6_tunnel.h>
 19#include <net/ip_tunnels.h>
 20#include <net/udp_tunnel.h>
 21#include <net/bareudp.h>
 22
 23#define BAREUDP_BASE_HLEN sizeof(struct udphdr)
 24#define BAREUDP_IPV4_HLEN (sizeof(struct iphdr) + \
 25			   sizeof(struct udphdr))
 26#define BAREUDP_IPV6_HLEN (sizeof(struct ipv6hdr) + \
 27			   sizeof(struct udphdr))
 28
 29static bool log_ecn_error = true;
 30module_param(log_ecn_error, bool, 0644);
 31MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 32
 33/* per-network namespace private data for this module */
 34
 35static unsigned int bareudp_net_id;
 36
 37struct bareudp_net {
 38	struct list_head        bareudp_list;
 39};
 40
 41struct bareudp_conf {
 42	__be16 ethertype;
 43	__be16 port;
 44	u16 sport_min;
 45	bool multi_proto_mode;
 46};
 47
 48/* Pseudo network device */
 49struct bareudp_dev {
 50	struct net         *net;        /* netns for packet i/o */
 51	struct net_device  *dev;        /* netdev for bareudp tunnel */
 52	__be16		   ethertype;
 53	__be16             port;
 54	u16	           sport_min;
 55	bool               multi_proto_mode;
 56	struct socket      __rcu *sock;
 57	struct list_head   next;        /* bareudp node  on namespace list */
 58	struct gro_cells   gro_cells;
 59};
 60
 61static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 62{
 63	struct metadata_dst *tun_dst = NULL;
 
 64	struct bareudp_dev *bareudp;
 65	unsigned short family;
 66	unsigned int len;
 67	__be16 proto;
 68	void *oiph;
 69	int err;
 
 70
 71	bareudp = rcu_dereference_sk_user_data(sk);
 72	if (!bareudp)
 73		goto drop;
 74
 75	if (skb->protocol ==  htons(ETH_P_IP))
 76		family = AF_INET;
 77	else
 78		family = AF_INET6;
 79
 80	if (bareudp->ethertype == htons(ETH_P_IP)) {
 81		__u8 ipversion;
 82
 83		if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
 84				  sizeof(ipversion))) {
 85			bareudp->dev->stats.rx_dropped++;
 86			goto drop;
 87		}
 88		ipversion >>= 4;
 89
 90		if (ipversion == 4) {
 91			proto = htons(ETH_P_IP);
 92		} else if (ipversion == 6 && bareudp->multi_proto_mode) {
 93			proto = htons(ETH_P_IPV6);
 94		} else {
 95			bareudp->dev->stats.rx_dropped++;
 96			goto drop;
 97		}
 98	} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
 99		struct iphdr *tunnel_hdr;
100
101		tunnel_hdr = (struct iphdr *)skb_network_header(skb);
102		if (tunnel_hdr->version == 4) {
103			if (!ipv4_is_multicast(tunnel_hdr->daddr)) {
104				proto = bareudp->ethertype;
105			} else if (bareudp->multi_proto_mode &&
106				   ipv4_is_multicast(tunnel_hdr->daddr)) {
107				proto = htons(ETH_P_MPLS_MC);
108			} else {
109				bareudp->dev->stats.rx_dropped++;
110				goto drop;
111			}
112		} else {
113			int addr_type;
114			struct ipv6hdr *tunnel_hdr_v6;
115
116			tunnel_hdr_v6 = (struct ipv6hdr *)skb_network_header(skb);
117			addr_type =
118			ipv6_addr_type((struct in6_addr *)&tunnel_hdr_v6->daddr);
119			if (!(addr_type & IPV6_ADDR_MULTICAST)) {
120				proto = bareudp->ethertype;
121			} else if (bareudp->multi_proto_mode &&
122				   (addr_type & IPV6_ADDR_MULTICAST)) {
123				proto = htons(ETH_P_MPLS_MC);
124			} else {
125				bareudp->dev->stats.rx_dropped++;
126				goto drop;
127			}
128		}
129	} else {
130		proto = bareudp->ethertype;
131	}
132
133	if (iptunnel_pull_header(skb, BAREUDP_BASE_HLEN,
134				 proto,
135				 !net_eq(bareudp->net,
136				 dev_net(bareudp->dev)))) {
137		bareudp->dev->stats.rx_dropped++;
138		goto drop;
139	}
140	tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
 
 
 
141	if (!tun_dst) {
142		bareudp->dev->stats.rx_dropped++;
143		goto drop;
144	}
145	skb_dst_set(skb, &tun_dst->dst);
146	skb->dev = bareudp->dev;
147	oiph = skb_network_header(skb);
 
 
 
 
 
 
 
148	skb_reset_network_header(skb);
149	skb_reset_mac_header(skb);
 
 
 
 
 
 
 
 
150
151	if (!ipv6_mod_enabled() || family == AF_INET)
152		err = IP_ECN_decapsulate(oiph, skb);
153	else
154		err = IP6_ECN_decapsulate(oiph, skb);
155
156	if (unlikely(err)) {
157		if (log_ecn_error) {
158			if  (!ipv6_mod_enabled() || family == AF_INET)
159				net_info_ratelimited("non-ECT from %pI4 "
160						     "with TOS=%#x\n",
161						     &((struct iphdr *)oiph)->saddr,
162						     ((struct iphdr *)oiph)->tos);
163			else
164				net_info_ratelimited("non-ECT from %pI6\n",
165						     &((struct ipv6hdr *)oiph)->saddr);
166		}
167		if (err > 1) {
168			++bareudp->dev->stats.rx_frame_errors;
169			++bareudp->dev->stats.rx_errors;
170			goto drop;
171		}
172	}
173
174	len = skb->len;
175	err = gro_cells_receive(&bareudp->gro_cells, skb);
176	if (likely(err == NET_RX_SUCCESS))
177		dev_sw_netstats_rx_add(bareudp->dev, len);
178
179	return 0;
180drop:
181	/* Consume bad packet */
182	kfree_skb(skb);
183
184	return 0;
185}
186
187static int bareudp_err_lookup(struct sock *sk, struct sk_buff *skb)
188{
189	return 0;
190}
191
192static int bareudp_init(struct net_device *dev)
193{
194	struct bareudp_dev *bareudp = netdev_priv(dev);
195	int err;
196
197	err = gro_cells_init(&bareudp->gro_cells, dev);
198	if (err)
199		return err;
200
201	return 0;
202}
203
204static void bareudp_uninit(struct net_device *dev)
205{
206	struct bareudp_dev *bareudp = netdev_priv(dev);
207
208	gro_cells_destroy(&bareudp->gro_cells);
209}
210
211static struct socket *bareudp_create_sock(struct net *net, __be16 port)
212{
213	struct udp_port_cfg udp_conf;
214	struct socket *sock;
215	int err;
216
217	memset(&udp_conf, 0, sizeof(udp_conf));
218
219	if (ipv6_mod_enabled())
220		udp_conf.family = AF_INET6;
221	else
222		udp_conf.family = AF_INET;
223
224	udp_conf.local_udp_port = port;
225	/* Open UDP socket */
226	err = udp_sock_create(net, &udp_conf, &sock);
227	if (err < 0)
228		return ERR_PTR(err);
229
230	udp_allow_gso(sock->sk);
231	return sock;
232}
233
234/* Create new listen socket if needed */
235static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port)
236{
237	struct udp_tunnel_sock_cfg tunnel_cfg;
238	struct socket *sock;
239
240	sock = bareudp_create_sock(bareudp->net, port);
241	if (IS_ERR(sock))
242		return PTR_ERR(sock);
243
244	/* Mark socket as an encapsulation socket */
245	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
246	tunnel_cfg.sk_user_data = bareudp;
247	tunnel_cfg.encap_type = 1;
248	tunnel_cfg.encap_rcv = bareudp_udp_encap_recv;
249	tunnel_cfg.encap_err_lookup = bareudp_err_lookup;
250	tunnel_cfg.encap_destroy = NULL;
251	setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg);
252
253	rcu_assign_pointer(bareudp->sock, sock);
254	return 0;
255}
256
257static int bareudp_open(struct net_device *dev)
258{
259	struct bareudp_dev *bareudp = netdev_priv(dev);
260	int ret = 0;
261
262	ret =  bareudp_socket_create(bareudp, bareudp->port);
263	return ret;
264}
265
266static void bareudp_sock_release(struct bareudp_dev *bareudp)
267{
268	struct socket *sock;
269
270	sock = bareudp->sock;
271	rcu_assign_pointer(bareudp->sock, NULL);
272	synchronize_net();
273	udp_tunnel_sock_release(sock);
274}
275
276static int bareudp_stop(struct net_device *dev)
277{
278	struct bareudp_dev *bareudp = netdev_priv(dev);
279
280	bareudp_sock_release(bareudp);
281	return 0;
282}
283
static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			    struct bareudp_dev *bareudp,
			    const struct ip_tunnel_info *info)
{
	/* Encapsulate @skb in UDP over IPv4 as described by the tx tunnel
	 * metadata @info and hand it to the IP stack.  Runs under
	 * rcu_read_lock() (see bareudp_xmit()).  Returns 0 on success or
	 * a negative errno; on error the caller owns and frees the skb.
	 */
	bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct socket *sock = rcu_dereference(bareudp->sock);
	bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	const struct ip_tunnel_key *key = &info->key;
	struct rtable *rt;
	__be16 sport, df;
	int min_headroom;
	__u8 tos, ttl;
	__be32 saddr;
	int err;

	/* Device is being stopped: socket already unpublished. */
	if (!sock)
		return -ESHUTDOWN;

	/* Hash the inner flow into the source port so ECMP/RSS can
	 * spread distinct flows across paths/queues.
	 */
	sport = udp_flow_src_port(bareudp->net, skb,
				  bareudp->sport_min, USHRT_MAX,
				  true);
	rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr, &info->key,
				   sport, bareudp->port, key->tos,
				   use_cache ?
				   (struct dst_cache *)&info->dst_cache : NULL);

	if (IS_ERR(rt))
		return PTR_ERR(rt);

	/* Path-MTU check must account for the outer IPv4 + UDP headers. */
	skb_tunnel_check_pmtu(skb, &rt->dst,
			      BAREUDP_IPV4_HLEN + info->options_len, false);

	tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
	ttl = key->ttl;
	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	/* Scrub state when the packet crosses a netns boundary. */
	skb_scrub_packet(skb, xnet);

	err = -ENOSPC;
	/* Drop anything in front of the inner network header. */
	if (!skb_pull(skb, skb_network_offset(skb)))
		goto free_dst;

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len +
		BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	skb_set_inner_protocol(skb, bareudp->ethertype);
	/* udp_tunnel_xmit_skb() consumes both the skb and the route. */
	udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
			    tos, ttl, df, sport, bareudp->port,
			    !net_eq(bareudp->net, dev_net(bareudp->dev)),
			    !(info->key.tun_flags & TUNNEL_CSUM));
	return 0;

free_dst:
	dst_release(&rt->dst);
	return err;
}
348
static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			     struct bareudp_dev *bareudp,
			     const struct ip_tunnel_info *info)
{
	/* IPv6 counterpart of bareudp_xmit_skb(): encapsulate @skb in
	 * UDP over IPv6 per the tx metadata in @info.  Runs under
	 * rcu_read_lock(); returns 0 or a negative errno (caller frees
	 * the skb on error).
	 */
	bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct socket *sock  = rcu_dereference(bareudp->sock);
	bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	const struct ip_tunnel_key *key = &info->key;
	struct dst_entry *dst = NULL;
	struct in6_addr saddr, daddr;
	int min_headroom;
	__u8 prio, ttl;
	__be16 sport;
	int err;

	/* Device is being stopped: socket already unpublished. */
	if (!sock)
		return -ESHUTDOWN;

	/* Flow-hashed source port for ECMP/RSS spreading. */
	sport = udp_flow_src_port(bareudp->net, skb,
				  bareudp->sport_min, USHRT_MAX,
				  true);
	dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock, 0, &saddr,
				     key, sport, bareudp->port, key->tos,
				     use_cache ?
				     (struct dst_cache *) &info->dst_cache : NULL);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	/* Path-MTU check must account for the outer IPv6 + UDP headers. */
	skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len,
			      false);

	prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
	ttl = key->ttl;

	/* Scrub state when the packet crosses a netns boundary. */
	skb_scrub_packet(skb, xnet);

	err = -ENOSPC;
	/* Drop anything in front of the inner network header. */
	if (!skb_pull(skb, skb_network_offset(skb)))
		goto free_dst;

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
		BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	daddr = info->key.u.ipv6.dst;
	/* udp_tunnel6_xmit_skb() consumes both the skb and the dst. */
	udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
			     &saddr, &daddr, prio, ttl,
			     info->key.label, sport, bareudp->port,
			     !(info->key.tun_flags & TUNNEL_CSUM));
	return 0;

free_dst:
	dst_release(dst);
	return err;
}
412
413static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
414{
415	if (bareudp->ethertype == proto)
416		return true;
417
418	if (!bareudp->multi_proto_mode)
419		return false;
420
421	if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
422	    proto == htons(ETH_P_MPLS_MC))
423		return true;
424
425	if (bareudp->ethertype == htons(ETH_P_IP) &&
426	    proto == htons(ETH_P_IPV6))
427		return true;
428
429	return false;
430}
431
/* ndo_start_xmit: validate, encapsulate and transmit one skb. */
static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	struct ip_tunnel_info *info = NULL;
	int err;

	/* Only the configured ethertype (plus its multiproto companions)
	 * is allowed into the tunnel.
	 */
	if (!bareudp_proto_valid(bareudp, skb->protocol)) {
		err = -EINVAL;
		goto tx_error;
	}

	/* bareudp is a metadata-based (external-mode) tunnel: every skb
	 * must carry tx tunnel metadata describing the outer headers.
	 */
	info = skb_tunnel_info(skb);
	if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
		err = -EINVAL;
		goto tx_error;
	}

	/* RCU read section protects bareudp->sock against ndo_stop. */
	rcu_read_lock();
	if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6)
		err = bareudp6_xmit_skb(skb, dev, bareudp, info);
	else
		err = bareudp_xmit_skb(skb, dev, bareudp, info);

	rcu_read_unlock();

	if (likely(!err))
		return NETDEV_TX_OK;
tx_error:
	/* On any failure we free the skb here and account the error;
	 * NETDEV_TX_OK is still returned (the packet is consumed).
	 */
	dev_kfree_skb(skb);

	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
470
/* ndo_fill_metadata_dst: resolve the route for the skb's tx tunnel
 * metadata and fill in the source address/ports, without transmitting.
 * Used e.g. by OVS to pre-populate tunnel metadata.
 */
static int bareudp_fill_metadata_dst(struct net_device *dev,
				     struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct bareudp_dev *bareudp = netdev_priv(dev);
	bool use_cache;
	__be16 sport;

	use_cache = ip_tunnel_dst_cache_usable(skb, info);
	/* Same flow-hashed source port selection as the xmit path. */
	sport = udp_flow_src_port(bareudp->net, skb,
				  bareudp->sport_min, USHRT_MAX,
				  true);

	if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) {
		struct rtable *rt;
		__be32 saddr;

		/* Route lookup only to learn the source address; the
		 * route itself is dropped right away.
		 */
		rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr,
					   &info->key, sport, bareudp->port,
					   info->key.tos,
					   use_cache ? &info->dst_cache : NULL);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		ip_rt_put(rt);
		info->key.u.ipv4.src = saddr;
	} else if (ip_tunnel_info_af(info) == AF_INET6) {
		struct dst_entry *dst;
		struct in6_addr saddr;
		/* NOTE(review): rcu_dereference() with no explicit
		 * rcu_read_lock() in sight — presumably the caller holds
		 * it; confirm the ndo_fill_metadata_dst calling context.
		 */
		struct socket *sock = rcu_dereference(bareudp->sock);

		dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock,
					     0, &saddr, &info->key,
					     sport, bareudp->port, info->key.tos,
					     use_cache ? &info->dst_cache : NULL);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		dst_release(dst);
		info->key.u.ipv6.src = saddr;
	} else {
		return -EINVAL;
	}

	/* Export the UDP ports that the xmit path would use. */
	info->key.tp_src = sport;
	info->key.tp_dst = bareudp->port;
	return 0;
}
519
/* Netdev callbacks for a bareudp tunnel device. */
static const struct net_device_ops bareudp_netdev_ops = {
	.ndo_init               = bareudp_init,
	.ndo_uninit             = bareudp_uninit,
	.ndo_open               = bareudp_open,
	.ndo_stop               = bareudp_stop,
	.ndo_start_xmit         = bareudp_xmit,
	.ndo_fill_metadata_dst  = bareudp_fill_metadata_dst,
};
528
/* Netlink attribute policy for IFLA_BAREUDP_* (see bareudp2info()). */
static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = {
	[IFLA_BAREUDP_PORT]                = { .type = NLA_U16 },
	[IFLA_BAREUDP_ETHERTYPE]	   = { .type = NLA_U16 },
	[IFLA_BAREUDP_SRCPORT_MIN]         = { .type = NLA_U16 },
	[IFLA_BAREUDP_MULTIPROTO_MODE]     = { .type = NLA_FLAG },
};
535
/* Info for udev, that this is a virtual tunnel endpoint (exposed as the
 * DEVTYPE of the netdev via SET_NETDEV_DEVTYPE in bareudp_setup()).
 */
static const struct device_type bareudp_type = {
	.name = "bareudp",
};
540
541/* Initialize the device structure. */
542static void bareudp_setup(struct net_device *dev)
543{
544	dev->netdev_ops = &bareudp_netdev_ops;
545	dev->needs_free_netdev = true;
546	SET_NETDEV_DEVTYPE(dev, &bareudp_type);
547	dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
548	dev->features    |= NETIF_F_RXCSUM;
549	dev->features    |= NETIF_F_LLTX;
550	dev->features    |= NETIF_F_GSO_SOFTWARE;
551	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
552	dev->hw_features |= NETIF_F_RXCSUM;
553	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
554	dev->hard_header_len = 0;
555	dev->addr_len = 0;
556	dev->mtu = ETH_DATA_LEN;
557	dev->min_mtu = IPV4_MIN_MTU;
558	dev->max_mtu = IP_MAX_MTU - BAREUDP_BASE_HLEN;
559	dev->type = ARPHRD_NONE;
560	netif_keep_dst(dev);
561	dev->priv_flags |= IFF_NO_QUEUE;
 
562	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
563	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
564}
565
566static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
567			    struct netlink_ext_ack *extack)
568{
569	if (!data) {
570		NL_SET_ERR_MSG(extack,
571			       "Not enough attributes provided to perform the operation");
572		return -EINVAL;
573	}
574	return 0;
575}
576
577static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
578			struct netlink_ext_ack *extack)
579{
580	memset(conf, 0, sizeof(*conf));
581
582	if (!data[IFLA_BAREUDP_PORT]) {
583		NL_SET_ERR_MSG(extack, "port not specified");
584		return -EINVAL;
585	}
586	if (!data[IFLA_BAREUDP_ETHERTYPE]) {
587		NL_SET_ERR_MSG(extack, "ethertype not specified");
588		return -EINVAL;
589	}
590
591	conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
592	conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);
593
594	if (data[IFLA_BAREUDP_SRCPORT_MIN])
595		conf->sport_min =  nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);
596
597	if (data[IFLA_BAREUDP_MULTIPROTO_MODE])
598		conf->multi_proto_mode = true;
599
600	return 0;
601}
602
603static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn,
604					    const struct bareudp_conf *conf)
605{
606	struct bareudp_dev *bareudp, *t = NULL;
607
608	list_for_each_entry(bareudp, &bn->bareudp_list, next) {
609		if (conf->port == bareudp->port)
610			t = bareudp;
611	}
612	return t;
613}
614
615static int bareudp_configure(struct net *net, struct net_device *dev,
616			     struct bareudp_conf *conf,
617			     struct netlink_ext_ack *extack)
618{
619	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
620	struct bareudp_dev *t, *bareudp = netdev_priv(dev);
621	int err;
622
623	bareudp->net = net;
624	bareudp->dev = dev;
625	t = bareudp_find_dev(bn, conf);
626	if (t) {
627		NL_SET_ERR_MSG(extack, "Another bareudp device using the same port already exists");
628		return -EBUSY;
629	}
630
631	if (conf->multi_proto_mode &&
632	    (conf->ethertype != htons(ETH_P_MPLS_UC) &&
633	     conf->ethertype != htons(ETH_P_IP))) {
634		NL_SET_ERR_MSG(extack, "Cannot set multiproto mode for this ethertype (only IPv4 and unicast MPLS are supported)");
635		return -EINVAL;
636	}
637
638	bareudp->port = conf->port;
639	bareudp->ethertype = conf->ethertype;
640	bareudp->sport_min = conf->sport_min;
641	bareudp->multi_proto_mode = conf->multi_proto_mode;
642
643	err = register_netdevice(dev);
644	if (err)
645		return err;
646
647	list_add(&bareudp->next, &bn->bareudp_list);
648	return 0;
649}
650
651static int bareudp_link_config(struct net_device *dev,
652			       struct nlattr *tb[])
653{
654	int err;
655
656	if (tb[IFLA_MTU]) {
657		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
658		if (err)
659			return err;
660	}
661	return 0;
662}
663
/* rtnl_link_ops dellink: drop the tunnel from its netns list and queue
 * the netdev for unregistration (@head may be NULL for immediate
 * unregistration).
 */
static void bareudp_dellink(struct net_device *dev, struct list_head *head)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	list_del(&bareudp->next);
	unregister_netdevice_queue(dev, head);
}
671
672static int bareudp_newlink(struct net *net, struct net_device *dev,
673			   struct nlattr *tb[], struct nlattr *data[],
674			   struct netlink_ext_ack *extack)
675{
676	struct bareudp_conf conf;
677	int err;
678
679	err = bareudp2info(data, &conf, extack);
680	if (err)
681		return err;
682
683	err = bareudp_configure(net, dev, &conf, extack);
684	if (err)
685		return err;
686
687	err = bareudp_link_config(dev, tb);
688	if (err)
689		goto err_unconfig;
690
691	return 0;
692
693err_unconfig:
694	bareudp_dellink(dev, NULL);
695	return err;
696}
697
/* Upper bound on the netlink attribute payload emitted by
 * bareudp_fill_info(); must stay in sync with it.
 */
static size_t bareudp_get_size(const struct net_device *dev)
{
	return  nla_total_size(sizeof(__be16)) +  /* IFLA_BAREUDP_PORT */
		nla_total_size(sizeof(__be16)) +  /* IFLA_BAREUDP_ETHERTYPE */
		nla_total_size(sizeof(__u16))  +  /* IFLA_BAREUDP_SRCPORT_MIN */
		nla_total_size(0)              +  /* IFLA_BAREUDP_MULTIPROTO_MODE */
		0;
}
706
707static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev)
708{
709	struct bareudp_dev *bareudp = netdev_priv(dev);
710
711	if (nla_put_be16(skb, IFLA_BAREUDP_PORT, bareudp->port))
712		goto nla_put_failure;
713	if (nla_put_be16(skb, IFLA_BAREUDP_ETHERTYPE, bareudp->ethertype))
714		goto nla_put_failure;
715	if (nla_put_u16(skb, IFLA_BAREUDP_SRCPORT_MIN, bareudp->sport_min))
716		goto nla_put_failure;
717	if (bareudp->multi_proto_mode &&
718	    nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE))
719		goto nla_put_failure;
720
721	return 0;
722
723nla_put_failure:
724	return -EMSGSIZE;
725}
726
/* rtnetlink glue: "ip link add ... type bareudp ..." support. */
static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
	.kind           = "bareudp",
	.maxtype        = IFLA_BAREUDP_MAX,
	.policy         = bareudp_policy,
	.priv_size      = sizeof(struct bareudp_dev),
	.setup          = bareudp_setup,
	.validate       = bareudp_validate,
	.newlink        = bareudp_newlink,
	.dellink        = bareudp_dellink,
	.get_size       = bareudp_get_size,
	.fill_info      = bareudp_fill_info,
};
739
/* Per-netns init: start with an empty bareudp device list. */
static __net_init int bareudp_init_net(struct net *net)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);

	INIT_LIST_HEAD(&bn->bareudp_list);
	return 0;
}
747
/* Queue every bareudp device of @net onto @head for batched
 * unregistration during netns teardown.
 */
static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
	struct bareudp_dev *bareudp, *next;

	/* _safe: unregistration may unlink entries via bareudp_dellink(). */
	list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
		unregister_netdevice_queue(bareudp->dev, head);
}
756
/* Batched netns exit hook (called with RTNL held, per the
 * exit_batch_rtnl pernet_operations contract): collect every tunnel of
 * the dying namespaces onto @dev_kill_list.
 */
static void __net_exit bareudp_exit_batch_rtnl(struct list_head *net_list,
					       struct list_head *dev_kill_list)
{
	struct net *net;

	list_for_each_entry(net, net_list, exit_list)
		bareudp_destroy_tunnels(net, dev_kill_list);
}
765
/* Per-network-namespace registration of the bareudp device list. */
static struct pernet_operations bareudp_net_ops = {
	.init = bareudp_init_net,
	.exit_batch_rtnl = bareudp_exit_batch_rtnl,
	.id   = &bareudp_net_id,
	.size = sizeof(struct bareudp_net),
};
772
773static int __init bareudp_init_module(void)
774{
775	int rc;
776
777	rc = register_pernet_subsys(&bareudp_net_ops);
778	if (rc)
779		goto out1;
780
781	rc = rtnl_link_register(&bareudp_link_ops);
782	if (rc)
783		goto out2;
784
785	return 0;
786out2:
787	unregister_pernet_subsys(&bareudp_net_ops);
788out1:
789	return rc;
790}
791late_initcall(bareudp_init_module);
792
/* Module exit: unregister in reverse order of bareudp_init_module(). */
static void __exit bareudp_cleanup_module(void)
{
	rtnl_link_unregister(&bareudp_link_ops);
	unregister_pernet_subsys(&bareudp_net_ops);
}
module_exit(bareudp_cleanup_module);
799
/* Allow "ip link add ... type bareudp" to autoload this module. */
MODULE_ALIAS_RTNL_LINK("bareudp");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Varghese <martin.varghese@nokia.com>");
MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic");