// SPDX-License-Identifier: GPL-2.0
/* Bareudp: UDP tunnel encapsulation for different Payload types like
 * MPLS, NSH, IP, etc.
 * Copyright (c) 2019 Nokia, Inc.
 * Authors: Martin Varghese, <martin.varghese@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
#include <net/dst_metadata.h>
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
#include <net/protocol.h>
#include <net/ip6_tunnel.h>
#include <net/ip_tunnels.h>
#include <net/udp_tunnel.h>
#include <net/bareudp.h>

#define BAREUDP_BASE_HLEN sizeof(struct udphdr)
#define BAREUDP_IPV4_HLEN (sizeof(struct iphdr) + \
			   sizeof(struct udphdr))
#define BAREUDP_IPV6_HLEN (sizeof(struct ipv6hdr) + \
			   sizeof(struct udphdr))

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

/* per-network namespace private data for this module */

static unsigned int bareudp_net_id;

struct bareudp_net {
	struct list_head        bareudp_list;
};

/* Pseudo network device */
struct bareudp_dev {
	struct net         *net;        /* netns for packet i/o */
	struct net_device  *dev;        /* netdev for bareudp tunnel */
	__be16             ethertype;
	__be16             port;
	u16                sport_min;
	bool               multi_proto_mode;
	struct socket      __rcu *sock;
	struct list_head   next;        /* bareudp node on namespace list */
	struct gro_cells   gro_cells;
};

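/* Receive handler installed as encap_rcv on the tunnel's UDP socket.
 * The inner protocol is derived from the device's configured ethertype:
 * with an ETH_P_IP ethertype the inner IP version field is peeked at
 * (IPv6 is only accepted in multi_proto_mode), and with ETH_P_MPLS_UC a
 * multicast outer destination selects ETH_P_MPLS_MC (again only in
 * multi_proto_mode).  The UDP header is then stripped, a TUNNEL_KEY
 * metadata dst built from the outer header is attached, ECN is
 * decapsulated and the packet is handed to the device's GRO cells.
 */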
static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct metadata_dst *tun_dst = NULL;
	struct pcpu_sw_netstats *stats;
	struct bareudp_dev *bareudp;
	unsigned short family;
	unsigned int len;
	__be16 proto;
	void *oiph;
	int err;

	bareudp = rcu_dereference_sk_user_data(sk);
	if (!bareudp)
		goto drop;

	if (skb->protocol == htons(ETH_P_IP))
		family = AF_INET;
	else
		family = AF_INET6;

	if (bareudp->ethertype == htons(ETH_P_IP)) {
		struct iphdr *iphdr;

		iphdr = (struct iphdr *)(skb->data + BAREUDP_BASE_HLEN);
		if (iphdr->version == 4) {
			proto = bareudp->ethertype;
		} else if (bareudp->multi_proto_mode && (iphdr->version == 6)) {
			proto = htons(ETH_P_IPV6);
		} else {
			bareudp->dev->stats.rx_dropped++;
			goto drop;
		}
	} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
		struct iphdr *tunnel_hdr;

		tunnel_hdr = (struct iphdr *)skb_network_header(skb);
		if (tunnel_hdr->version == 4) {
			if (!ipv4_is_multicast(tunnel_hdr->daddr)) {
				proto = bareudp->ethertype;
			} else if (bareudp->multi_proto_mode &&
				   ipv4_is_multicast(tunnel_hdr->daddr)) {
				proto = htons(ETH_P_MPLS_MC);
			} else {
				bareudp->dev->stats.rx_dropped++;
				goto drop;
			}
		} else {
			int addr_type;
			struct ipv6hdr *tunnel_hdr_v6;

			tunnel_hdr_v6 = (struct ipv6hdr *)skb_network_header(skb);
			addr_type =
			ipv6_addr_type((struct in6_addr *)&tunnel_hdr_v6->daddr);
			if (!(addr_type & IPV6_ADDR_MULTICAST)) {
				proto = bareudp->ethertype;
			} else if (bareudp->multi_proto_mode &&
				   (addr_type & IPV6_ADDR_MULTICAST)) {
				proto = htons(ETH_P_MPLS_MC);
			} else {
				bareudp->dev->stats.rx_dropped++;
				goto drop;
			}
		}
	} else {
		proto = bareudp->ethertype;
	}

	if (iptunnel_pull_header(skb, BAREUDP_BASE_HLEN,
				 proto,
				 !net_eq(bareudp->net,
				 dev_net(bareudp->dev)))) {
		bareudp->dev->stats.rx_dropped++;
		goto drop;
	}
	tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
	if (!tun_dst) {
		bareudp->dev->stats.rx_dropped++;
		goto drop;
	}
	skb_dst_set(skb, &tun_dst->dst);
	skb->dev = bareudp->dev;
	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
	else
		err = IP6_ECN_decapsulate(oiph, skb);

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
				net_info_ratelimited("non-ECT from %pI4 "
						     "with TOS=%#x\n",
						     &((struct iphdr *)oiph)->saddr,
						     ((struct iphdr *)oiph)->tos);
			else
				net_info_ratelimited("non-ECT from %pI6\n",
						     &((struct ipv6hdr *)oiph)->saddr);
		}
		if (err > 1) {
			++bareudp->dev->stats.rx_frame_errors;
			++bareudp->dev->stats.rx_errors;
			goto drop;
		}
	}

	len = skb->len;
	err = gro_cells_receive(&bareudp->gro_cells, skb);
	if (likely(err == NET_RX_SUCCESS)) {
		stats = this_cpu_ptr(bareudp->dev->tstats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
	return 0;
drop:
	/* Consume bad packet */
	kfree_skb(skb);

	return 0;
}

static int bareudp_err_lookup(struct sock *sk, struct sk_buff *skb)
{
	return 0;
}

static int bareudp_init(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&bareudp->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}
	return 0;
}

static void bareudp_uninit(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	gro_cells_destroy(&bareudp->gro_cells);
	free_percpu(dev->tstats);
}

static struct socket *bareudp_create_sock(struct net *net, __be16 port)
{
	struct udp_port_cfg udp_conf;
	struct socket *sock;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));
#if IS_ENABLED(CONFIG_IPV6)
	udp_conf.family = AF_INET6;
#else
	udp_conf.family = AF_INET;
#endif
	udp_conf.local_udp_port = port;
	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}

/* Create new listen socket if needed */
static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port)
{
	struct udp_tunnel_sock_cfg tunnel_cfg;
	struct socket *sock;

	sock = bareudp_create_sock(bareudp->net, port);
	if (IS_ERR(sock))
		return PTR_ERR(sock);

	/* Mark socket as an encapsulation socket */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = bareudp;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = bareudp_udp_encap_recv;
	tunnel_cfg.encap_err_lookup = bareudp_err_lookup;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg);

	/* setup_udp_tunnel_sock() does not call udp_encap_enable() when the
	 * socket is an IPv6 socket, so an explicit call is needed here.
	 */
	if (sock->sk->sk_family == AF_INET6)
		udp_encap_enable();

	rcu_assign_pointer(bareudp->sock, sock);
	return 0;
}

static int bareudp_open(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	int ret = 0;

	ret = bareudp_socket_create(bareudp, bareudp->port);
	return ret;
}

static void bareudp_sock_release(struct bareudp_dev *bareudp)
{
	struct socket *sock;

	sock = bareudp->sock;
	rcu_assign_pointer(bareudp->sock, NULL);
	synchronize_net();
	udp_tunnel_sock_release(sock);
}

static int bareudp_stop(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	bareudp_sock_release(bareudp);
	return 0;
}

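/* IPv4 transmit path: route the outer packet with ip_route_output_tunnel(),
 * pick a source port with udp_flow_src_port() from [sport_min, USHRT_MAX],
 * derive the outer TOS/TTL/DF from the tunnel key, ensure enough headroom,
 * and emit the packet with udp_tunnel_xmit_skb().  The IPv6 variant below
 * follows the same steps using ip6_dst_lookup_tunnel() and
 * udp_tunnel6_xmit_skb().
 */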
static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			    struct bareudp_dev *bareudp,
			    const struct ip_tunnel_info *info)
{
	bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct socket *sock = rcu_dereference(bareudp->sock);
	bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	const struct ip_tunnel_key *key = &info->key;
	struct rtable *rt;
	__be16 sport, df;
	int min_headroom;
	__u8 tos, ttl;
	__be32 saddr;
	int err;

	if (!sock)
		return -ESHUTDOWN;

	rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, info,
				    IPPROTO_UDP, use_cache);

	if (IS_ERR(rt))
		return PTR_ERR(rt);

	skb_tunnel_check_pmtu(skb, &rt->dst,
			      BAREUDP_IPV4_HLEN + info->options_len, false);

	sport = udp_flow_src_port(bareudp->net, skb,
				  bareudp->sport_min, USHRT_MAX,
				  true);
	tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
	ttl = key->ttl;
	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	skb_scrub_packet(skb, xnet);

	err = -ENOSPC;
	if (!skb_pull(skb, skb_network_offset(skb)))
		goto free_dst;

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len +
		BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	skb_set_inner_protocol(skb, bareudp->ethertype);
	udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
			    tos, ttl, df, sport, bareudp->port,
			    !net_eq(bareudp->net, dev_net(bareudp->dev)),
			    !(info->key.tun_flags & TUNNEL_CSUM));
	return 0;

free_dst:
	dst_release(&rt->dst);
	return err;
}

static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			     struct bareudp_dev *bareudp,
			     const struct ip_tunnel_info *info)
{
	bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct socket *sock = rcu_dereference(bareudp->sock);
	bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	const struct ip_tunnel_key *key = &info->key;
	struct dst_entry *dst = NULL;
	struct in6_addr saddr, daddr;
	int min_headroom;
	__u8 prio, ttl;
	__be16 sport;
	int err;

	if (!sock)
		return -ESHUTDOWN;

	dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, &saddr, info,
				    IPPROTO_UDP, use_cache);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len,
			      false);

	sport = udp_flow_src_port(bareudp->net, skb,
				  bareudp->sport_min, USHRT_MAX,
				  true);
	prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
	ttl = key->ttl;

	skb_scrub_packet(skb, xnet);

	err = -ENOSPC;
	if (!skb_pull(skb, skb_network_offset(skb)))
		goto free_dst;

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
		BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	daddr = info->key.u.ipv6.dst;
	udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
			     &saddr, &daddr, prio, ttl,
			     info->key.label, sport, bareudp->port,
			     !(info->key.tun_flags & TUNNEL_CSUM));
	return 0;

free_dst:
	dst_release(dst);
	return err;
}

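/* A packet may be transmitted only if its protocol matches the configured
 * ethertype, or, in multi_proto_mode, the companion protocol: IPv6 on an
 * ETH_P_IP device and MPLS multicast on an ETH_P_MPLS_UC device.
 */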
static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
{
	if (bareudp->ethertype == proto)
		return true;

	if (!bareudp->multi_proto_mode)
		return false;

	if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
	    proto == htons(ETH_P_MPLS_MC))
		return true;

	if (bareudp->ethertype == htons(ETH_P_IP) &&
	    proto == htons(ETH_P_IPV6))
		return true;

	return false;
}

static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	struct ip_tunnel_info *info = NULL;
	int err;

	if (!bareudp_proto_valid(bareudp, skb->protocol)) {
		err = -EINVAL;
		goto tx_error;
	}

	info = skb_tunnel_info(skb);
	if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
		err = -EINVAL;
		goto tx_error;
	}

	rcu_read_lock();
	if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6)
		err = bareudp6_xmit_skb(skb, dev, bareudp, info);
	else
		err = bareudp_xmit_skb(skb, dev, bareudp, info);

	rcu_read_unlock();

	if (likely(!err))
		return NETDEV_TX_OK;
tx_error:
	dev_kfree_skb(skb);

	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

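/* ndo_fill_metadata_dst: pre-fill the locally chosen parts of the tunnel
 * metadata (outer source address and UDP ports) so that callers querying
 * the egress tunnel parameters see the values the device itself would pick
 * on transmit.
 */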
static int bareudp_fill_metadata_dst(struct net_device *dev,
				     struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct bareudp_dev *bareudp = netdev_priv(dev);
	bool use_cache;

	use_cache = ip_tunnel_dst_cache_usable(skb, info);

	if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) {
		struct rtable *rt;
		__be32 saddr;

		rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr,
					    info, IPPROTO_UDP, use_cache);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		ip_rt_put(rt);
		info->key.u.ipv4.src = saddr;
	} else if (ip_tunnel_info_af(info) == AF_INET6) {
		struct dst_entry *dst;
		struct in6_addr saddr;
		struct socket *sock = rcu_dereference(bareudp->sock);

		dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock,
					    &saddr, info, IPPROTO_UDP,
					    use_cache);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		dst_release(dst);
		info->key.u.ipv6.src = saddr;
	} else {
		return -EINVAL;
	}

	info->key.tp_src = udp_flow_src_port(bareudp->net, skb,
					     bareudp->sport_min,
					     USHRT_MAX, true);
	info->key.tp_dst = bareudp->port;
	return 0;
}

static const struct net_device_ops bareudp_netdev_ops = {
	.ndo_init               = bareudp_init,
	.ndo_uninit             = bareudp_uninit,
	.ndo_open               = bareudp_open,
	.ndo_stop               = bareudp_stop,
	.ndo_start_xmit         = bareudp_xmit,
	.ndo_get_stats64        = ip_tunnel_get_stats64,
	.ndo_fill_metadata_dst  = bareudp_fill_metadata_dst,
};

static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = {
	[IFLA_BAREUDP_PORT]                = { .type = NLA_U16 },
	[IFLA_BAREUDP_ETHERTYPE]           = { .type = NLA_U16 },
	[IFLA_BAREUDP_SRCPORT_MIN]         = { .type = NLA_U16 },
	[IFLA_BAREUDP_MULTIPROTO_MODE]     = { .type = NLA_FLAG },
};

/* Info for udev: this is a virtual tunnel endpoint */
static struct device_type bareudp_type = {
	.name = "bareudp",
};

/* Initialize the device structure. */
static void bareudp_setup(struct net_device *dev)
{
	dev->netdev_ops = &bareudp_netdev_ops;
	dev->needs_free_netdev = true;
	SET_NETDEV_DEVTYPE(dev, &bareudp_type);
	dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features    |= NETIF_F_RXCSUM;
	dev->features    |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->mtu = ETH_DATA_LEN;
	dev->min_mtu = IPV4_MIN_MTU;
	dev->max_mtu = IP_MAX_MTU - BAREUDP_BASE_HLEN;
	dev->type = ARPHRD_NONE;
	netif_keep_dst(dev);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
}

static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	if (!data) {
		NL_SET_ERR_MSG(extack,
			       "Not enough attributes provided to perform the operation");
		return -EINVAL;
	}
	return 0;
}

static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
			struct netlink_ext_ack *extack)
{
	memset(conf, 0, sizeof(*conf));

	if (!data[IFLA_BAREUDP_PORT]) {
		NL_SET_ERR_MSG(extack, "port not specified");
		return -EINVAL;
	}
	if (!data[IFLA_BAREUDP_ETHERTYPE]) {
		NL_SET_ERR_MSG(extack, "ethertype not specified");
		return -EINVAL;
	}

	if (data[IFLA_BAREUDP_PORT])
		conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);

	if (data[IFLA_BAREUDP_ETHERTYPE])
		conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);

	if (data[IFLA_BAREUDP_SRCPORT_MIN])
		conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);

	if (data[IFLA_BAREUDP_MULTIPROTO_MODE])
		conf->multi_proto_mode = true;

	return 0;
}

static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn,
					    const struct bareudp_conf *conf)
{
	struct bareudp_dev *bareudp, *t = NULL;

	list_for_each_entry(bareudp, &bn->bareudp_list, next) {
		if (conf->port == bareudp->port)
			t = bareudp;
	}
	return t;
}

static int bareudp_configure(struct net *net, struct net_device *dev,
			     struct bareudp_conf *conf)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
	struct bareudp_dev *t, *bareudp = netdev_priv(dev);
	int err;

	bareudp->net = net;
	bareudp->dev = dev;
	t = bareudp_find_dev(bn, conf);
	if (t)
		return -EBUSY;

	if (conf->multi_proto_mode &&
	    (conf->ethertype != htons(ETH_P_MPLS_UC) &&
	     conf->ethertype != htons(ETH_P_IP)))
		return -EINVAL;

	bareudp->port = conf->port;
	bareudp->ethertype = conf->ethertype;
	bareudp->sport_min = conf->sport_min;
	bareudp->multi_proto_mode = conf->multi_proto_mode;

	err = register_netdevice(dev);
	if (err)
		return err;

	list_add(&bareudp->next, &bn->bareudp_list);
	return 0;
}

static int bareudp_link_config(struct net_device *dev,
			       struct nlattr *tb[])
{
	int err;

	if (tb[IFLA_MTU]) {
		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
		if (err)
			return err;
	}
	return 0;
}

static int bareudp_newlink(struct net *net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct bareudp_conf conf;
	int err;

	err = bareudp2info(data, &conf, extack);
	if (err)
		return err;

	err = bareudp_configure(net, dev, &conf);
	if (err)
		return err;

	err = bareudp_link_config(dev, tb);
	if (err)
		return err;

	return 0;
}

static void bareudp_dellink(struct net_device *dev, struct list_head *head)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	list_del(&bareudp->next);
	unregister_netdevice_queue(dev, head);
}

static size_t bareudp_get_size(const struct net_device *dev)
{
	return  nla_total_size(sizeof(__be16)) +  /* IFLA_BAREUDP_PORT */
		nla_total_size(sizeof(__be16)) +  /* IFLA_BAREUDP_ETHERTYPE */
		nla_total_size(sizeof(__u16))  +  /* IFLA_BAREUDP_SRCPORT_MIN */
		nla_total_size(0)              +  /* IFLA_BAREUDP_MULTIPROTO_MODE */
		0;
}

static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	if (nla_put_be16(skb, IFLA_BAREUDP_PORT, bareudp->port))
		goto nla_put_failure;
	if (nla_put_be16(skb, IFLA_BAREUDP_ETHERTYPE, bareudp->ethertype))
		goto nla_put_failure;
	if (nla_put_u16(skb, IFLA_BAREUDP_SRCPORT_MIN, bareudp->sport_min))
		goto nla_put_failure;
	if (bareudp->multi_proto_mode &&
	    nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
	.kind           = "bareudp",
	.maxtype        = IFLA_BAREUDP_MAX,
	.policy         = bareudp_policy,
	.priv_size      = sizeof(struct bareudp_dev),
	.setup          = bareudp_setup,
	.validate       = bareudp_validate,
	.newlink        = bareudp_newlink,
	.dellink        = bareudp_dellink,
	.get_size       = bareudp_get_size,
	.fill_info      = bareudp_fill_info,
};

struct net_device *bareudp_dev_create(struct net *net, const char *name,
				      u8 name_assign_type,
				      struct bareudp_conf *conf)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	int err;

	memset(tb, 0, sizeof(tb));
	dev = rtnl_create_link(net, name, name_assign_type,
			       &bareudp_link_ops, tb, NULL);
	if (IS_ERR(dev))
		return dev;

	err = bareudp_configure(net, dev, conf);
	if (err) {
		free_netdev(dev);
		return ERR_PTR(err);
	}
	err = dev_set_mtu(dev, IP_MAX_MTU - BAREUDP_BASE_HLEN);
	if (err)
		goto err;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto err;

	return dev;
err:
	bareudp_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bareudp_dev_create);
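
/* bareudp_dev_create() lets other kernel code create a bareudp device
 * without going through rtnetlink.  A minimal sketch (the device name, the
 * init_net namespace and the MPLS-in-UDP port 6635 below are only
 * illustrative; callers choose their own):
 *
 *	struct bareudp_conf conf = {
 *		.ethertype = htons(ETH_P_MPLS_UC),
 *		.port      = htons(6635),
 *	};
 *	struct net_device *dev;
 *
 *	dev = bareudp_dev_create(&init_net, "mplsudp0", NET_NAME_USER, &conf);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *
 * Every error path returns an ERR_PTR(), so IS_ERR() is the only check a
 * caller needs.
 */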

static __net_init int bareudp_init_net(struct net *net)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);

	INIT_LIST_HEAD(&bn->bareudp_list);
	return 0;
}

static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
	struct bareudp_dev *bareudp, *next;

	list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
		unregister_netdevice_queue(bareudp->dev, head);
}

static void __net_exit bareudp_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		bareudp_destroy_tunnels(net, &list);

	/* unregister the devices gathered above */
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations bareudp_net_ops = {
	.init = bareudp_init_net,
	.exit_batch = bareudp_exit_batch_net,
	.id   = &bareudp_net_id,
	.size = sizeof(struct bareudp_net),
};

static int __init bareudp_init_module(void)
{
	int rc;

	rc = register_pernet_subsys(&bareudp_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&bareudp_link_ops);
	if (rc)
		goto out2;

	return 0;
out2:
	unregister_pernet_subsys(&bareudp_net_ops);
out1:
	return rc;
}
late_initcall(bareudp_init_module);

static void __exit bareudp_cleanup_module(void)
{
	rtnl_link_unregister(&bareudp_link_ops);
	unregister_pernet_subsys(&bareudp_net_ops);
}
module_exit(bareudp_cleanup_module);

MODULE_ALIAS_RTNL_LINK("bareudp");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Varghese <martin.varghese@nokia.com>");
MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic");