// SPDX-License-Identifier: GPL-2.0
/*
 *	XFRM virtual interface
 *
 *	Copyright (C) 2018 secunet Security Networks AG
 *
 *	Author:
 *	Steffen Klassert <steffen.klassert@secunet.com>
 */
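
/*
 * xfrm interfaces are created over rtnetlink (link kind "xfrm") and are
 * bound to an interface ID (if_id) that is matched against the if_id of
 * xfrm states and policies.  Illustrative usage, assuming an iproute2
 * with xfrm interface support (commands are examples, not part of this
 * file):
 *
 *	ip link add ipsec0 type xfrm dev eth0 if_id 42
 *	ip xfrm state add ... if_id 42
 *	ip xfrm policy add ... if_id 42
 */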

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/etherdevice.h>

static int xfrmi_dev_init(struct net_device *dev);
static void xfrmi_dev_setup(struct net_device *dev);
static struct rtnl_link_ops xfrmi_link_ops __read_mostly;
static unsigned int xfrmi_net_id __read_mostly;

struct xfrmi_net {
	/* lists for storing interfaces in use */
	struct xfrm_if __rcu *xfrmi[1];
};

#define for_each_xfrmi_rcu(start, xi) \
	for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next))

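/*
 * Find the xfrm interface whose if_id matches the given state.  Walks the
 * per-netns list under RCU; only interfaces that are up are returned.
 */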
static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
{
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
	struct xfrm_if *xi;

	for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
		if (x->if_id == xi->p.if_id &&
		    (xi->dev->flags & IFF_UP))
			return xi;
	}

	return NULL;
}

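/*
 * decode_session callback: map an skb carrying a secpath to the xfrm
 * interface it arrived on.  The match is done by ifindex (preferring the
 * sdif when the packet came in through an L3 slave device) and only
 * interfaces that are up are considered.
 */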
static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
					    unsigned short family)
{
	struct xfrmi_net *xfrmn;
	struct xfrm_if *xi;
	int ifindex = 0;

	if (!secpath_exists(skb) || !skb->dev)
		return NULL;

	switch (family) {
	case AF_INET6:
		ifindex = inet6_sdif(skb);
		break;
	case AF_INET:
		ifindex = inet_sdif(skb);
		break;
	}
	if (!ifindex)
		ifindex = skb->dev->ifindex;

	xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);

	for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
		if (ifindex == xi->dev->ifindex &&
		    (xi->dev->flags & IFF_UP))
			return xi;
	}

	return NULL;
}

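/*
 * Insert/remove an interface on the per-netns list.  Writers run under
 * RTNL; readers walk the list under RCU via for_each_xfrmi_rcu().
 */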
static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
{
	struct xfrm_if __rcu **xip = &xfrmn->xfrmi[0];

	rcu_assign_pointer(xi->next, rtnl_dereference(*xip));
	rcu_assign_pointer(*xip, xi);
}

static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
{
	struct xfrm_if __rcu **xip;
	struct xfrm_if *iter;

	for (xip = &xfrmn->xfrmi[0];
	     (iter = rtnl_dereference(*xip)) != NULL;
	     xip = &iter->next) {
		if (xi == iter) {
			rcu_assign_pointer(*xip, xi->next);
			break;
		}
	}
}

static void xfrmi_dev_free(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);

	gro_cells_destroy(&xi->gro_cells);
	free_percpu(dev->tstats);
}

static int xfrmi_create(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
	int err;

	dev->rtnl_link_ops = &xfrmi_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	dev_hold(dev);
	xfrmi_link(xfrmn, xi);

	return 0;

out:
	return err;
}

static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p)
{
	struct xfrm_if __rcu **xip;
	struct xfrm_if *xi;
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);

	for (xip = &xfrmn->xfrmi[0];
	     (xi = rtnl_dereference(*xip)) != NULL;
	     xip = &xi->next)
		if (xi->p.if_id == p->if_id)
			return xi;

	return NULL;
}

static void xfrmi_dev_uninit(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);

	xfrmi_unlink(xfrmn, xi);
	dev_put(dev);
}

static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
{
	skb->tstamp = 0;
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb->ignore_df = 0;
	skb_dst_drop(skb);
	nf_reset_ct(skb);
	nf_reset_trace(skb);

	if (!xnet)
		return;

	ipvs_reset(skb);
	secpath_reset(skb);
	skb_orphan(skb);
	skb->mark = 0;
}

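/*
 * Receive callback, invoked by the xfrm input layer once a packet has been
 * decapsulated.  Reassigns the skb to the matching xfrm interface, enforces
 * an inbound policy check when the packet crosses network namespaces,
 * scrubs namespace-local skb state and updates the interface statistics.
 */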
static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
{
	const struct xfrm_mode *inner_mode;
	struct pcpu_sw_netstats *tstats;
	struct net_device *dev;
	struct xfrm_state *x;
	struct xfrm_if *xi;
	bool xnet;

	if (err && !secpath_exists(skb))
		return 0;

	x = xfrm_input_state(skb);

	xi = xfrmi_lookup(xs_net(x), x);
	if (!xi)
		return 1;

	dev = xi->dev;
	skb->dev = dev;

	if (err) {
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;

		return 0;
	}

	xnet = !net_eq(xi->net, dev_net(skb->dev));

	if (xnet) {
		inner_mode = &x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL) {
				XFRM_INC_STATS(dev_net(skb->dev),
					       LINUX_MIB_XFRMINSTATEMODEERROR);
				return -EINVAL;
			}
		}

		if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb,
				       inner_mode->family))
			return -EPERM;
	}

	xfrmi_scrub_packet(skb, xnet);

	tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	return 0;
}

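/*
 * Transmit helper: run the xfrm lookup bound to this interface's if_id,
 * verify that the selected state carries the same if_id, handle path MTU
 * (emitting ICMP/ICMPv6 "packet too big" errors when needed) and hand the
 * packet to dst_output() on the underlying device.
 */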
static int
xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net_device_stats *stats = &xi->dev->stats;
	struct dst_entry *dst = skb_dst(skb);
	unsigned int length = skb->len;
	struct net_device *tdev;
	struct xfrm_state *x;
	int err = -1;
	int mtu;

	if (!dst)
		goto tx_err_link_failure;

	dst_hold(dst);
	dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto tx_err_link_failure;
	}

	x = dst->xfrm;
	if (!x)
		goto tx_err_link_failure;

	if (x->if_id != xi->p.if_id)
		goto tx_err_link_failure;

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     dev->name);
		goto tx_err_dst_release;
	}

	mtu = dst_mtu(dst);
	if (!skb->ignore_df && skb->len > mtu) {
		skb_dst_update_pmtu(skb, mtu);

		if (skb->protocol == htons(ETH_P_IPV6)) {
			if (mtu < IPV6_MIN_MTU)
				mtu = IPV6_MIN_MTU;

			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		} else {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		}

		dst_release(dst);
		return -EMSGSIZE;
	}

	xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
	skb_dst_set(skb, dst);
	skb->dev = tdev;

	err = dst_output(xi->net, skb->sk, skb);
	if (net_xmit_eval(err) == 0) {
		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += length;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
	} else {
		stats->tx_errors++;
		stats->tx_aborted_errors++;
	}

	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}

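/*
 * ndo_start_xmit: decode the flow from the packet headers, pin the output
 * to the configured underlying link and pass the skb to xfrmi_xmit2().
 * Packets of unsupported protocols are counted as errors and dropped.
 */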
static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net_device_stats *stats = &xi->dev->stats;
	struct flowi fl;
	int ret;

	memset(&fl, 0, sizeof(fl));

	switch (skb->protocol) {
	case htons(ETH_P_IPV6):
		xfrm_decode_session(skb, &fl, AF_INET6);
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
		break;
	case htons(ETH_P_IP):
		xfrm_decode_session(skb, &fl, AF_INET);
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		break;
	default:
		goto tx_err;
	}

	fl.flowi_oif = xi->p.link;

	ret = xfrmi_xmit2(skb, dev, &fl);
	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

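/*
 * ICMP error handler for IPv4 ESP/AH/IPCOMP.  Only fragmentation-needed
 * and redirect messages that map to an xfrm state owned by one of our
 * interfaces are acted upon, by updating the PMTU or the route.
 */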
static int xfrmi4_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	int protocol = iph->protocol;
	struct ip_comp_hdr *ipch;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct xfrm_state *x;
	struct xfrm_if *xi;
	__be32 spi;

	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		/* fall through */
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET);
	if (!x)
		return 0;

	xi = xfrmi_lookup(net, x);
	if (!xi) {
		xfrm_state_put(x);
		return -1;
	}

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, protocol);
	else
		ipv4_redirect(skb, net, 0, protocol);
	xfrm_state_put(x);

	return 0;
}

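/*
 * ICMPv6 error handler for ESP/AH/IPCOMP: handles packet-too-big and
 * neighbour discovery redirects for states that belong to one of our
 * interfaces.
 */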
static int xfrmi6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	int protocol = iph->nexthdr;
	struct ip_comp_hdr *ipch;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct xfrm_state *x;
	struct xfrm_if *xi;
	__be32 spi;

	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data + offset);
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data + offset);
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data + offset);
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET6);
	if (!x)
		return 0;

	xi = xfrmi_lookup(net, x);
	if (!xi) {
		xfrm_state_put(x);
		return -1;
	}

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
{
	if (xi->p.link != p->link)
		return -EINVAL;

	xi->p.if_id = p->if_id;

	return 0;
}

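/*
 * Apply changed netlink parameters.  The interface is taken off the lookup
 * list while it is updated so that RCU readers never see a half-updated
 * entry.
 */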
static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
{
	struct net *net = xi->net;
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
	int err;

	xfrmi_unlink(xfrmn, xi);
	synchronize_net();
	err = xfrmi_change(xi, p);
	xfrmi_link(xfrmn, xi);
	netdev_state_change(xi->dev);
	return err;
}

static void xfrmi_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes   = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes   += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes   += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
}

static int xfrmi_get_iflink(const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);

	return xi->p.link;
}

static const struct net_device_ops xfrmi_netdev_ops = {
	.ndo_init	= xfrmi_dev_init,
	.ndo_uninit	= xfrmi_dev_uninit,
	.ndo_start_xmit = xfrmi_xmit,
	.ndo_get_stats64 = xfrmi_get_stats64,
	.ndo_get_iflink = xfrmi_get_iflink,
};

static void xfrmi_dev_setup(struct net_device *dev)
{
	dev->netdev_ops		= &xfrmi_netdev_ops;
	dev->type		= ARPHRD_NONE;
	dev->hard_header_len	= ETH_HLEN;
	dev->min_header_len	= ETH_HLEN;
	dev->mtu		= ETH_DATA_LEN;
	dev->min_mtu		= ETH_MIN_MTU;
	dev->max_mtu		= ETH_DATA_LEN;
	dev->addr_len		= ETH_ALEN;
	dev->flags		= IFF_NOARP;
	dev->needs_free_netdev	= true;
	dev->priv_destructor	= xfrmi_dev_free;
	netif_keep_dst(dev);

	eth_broadcast_addr(dev->broadcast);
}

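/*
 * ndo_init: allocate per-cpu statistics and GRO cells, and inherit
 * headroom and addresses from the underlying device when one is
 * configured.
 */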
static int xfrmi_dev_init(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&xi->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features |= NETIF_F_LLTX;

	if (phydev) {
		dev->needed_headroom = phydev->needed_headroom;
		dev->needed_tailroom = phydev->needed_tailroom;

		if (is_zero_ether_addr(dev->dev_addr))
			eth_hw_addr_inherit(dev, phydev);
		if (is_zero_ether_addr(dev->broadcast))
			memcpy(dev->broadcast, phydev->broadcast,
			       dev->addr_len);
	} else {
		eth_hw_addr_random(dev);
		eth_broadcast_addr(dev->broadcast);
	}

	return 0;
}

static int xfrmi_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	return 0;
}

static void xfrmi_netlink_parms(struct nlattr *data[],
			       struct xfrm_if_parms *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_XFRM_LINK])
		parms->link = nla_get_u32(data[IFLA_XFRM_LINK]);

	if (data[IFLA_XFRM_IF_ID])
		parms->if_id = nla_get_u32(data[IFLA_XFRM_IF_ID]);
}

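/*
 * rtnl newlink handler: refuse to create a second interface with the same
 * if_id in a namespace, otherwise register the device and add it to the
 * lookup list via xfrmi_create().
 */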
static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);
	struct xfrm_if_parms p;
	struct xfrm_if *xi;
	int err;

	xfrmi_netlink_parms(data, &p);
	xi = xfrmi_locate(net, &p);
	if (xi)
		return -EEXIST;

	xi = netdev_priv(dev);
	xi->p = p;
	xi->net = net;
	xi->dev = dev;

	err = xfrmi_create(dev);
	return err;
}

static void xfrmi_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}

static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
			   struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net *net = xi->net;
	struct xfrm_if_parms p;

	xfrmi_netlink_parms(data, &p);
	xi = xfrmi_locate(net, &p);
	if (!xi) {
		xi = netdev_priv(dev);
	} else {
		if (xi->dev != dev)
			return -EEXIST;
	}

	return xfrmi_update(xi, &p);
}

static size_t xfrmi_get_size(const struct net_device *dev)
{
	return
		/* IFLA_XFRM_LINK */
		nla_total_size(4) +
		/* IFLA_XFRM_IF_ID */
		nla_total_size(4) +
		0;
}

static int xfrmi_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct xfrm_if_parms *parm = &xi->p;

	if (nla_put_u32(skb, IFLA_XFRM_LINK, parm->link) ||
	    nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct net *xfrmi_get_link_net(const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);

	return xi->net;
}

static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
	[IFLA_XFRM_LINK]	= { .type = NLA_U32 },
	[IFLA_XFRM_IF_ID]	= { .type = NLA_U32 },
};

static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
	.kind		= "xfrm",
	.maxtype	= IFLA_XFRM_MAX,
	.policy		= xfrmi_policy,
	.priv_size	= sizeof(struct xfrm_if),
	.setup		= xfrmi_dev_setup,
	.validate	= xfrmi_validate,
	.newlink	= xfrmi_newlink,
	.dellink	= xfrmi_dellink,
	.changelink	= xfrmi_changelink,
	.get_size	= xfrmi_get_size,
	.fill_info	= xfrmi_fill_info,
	.get_link_net	= xfrmi_get_link_net,
};

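/*
 * Called under RTNL from the pernet exit path to unregister devices left
 * in a network namespace that is being dismantled.
 */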
static void __net_exit xfrmi_destroy_interfaces(struct xfrmi_net *xfrmn)
{
	struct xfrm_if *xi;
	LIST_HEAD(list);

	xi = rtnl_dereference(xfrmn->xfrmi[0]);
	if (!xi)
		return;

	unregister_netdevice_queue(xi->dev, &list);
	unregister_netdevice_many(&list);
}

static void __net_exit xfrmi_exit_net(struct net *net)
{
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);

	rtnl_lock();
	xfrmi_destroy_interfaces(xfrmn);
	rtnl_unlock();
}

static struct pernet_operations xfrmi_net_ops = {
	.exit = xfrmi_exit_net,
	.id   = &xfrmi_net_id,
	.size = sizeof(struct xfrmi_net),
};

static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
	.handler	=	xfrm6_rcv,
	.cb_handler	=	xfrmi_rcv_cb,
	.err_handler	=	xfrmi6_err,
	.priority	=	10,
};

static struct xfrm6_protocol xfrmi_ah6_protocol __read_mostly = {
	.handler	=	xfrm6_rcv,
	.cb_handler	=	xfrmi_rcv_cb,
	.err_handler	=	xfrmi6_err,
	.priority	=	10,
};

static struct xfrm6_protocol xfrmi_ipcomp6_protocol __read_mostly = {
	.handler	=	xfrm6_rcv,
	.cb_handler	=	xfrmi_rcv_cb,
	.err_handler	=	xfrmi6_err,
	.priority	=	10,
};

static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
	.handler	=	xfrm4_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	xfrmi_rcv_cb,
	.err_handler	=	xfrmi4_err,
	.priority	=	10,
};

static struct xfrm4_protocol xfrmi_ah4_protocol __read_mostly = {
	.handler	=	xfrm4_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	xfrmi_rcv_cb,
	.err_handler	=	xfrmi4_err,
	.priority	=	10,
};

static struct xfrm4_protocol xfrmi_ipcomp4_protocol __read_mostly = {
	.handler	=	xfrm4_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	xfrmi_rcv_cb,
	.err_handler	=	xfrmi4_err,
	.priority	=	10,
};

static int __init xfrmi4_init(void)
{
	int err;

	err = xfrm4_protocol_register(&xfrmi_esp4_protocol, IPPROTO_ESP);
	if (err < 0)
		goto xfrm_proto_esp_failed;
	err = xfrm4_protocol_register(&xfrmi_ah4_protocol, IPPROTO_AH);
	if (err < 0)
		goto xfrm_proto_ah_failed;
	err = xfrm4_protocol_register(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
	if (err < 0)
		goto xfrm_proto_comp_failed;

	return 0;

xfrm_proto_comp_failed:
	xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
	return err;
}

static void xfrmi4_fini(void)
{
	xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
	xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
	xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
}

static int __init xfrmi6_init(void)
{
	int err;

	err = xfrm6_protocol_register(&xfrmi_esp6_protocol, IPPROTO_ESP);
	if (err < 0)
		goto xfrm_proto_esp_failed;
	err = xfrm6_protocol_register(&xfrmi_ah6_protocol, IPPROTO_AH);
	if (err < 0)
		goto xfrm_proto_ah_failed;
	err = xfrm6_protocol_register(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
	if (err < 0)
		goto xfrm_proto_comp_failed;

	return 0;

xfrm_proto_comp_failed:
	xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
	return err;
}

static void xfrmi6_fini(void)
{
	xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
	xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
	xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
}

static const struct xfrm_if_cb xfrm_if_cb = {
	.decode_session =	xfrmi_decode_session,
};

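/*
 * Module init: register the pernet state, the IPv4/IPv6 xfrm protocol
 * handlers, the rtnetlink ops and finally the decode_session callback.
 * Registration is unwound in reverse order on failure.
 */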
static int __init xfrmi_init(void)
{
	const char *msg;
	int err;

	pr_info("IPsec XFRM device driver\n");

	msg = "tunnel device";
	err = register_pernet_device(&xfrmi_net_ops);
	if (err < 0)
		goto pernet_dev_failed;

	msg = "xfrm4 protocols";
	err = xfrmi4_init();
	if (err < 0)
		goto xfrmi4_failed;

	msg = "xfrm6 protocols";
	err = xfrmi6_init();
	if (err < 0)
		goto xfrmi6_failed;

	msg = "netlink interface";
	err = rtnl_link_register(&xfrmi_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	xfrm_if_register_cb(&xfrm_if_cb);

	return err;

rtnl_link_failed:
	xfrmi6_fini();
xfrmi6_failed:
	xfrmi4_fini();
xfrmi4_failed:
	unregister_pernet_device(&xfrmi_net_ops);
pernet_dev_failed:
	pr_err("xfrmi init: failed to register %s\n", msg);
	return err;
}

static void __exit xfrmi_fini(void)
{
	xfrm_if_unregister_cb();
	rtnl_link_unregister(&xfrmi_link_ops);
	xfrmi4_fini();
	xfrmi6_fini();
	unregister_pernet_device(&xfrmi_net_ops);
}

module_init(xfrmi_init);
module_exit(xfrmi_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("xfrm");
MODULE_ALIAS_NETDEV("xfrm0");
MODULE_AUTHOR("Steffen Klassert");
MODULE_DESCRIPTION("XFRM virtual interface");