v6.8
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/* L2TPv3 IP encapsulation support
  3 *
  4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
  5 */
  6
  7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8
  9#include <asm/ioctls.h>
 10#include <linux/icmp.h>
 11#include <linux/module.h>
 12#include <linux/skbuff.h>
 13#include <linux/random.h>
 14#include <linux/socket.h>
 15#include <linux/l2tp.h>
 16#include <linux/in.h>
 17#include <net/sock.h>
 18#include <net/ip.h>
 19#include <net/icmp.h>
 20#include <net/udp.h>
 21#include <net/inet_common.h>
 22#include <net/tcp_states.h>
 23#include <net/protocol.h>
 24#include <net/xfrm.h>
 25
 26#include "l2tp_core.h"
 27
 28struct l2tp_ip_sock {
 29	/* inet_sock has to be the first member of l2tp_ip_sock */
 30	struct inet_sock	inet;
 31
 32	u32			conn_id;
 33	u32			peer_conn_id;
 34};
 35
 36static DEFINE_RWLOCK(l2tp_ip_lock);
 37static struct hlist_head l2tp_ip_table;
 38static struct hlist_head l2tp_ip_bind_table;
 39
 40static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
 41{
 42	return (struct l2tp_ip_sock *)sk;
 43}
 44
 45static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
 46					  __be32 raddr, int dif, u32 tunnel_id)
 47{
 48	struct sock *sk;
 49
 50	sk_for_each_bound(sk, &l2tp_ip_bind_table) {
 51		const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
 52		const struct inet_sock *inet = inet_sk(sk);
 53		int bound_dev_if;
 54
 55		if (!net_eq(sock_net(sk), net))
 56			continue;
 57
 58		bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
 59		if (bound_dev_if && dif && bound_dev_if != dif)
 60			continue;
 61
 62		if (inet->inet_rcv_saddr && laddr &&
 63		    inet->inet_rcv_saddr != laddr)
 64			continue;
 65
 66		if (inet->inet_daddr && raddr && inet->inet_daddr != raddr)
 67			continue;
 68
 69		if (l2tp->conn_id != tunnel_id)
 70			continue;
 71
 72		goto found;
 73	}
 74
 75	sk = NULL;
 76found:
 77	return sk;
 78}
 79
 80/* When processing receive frames, there are two cases to
 81 * consider. Data frames consist of a non-zero session-id and an
 82 * optional cookie. Control frames consist of a regular L2TP header
 83 * preceded by 32-bits of zeros.
 84 *
 85 * L2TPv3 Session Header Over IP
 86 *
 87 *  0                   1                   2                   3
 88 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 89 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 90 * |                           Session ID                          |
 91 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 92 * |               Cookie (optional, maximum 64 bits)...
 93 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 94 *                                                                 |
 95 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 96 *
 97 * L2TPv3 Control Message Header Over IP
 98 *
 99 *  0                   1                   2                   3
100 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
101 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
102 * |                      (32 bits of zeros)                       |
103 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
104 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
105 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
106 * |                     Control Connection ID                     |
107 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
108 * |               Ns              |               Nr              |
109 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
110 *
111 * All control frames are passed to userspace.
112 */
113static int l2tp_ip_recv(struct sk_buff *skb)
114{
115	struct net *net = dev_net(skb->dev);
116	struct sock *sk;
117	u32 session_id;
118	u32 tunnel_id;
119	unsigned char *ptr, *optr;
120	struct l2tp_session *session;
121	struct l2tp_tunnel *tunnel = NULL;
122	struct iphdr *iph;
123
124	if (!pskb_may_pull(skb, 4))
125		goto discard;
126
127	/* Point to L2TP header */
128	optr = skb->data;
129	ptr = skb->data;
130	session_id = ntohl(*((__be32 *)ptr));
131	ptr += 4;
132
133	/* RFC3931: L2TP/IP packets have the first 4 bytes containing
134	 * the session_id. If it is 0, the packet is a L2TP control
135	 * frame and the session_id value can be discarded.
136	 */
137	if (session_id == 0) {
138		__skb_pull(skb, 4);
139		goto pass_up;
140	}
141
142	/* Ok, this is a data packet. Lookup the session. */
143	session = l2tp_session_get(net, session_id);
144	if (!session)
145		goto discard;
146
147	tunnel = session->tunnel;
148	if (!tunnel)
149		goto discard_sess;
150
151	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
152		goto discard_sess;
153
154	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
155	l2tp_session_dec_refcount(session);
156
157	return 0;
158
159pass_up:
160	/* Get the tunnel_id from the L2TP header */
161	if (!pskb_may_pull(skb, 12))
162		goto discard;
163
164	if ((skb->data[0] & 0xc0) != 0xc0)
165		goto discard;
166
167	tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
168	iph = (struct iphdr *)skb_network_header(skb);
169
170	read_lock_bh(&l2tp_ip_lock);
171	sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
172				   tunnel_id);
173	if (!sk) {
174		read_unlock_bh(&l2tp_ip_lock);
175		goto discard;
176	}
177	sock_hold(sk);
178	read_unlock_bh(&l2tp_ip_lock);
179
180	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
181		goto discard_put;
182
183	nf_reset_ct(skb);
184
185	return sk_receive_skb(sk, skb, 1);
186
187discard_sess:
188	l2tp_session_dec_refcount(session);
189	goto discard;
190
191discard_put:
192	sock_put(sk);
193
194discard:
195	kfree_skb(skb);
196	return 0;
197}
198
199static int l2tp_ip_hash(struct sock *sk)
200{
201	if (sk_unhashed(sk)) {
202		write_lock_bh(&l2tp_ip_lock);
203		sk_add_node(sk, &l2tp_ip_table);
204		write_unlock_bh(&l2tp_ip_lock);
205	}
206	return 0;
207}
208
209static void l2tp_ip_unhash(struct sock *sk)
210{
211	if (sk_unhashed(sk))
212		return;
213	write_lock_bh(&l2tp_ip_lock);
214	sk_del_node_init(sk);
215	write_unlock_bh(&l2tp_ip_lock);
216}
217
218static int l2tp_ip_open(struct sock *sk)
219{
220	/* Prevent autobind. We don't have ports. */
221	inet_sk(sk)->inet_num = IPPROTO_L2TP;
222
223	l2tp_ip_hash(sk);
224	return 0;
225}
226
227static void l2tp_ip_close(struct sock *sk, long timeout)
228{
229	write_lock_bh(&l2tp_ip_lock);
230	hlist_del_init(&sk->sk_bind_node);
231	sk_del_node_init(sk);
232	write_unlock_bh(&l2tp_ip_lock);
233	sk_common_release(sk);
234}
235
236static void l2tp_ip_destroy_sock(struct sock *sk)
237{
238	struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
239	struct sk_buff *skb;
240
241	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
242		kfree_skb(skb);
243
244	if (tunnel)
245		l2tp_tunnel_delete(tunnel);
246}
247
248static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
249{
250	struct inet_sock *inet = inet_sk(sk);
251	struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *)uaddr;
252	struct net *net = sock_net(sk);
253	int ret;
254	int chk_addr_ret;
255
256	if (addr_len < sizeof(struct sockaddr_l2tpip))
257		return -EINVAL;
258	if (addr->l2tp_family != AF_INET)
259		return -EINVAL;
260
261	lock_sock(sk);
262
263	ret = -EINVAL;
264	if (!sock_flag(sk, SOCK_ZAPPED))
265		goto out;
266
267	if (sk->sk_state != TCP_CLOSE)
268		goto out;
269
270	chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
271	ret = -EADDRNOTAVAIL;
272	if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
273	    chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
274		goto out;
275
276	if (addr->l2tp_addr.s_addr) {
277		inet->inet_rcv_saddr = addr->l2tp_addr.s_addr;
278		inet->inet_saddr = addr->l2tp_addr.s_addr;
279	}
280	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
281		inet->inet_saddr = 0;  /* Use device */
282
283	write_lock_bh(&l2tp_ip_lock);
284	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
285				  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
286		write_unlock_bh(&l2tp_ip_lock);
287		ret = -EADDRINUSE;
288		goto out;
289	}
290
291	sk_dst_reset(sk);
292	l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
293
294	sk_add_bind_node(sk, &l2tp_ip_bind_table);
295	sk_del_node_init(sk);
296	write_unlock_bh(&l2tp_ip_lock);
297
298	ret = 0;
299	sock_reset_flag(sk, SOCK_ZAPPED);
300
301out:
302	release_sock(sk);
303
304	return ret;
305}
306
307static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
308{
309	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
310	int rc;
311
312	if (addr_len < sizeof(*lsa))
313		return -EINVAL;
314
315	if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
316		return -EINVAL;
317
318	lock_sock(sk);
319
320	/* Must bind first - autobinding does not work */
321	if (sock_flag(sk, SOCK_ZAPPED)) {
322		rc = -EINVAL;
323		goto out_sk;
324	}
325
326	rc = __ip4_datagram_connect(sk, uaddr, addr_len);
327	if (rc < 0)
328		goto out_sk;
329
330	l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
331
332	write_lock_bh(&l2tp_ip_lock);
333	hlist_del_init(&sk->sk_bind_node);
334	sk_add_bind_node(sk, &l2tp_ip_bind_table);
335	write_unlock_bh(&l2tp_ip_lock);
336
337out_sk:
338	release_sock(sk);
339
340	return rc;
341}
342
343static int l2tp_ip_disconnect(struct sock *sk, int flags)
344{
345	if (sock_flag(sk, SOCK_ZAPPED))
346		return 0;
347
348	return __udp_disconnect(sk, flags);
349}
350
351static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
352			   int peer)
353{
354	struct sock *sk		= sock->sk;
355	struct inet_sock *inet	= inet_sk(sk);
356	struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
357	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
358
359	memset(lsa, 0, sizeof(*lsa));
360	lsa->l2tp_family = AF_INET;
361	if (peer) {
362		if (!inet->inet_dport)
363			return -ENOTCONN;
364		lsa->l2tp_conn_id = lsk->peer_conn_id;
365		lsa->l2tp_addr.s_addr = inet->inet_daddr;
366	} else {
367		__be32 addr = inet->inet_rcv_saddr;
368
369		if (!addr)
370			addr = inet->inet_saddr;
371		lsa->l2tp_conn_id = lsk->conn_id;
372		lsa->l2tp_addr.s_addr = addr;
373	}
374	return sizeof(*lsa);
375}
376
377static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
378{
379	int rc;
380
381	/* Charge it to the socket, dropping if the queue is full. */
382	rc = sock_queue_rcv_skb(sk, skb);
383	if (rc < 0)
384		goto drop;
385
386	return 0;
387
388drop:
389	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
390	kfree_skb(skb);
391	return 0;
392}
393
394/* Userspace will call sendmsg() on the tunnel socket to send L2TP
395 * control frames.
396 */
397static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
398{
399	struct sk_buff *skb;
400	int rc;
401	struct inet_sock *inet = inet_sk(sk);
402	struct rtable *rt = NULL;
403	struct flowi4 *fl4;
404	int connected = 0;
405	__be32 daddr;
406
407	lock_sock(sk);
408
409	rc = -ENOTCONN;
410	if (sock_flag(sk, SOCK_DEAD))
411		goto out;
412
413	/* Get and verify the address. */
414	if (msg->msg_name) {
415		DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name);
416
417		rc = -EINVAL;
418		if (msg->msg_namelen < sizeof(*lip))
419			goto out;
420
421		if (lip->l2tp_family != AF_INET) {
422			rc = -EAFNOSUPPORT;
423			if (lip->l2tp_family != AF_UNSPEC)
424				goto out;
425		}
426
427		daddr = lip->l2tp_addr.s_addr;
428	} else {
429		rc = -EDESTADDRREQ;
430		if (sk->sk_state != TCP_ESTABLISHED)
431			goto out;
432
433		daddr = inet->inet_daddr;
434		connected = 1;
435	}
436
437	/* Allocate a socket buffer */
438	rc = -ENOMEM;
439	skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
440			   4 + len, 0, GFP_KERNEL);
441	if (!skb)
442		goto error;
443
444	/* Reserve space for headers, putting IP header on 4-byte boundary. */
445	skb_reserve(skb, 2 + NET_SKB_PAD);
446	skb_reset_network_header(skb);
447	skb_reserve(skb, sizeof(struct iphdr));
448	skb_reset_transport_header(skb);
449
450	/* Insert 0 session_id */
451	*((__be32 *)skb_put(skb, 4)) = 0;
452
453	/* Copy user data into skb */
454	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
455	if (rc < 0) {
456		kfree_skb(skb);
457		goto error;
458	}
459
460	fl4 = &inet->cork.fl.u.ip4;
461	if (connected)
462		rt = (struct rtable *)__sk_dst_check(sk, 0);
463
464	rcu_read_lock();
465	if (!rt) {
466		const struct ip_options_rcu *inet_opt;
467
468		inet_opt = rcu_dereference(inet->inet_opt);
469
470		/* Use correct destination address if we have options. */
471		if (inet_opt && inet_opt->opt.srr)
472			daddr = inet_opt->opt.faddr;
473
474		/* If this fails, retransmit mechanism of transport layer will
475		 * keep trying until route appears or the connection times
476		 * itself out.
477		 */
478		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
479					   daddr, inet->inet_saddr,
480					   inet->inet_dport, inet->inet_sport,
481					   sk->sk_protocol, RT_CONN_FLAGS(sk),
482					   sk->sk_bound_dev_if);
483		if (IS_ERR(rt))
484			goto no_route;
485		if (connected) {
486			sk_setup_caps(sk, &rt->dst);
487		} else {
488			skb_dst_set(skb, &rt->dst);
489			goto xmit;
490		}
491	}
492
493	/* We don't need to clone dst here, it is guaranteed to not disappear.
494	 *  __dev_xmit_skb() might force a refcount if needed.
495	 */
496	skb_dst_set_noref(skb, &rt->dst);
497
498xmit:
499	/* Queue the packet to IP for output */
500	rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
501	rcu_read_unlock();
502
503error:
504	if (rc >= 0)
505		rc = len;
506
507out:
508	release_sock(sk);
509	return rc;
510
511no_route:
512	rcu_read_unlock();
513	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
514	kfree_skb(skb);
515	rc = -EHOSTUNREACH;
516	goto out;
517}
518
519static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
520			   size_t len, int flags, int *addr_len)
521{
522	struct inet_sock *inet = inet_sk(sk);
523	size_t copied = 0;
524	int err = -EOPNOTSUPP;
525	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
526	struct sk_buff *skb;
527
528	if (flags & MSG_OOB)
529		goto out;
530
531	skb = skb_recv_datagram(sk, flags, &err);
532	if (!skb)
533		goto out;
534
535	copied = skb->len;
536	if (len < copied) {
537		msg->msg_flags |= MSG_TRUNC;
538		copied = len;
539	}
540
541	err = skb_copy_datagram_msg(skb, 0, msg, copied);
542	if (err)
543		goto done;
544
545	sock_recv_timestamp(msg, sk, skb);
546
547	/* Copy the address. */
548	if (sin) {
549		sin->sin_family = AF_INET;
550		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
551		sin->sin_port = 0;
552		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
553		*addr_len = sizeof(*sin);
554	}
555	if (inet_cmsg_flags(inet))
556		ip_cmsg_recv(msg, skb);
557	if (flags & MSG_TRUNC)
558		copied = skb->len;
559done:
560	skb_free_datagram(sk, skb);
561out:
562	return err ? err : copied;
563}
564
565int l2tp_ioctl(struct sock *sk, int cmd, int *karg)
566{
567	struct sk_buff *skb;
568
569	switch (cmd) {
570	case SIOCOUTQ:
571		*karg = sk_wmem_alloc_get(sk);
572		break;
573	case SIOCINQ:
574		spin_lock_bh(&sk->sk_receive_queue.lock);
575		skb = skb_peek(&sk->sk_receive_queue);
576		*karg = skb ? skb->len : 0;
577		spin_unlock_bh(&sk->sk_receive_queue.lock);
578		break;
579
580	default:
581		return -ENOIOCTLCMD;
582	}
583
584	return 0;
585}
586EXPORT_SYMBOL_GPL(l2tp_ioctl);
587
588static struct proto l2tp_ip_prot = {
589	.name		   = "L2TP/IP",
590	.owner		   = THIS_MODULE,
591	.init		   = l2tp_ip_open,
592	.close		   = l2tp_ip_close,
593	.bind		   = l2tp_ip_bind,
594	.connect	   = l2tp_ip_connect,
595	.disconnect	   = l2tp_ip_disconnect,
596	.ioctl		   = l2tp_ioctl,
597	.destroy	   = l2tp_ip_destroy_sock,
598	.setsockopt	   = ip_setsockopt,
599	.getsockopt	   = ip_getsockopt,
600	.sendmsg	   = l2tp_ip_sendmsg,
601	.recvmsg	   = l2tp_ip_recvmsg,
602	.backlog_rcv	   = l2tp_ip_backlog_recv,
603	.hash		   = l2tp_ip_hash,
604	.unhash		   = l2tp_ip_unhash,
605	.obj_size	   = sizeof(struct l2tp_ip_sock),
606};
607
608static const struct proto_ops l2tp_ip_ops = {
609	.family		   = PF_INET,
610	.owner		   = THIS_MODULE,
611	.release	   = inet_release,
612	.bind		   = inet_bind,
613	.connect	   = inet_dgram_connect,
614	.socketpair	   = sock_no_socketpair,
615	.accept		   = sock_no_accept,
616	.getname	   = l2tp_ip_getname,
617	.poll		   = datagram_poll,
618	.ioctl		   = inet_ioctl,
619	.gettstamp	   = sock_gettstamp,
620	.listen		   = sock_no_listen,
621	.shutdown	   = inet_shutdown,
622	.setsockopt	   = sock_common_setsockopt,
623	.getsockopt	   = sock_common_getsockopt,
624	.sendmsg	   = inet_sendmsg,
625	.recvmsg	   = sock_common_recvmsg,
626	.mmap		   = sock_no_mmap,
627};
628
629static struct inet_protosw l2tp_ip_protosw = {
630	.type		= SOCK_DGRAM,
631	.protocol	= IPPROTO_L2TP,
632	.prot		= &l2tp_ip_prot,
633	.ops		= &l2tp_ip_ops,
634};
635
636static struct net_protocol l2tp_ip_protocol __read_mostly = {
637	.handler	= l2tp_ip_recv,
638};
639
640static int __init l2tp_ip_init(void)
641{
642	int err;
643
644	pr_info("L2TP IP encapsulation support (L2TPv3)\n");
645
646	err = proto_register(&l2tp_ip_prot, 1);
647	if (err != 0)
648		goto out;
649
650	err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
651	if (err)
652		goto out1;
653
654	inet_register_protosw(&l2tp_ip_protosw);
655	return 0;
656
657out1:
658	proto_unregister(&l2tp_ip_prot);
659out:
660	return err;
661}
662
663static void __exit l2tp_ip_exit(void)
664{
665	inet_unregister_protosw(&l2tp_ip_protosw);
666	inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
667	proto_unregister(&l2tp_ip_prot);
668}
669
670module_init(l2tp_ip_init);
671module_exit(l2tp_ip_exit);
672
673MODULE_LICENSE("GPL");
674MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
675MODULE_DESCRIPTION("L2TP over IP");
676MODULE_VERSION("1.0");
677
678/* Use the values of SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol,
679 * because __stringify doesn't like enums
680 */
681MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 115, 2);
682MODULE_ALIAS_NET_PF_PROTO(PF_INET, 115);
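
The module above registers IPPROTO_L2TP for SOCK_DGRAM inet sockets, so a tunnel socket is driven entirely through the ordinary socket calls: bind() takes a struct sockaddr_l2tpip carrying the local address and tunnel connection ID, connect() takes the peer's, and L2TPv3 control frames are exchanged with sendmsg()/recvmsg() (l2tp_ip_sendmsg() prepends the 32-bit zero session ID itself). The following userspace sketch illustrates that flow; it is not part of l2tp_ip.c, and the helper name, addresses and connection IDs are placeholders chosen for illustration.

/* Hypothetical userspace sketch, not kernel code: open an L2TPv3-over-IP
 * tunnel socket, bind it to a local address and tunnel connection ID, and
 * connect it to the peer. Error handling is reduced to the essentials.
 */
#include <arpa/inet.h>
#include <linux/l2tp.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int l2tp_ip_tunnel_open(void)
{
	struct sockaddr_l2tpip local, peer;
	int fd;

	/* SOCK_DGRAM + IPPROTO_L2TP selects the l2tp_ip_prot registered above */
	fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
	if (fd < 0)
		return -1;

	memset(&local, 0, sizeof(local));
	local.l2tp_family = AF_INET;
	local.l2tp_addr.s_addr = inet_addr("192.0.2.1");	/* placeholder */
	local.l2tp_conn_id = 1;					/* local tunnel ID */

	memset(&peer, 0, sizeof(peer));
	peer.l2tp_family = AF_INET;
	peer.l2tp_addr.s_addr = inet_addr("192.0.2.2");		/* placeholder */
	peer.l2tp_conn_id = 2;					/* peer tunnel ID */

	/* l2tp_ip_connect() rejects unbound (SOCK_ZAPPED) sockets, so bind first */
	if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0 ||
	    connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0) {
		close(fd);
		return -1;
	}

	/* Control frames sent on fd start at the T/L/Ver word; the kernel
	 * prepends the 32-bit zero session ID before transmission.
	 */
	return fd;
}
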
v3.5.6
  1/*
  2 * L2TPv3 IP encapsulation support
  3 *
  4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
  5 *
  6 *	This program is free software; you can redistribute it and/or
  7 *	modify it under the terms of the GNU General Public License
  8 *	as published by the Free Software Foundation; either version
  9 *	2 of the License, or (at your option) any later version.
 10 */
 11
 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 13
 14#include <linux/icmp.h>
 15#include <linux/module.h>
 16#include <linux/skbuff.h>
 17#include <linux/random.h>
 18#include <linux/socket.h>
 19#include <linux/l2tp.h>
 20#include <linux/in.h>
 21#include <net/sock.h>
 22#include <net/ip.h>
 23#include <net/icmp.h>
 24#include <net/udp.h>
 25#include <net/inet_common.h>
 26#include <net/inet_hashtables.h>
 27#include <net/tcp_states.h>
 28#include <net/protocol.h>
 29#include <net/xfrm.h>
 30
 31#include "l2tp_core.h"
 32
 33struct l2tp_ip_sock {
 34	/* inet_sock has to be the first member of l2tp_ip_sock */
 35	struct inet_sock	inet;
 36
 37	u32			conn_id;
 38	u32			peer_conn_id;
 39};
 40
 41static DEFINE_RWLOCK(l2tp_ip_lock);
 42static struct hlist_head l2tp_ip_table;
 43static struct hlist_head l2tp_ip_bind_table;
 44
 45static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
 46{
 47	return (struct l2tp_ip_sock *)sk;
 48}
 49
 50static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
 51{
 52	struct hlist_node *node;
 53	struct sock *sk;
 54
 55	sk_for_each_bound(sk, node, &l2tp_ip_bind_table) {
 56		struct inet_sock *inet = inet_sk(sk);
 57		struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
 58
 59		if (l2tp == NULL)
 60			continue;
 61
 62		if ((l2tp->conn_id == tunnel_id) &&
 63		    net_eq(sock_net(sk), net) &&
 64		    !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
 65		    !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
 66			goto found;
 67	}
 68
 69	sk = NULL;
 70found:
 71	return sk;
 72}
 73
 74static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
 75{
 76	struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
 77	if (sk)
 78		sock_hold(sk);
 79
 80	return sk;
 81}
 82
 83/* When processing receive frames, there are two cases to
 84 * consider. Data frames consist of a non-zero session-id and an
 85 * optional cookie. Control frames consist of a regular L2TP header
 86 * preceded by 32-bits of zeros.
 87 *
 88 * L2TPv3 Session Header Over IP
 89 *
 90 *  0                   1                   2                   3
 91 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 92 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 93 * |                           Session ID                          |
 94 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 95 * |               Cookie (optional, maximum 64 bits)...
 96 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 97 *                                                                 |
 98 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 99 *
100 * L2TPv3 Control Message Header Over IP
101 *
102 *  0                   1                   2                   3
103 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
104 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
105 * |                      (32 bits of zeros)                       |
106 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
107 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
108 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
109 * |                     Control Connection ID                     |
110 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
111 * |               Ns              |               Nr              |
112 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
113 *
114 * All control frames are passed to userspace.
115 */
116static int l2tp_ip_recv(struct sk_buff *skb)
117{
118	struct sock *sk;
119	u32 session_id;
120	u32 tunnel_id;
121	unsigned char *ptr, *optr;
122	struct l2tp_session *session;
123	struct l2tp_tunnel *tunnel = NULL;
124	int length;
125
126	/* Point to L2TP header */
127	optr = ptr = skb->data;
128
129	if (!pskb_may_pull(skb, 4))
130		goto discard;
131
132	session_id = ntohl(*((__be32 *) ptr));
133	ptr += 4;
134
135	/* RFC3931: L2TP/IP packets have the first 4 bytes containing
136	 * the session_id. If it is 0, the packet is a L2TP control
137	 * frame and the session_id value can be discarded.
138	 */
139	if (session_id == 0) {
140		__skb_pull(skb, 4);
141		goto pass_up;
142	}
143
144	/* Ok, this is a data packet. Lookup the session. */
145	session = l2tp_session_find(&init_net, NULL, session_id);
146	if (session == NULL)
147		goto discard;
148
149	tunnel = session->tunnel;
150	if (tunnel == NULL)
151		goto discard;
152
153	/* Trace packet contents, if enabled */
154	if (tunnel->debug & L2TP_MSG_DATA) {
155		length = min(32u, skb->len);
156		if (!pskb_may_pull(skb, length))
157			goto discard;
158
159		pr_debug("%s: ip recv\n", tunnel->name);
160		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
161	}
162
163	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
164
165	return 0;
166
167pass_up:
168	/* Get the tunnel_id from the L2TP header */
169	if (!pskb_may_pull(skb, 12))
170		goto discard;
171
172	if ((skb->data[0] & 0xc0) != 0xc0)
173		goto discard;
174
175	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
176	tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
177	if (tunnel != NULL)
178		sk = tunnel->sock;
179	else {
180		struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
181
182		read_lock_bh(&l2tp_ip_lock);
183		sk = __l2tp_ip_bind_lookup(&init_net, iph->daddr, 0, tunnel_id);
184		read_unlock_bh(&l2tp_ip_lock);
185	}
186
187	if (sk == NULL)
188		goto discard;
189
190	sock_hold(sk);
191
192	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
193		goto discard_put;
194
195	nf_reset(skb);
196
197	return sk_receive_skb(sk, skb, 1);
198
199discard_put:
200	sock_put(sk);
201
202discard:
203	kfree_skb(skb);
204	return 0;
205}
206
207static int l2tp_ip_open(struct sock *sk)
208{
209	/* Prevent autobind. We don't have ports. */
210	inet_sk(sk)->inet_num = IPPROTO_L2TP;
211
212	write_lock_bh(&l2tp_ip_lock);
213	sk_add_node(sk, &l2tp_ip_table);
214	write_unlock_bh(&l2tp_ip_lock);
215
216	return 0;
217}
218
219static void l2tp_ip_close(struct sock *sk, long timeout)
220{
221	write_lock_bh(&l2tp_ip_lock);
222	hlist_del_init(&sk->sk_bind_node);
223	sk_del_node_init(sk);
224	write_unlock_bh(&l2tp_ip_lock);
225	sk_common_release(sk);
226}
227
228static void l2tp_ip_destroy_sock(struct sock *sk)
229{
230	struct sk_buff *skb;
231
232	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
233		kfree_skb(skb);
234
235	sk_refcnt_debug_dec(sk);
236}
237
238static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
239{
240	struct inet_sock *inet = inet_sk(sk);
241	struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
242	int ret;
243	int chk_addr_ret;
244
245	if (!sock_flag(sk, SOCK_ZAPPED))
246		return -EINVAL;
247	if (addr_len < sizeof(struct sockaddr_l2tpip))
248		return -EINVAL;
249	if (addr->l2tp_family != AF_INET)
250		return -EINVAL;
251
252	ret = -EADDRINUSE;
253	read_lock_bh(&l2tp_ip_lock);
254	if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
255		goto out_in_use;
256
257	read_unlock_bh(&l2tp_ip_lock);
258
259	lock_sock(sk);
260	if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
261		goto out;
262
263	chk_addr_ret = inet_addr_type(&init_net, addr->l2tp_addr.s_addr);
264	ret = -EADDRNOTAVAIL;
265	if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
266	    chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
267		goto out;
268
269	if (addr->l2tp_addr.s_addr)
270		inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
271	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
272		inet->inet_saddr = 0;  /* Use device */
273	sk_dst_reset(sk);
274
275	l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
276
277	write_lock_bh(&l2tp_ip_lock);
278	sk_add_bind_node(sk, &l2tp_ip_bind_table);
279	sk_del_node_init(sk);
280	write_unlock_bh(&l2tp_ip_lock);
281	ret = 0;
282	sock_reset_flag(sk, SOCK_ZAPPED);
283
284out:
285	release_sock(sk);
286
287	return ret;
288
289out_in_use:
290	read_unlock_bh(&l2tp_ip_lock);
291
292	return ret;
293}
294
295static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
296{
297	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
298	int rc;
299
300	if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
301		return -EINVAL;
302
303	if (addr_len < sizeof(*lsa))
304		return -EINVAL;
305
306	if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
307		return -EINVAL;
308
309	rc = ip4_datagram_connect(sk, uaddr, addr_len);
310	if (rc < 0)
311		return rc;
312
313	lock_sock(sk);
314
315	l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
316
317	write_lock_bh(&l2tp_ip_lock);
318	hlist_del_init(&sk->sk_bind_node);
319	sk_add_bind_node(sk, &l2tp_ip_bind_table);
320	write_unlock_bh(&l2tp_ip_lock);
321
322	release_sock(sk);
323	return rc;
324}
325
326static int l2tp_ip_disconnect(struct sock *sk, int flags)
327{
328	if (sock_flag(sk, SOCK_ZAPPED))
329		return 0;
330
331	return udp_disconnect(sk, flags);
332}
333
334static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
335			   int *uaddr_len, int peer)
336{
337	struct sock *sk		= sock->sk;
338	struct inet_sock *inet	= inet_sk(sk);
339	struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
340	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
341
342	memset(lsa, 0, sizeof(*lsa));
343	lsa->l2tp_family = AF_INET;
344	if (peer) {
345		if (!inet->inet_dport)
346			return -ENOTCONN;
347		lsa->l2tp_conn_id = lsk->peer_conn_id;
348		lsa->l2tp_addr.s_addr = inet->inet_daddr;
349	} else {
350		__be32 addr = inet->inet_rcv_saddr;
351		if (!addr)
352			addr = inet->inet_saddr;
353		lsa->l2tp_conn_id = lsk->conn_id;
354		lsa->l2tp_addr.s_addr = addr;
355	}
356	*uaddr_len = sizeof(*lsa);
357	return 0;
358}
359
360static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
361{
362	int rc;
363
364	/* Charge it to the socket, dropping if the queue is full. */
365	rc = sock_queue_rcv_skb(sk, skb);
366	if (rc < 0)
367		goto drop;
368
369	return 0;
370
371drop:
372	IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
373	kfree_skb(skb);
374	return -1;
375}
376
377/* Userspace will call sendmsg() on the tunnel socket to send L2TP
378 * control frames.
379 */
380static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len)
381{
382	struct sk_buff *skb;
383	int rc;
384	struct inet_sock *inet = inet_sk(sk);
385	struct rtable *rt = NULL;
386	struct flowi4 *fl4;
387	int connected = 0;
388	__be32 daddr;
389
390	lock_sock(sk);
391
392	rc = -ENOTCONN;
393	if (sock_flag(sk, SOCK_DEAD))
394		goto out;
395
396	/* Get and verify the address. */
397	if (msg->msg_name) {
398		struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name;
399		rc = -EINVAL;
400		if (msg->msg_namelen < sizeof(*lip))
401			goto out;
402
403		if (lip->l2tp_family != AF_INET) {
404			rc = -EAFNOSUPPORT;
405			if (lip->l2tp_family != AF_UNSPEC)
406				goto out;
407		}
408
409		daddr = lip->l2tp_addr.s_addr;
410	} else {
411		rc = -EDESTADDRREQ;
412		if (sk->sk_state != TCP_ESTABLISHED)
413			goto out;
414
415		daddr = inet->inet_daddr;
416		connected = 1;
417	}
418
419	/* Allocate a socket buffer */
420	rc = -ENOMEM;
421	skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
422			   4 + len, 0, GFP_KERNEL);
423	if (!skb)
424		goto error;
425
426	/* Reserve space for headers, putting IP header on 4-byte boundary. */
427	skb_reserve(skb, 2 + NET_SKB_PAD);
428	skb_reset_network_header(skb);
429	skb_reserve(skb, sizeof(struct iphdr));
430	skb_reset_transport_header(skb);
431
432	/* Insert 0 session_id */
433	*((__be32 *) skb_put(skb, 4)) = 0;
434
435	/* Copy user data into skb */
436	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
437	if (rc < 0) {
438		kfree_skb(skb);
439		goto error;
440	}
441
442	fl4 = &inet->cork.fl.u.ip4;
443	if (connected)
444		rt = (struct rtable *) __sk_dst_check(sk, 0);
445
446	rcu_read_lock();
447	if (rt == NULL) {
448		const struct ip_options_rcu *inet_opt;
449
450		inet_opt = rcu_dereference(inet->inet_opt);
451
452		/* Use correct destination address if we have options. */
453		if (inet_opt && inet_opt->opt.srr)
454			daddr = inet_opt->opt.faddr;
455
456		/* If this fails, retransmit mechanism of transport layer will
457		 * keep trying until route appears or the connection times
458		 * itself out.
459		 */
460		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
461					   daddr, inet->inet_saddr,
462					   inet->inet_dport, inet->inet_sport,
463					   sk->sk_protocol, RT_CONN_FLAGS(sk),
464					   sk->sk_bound_dev_if);
465		if (IS_ERR(rt))
466			goto no_route;
467		if (connected) {
468			sk_setup_caps(sk, &rt->dst);
469		} else {
470			skb_dst_set(skb, &rt->dst);
471			goto xmit;
472		}
473	}
474
475	/* We don't need to clone dst here, it is guaranteed to not disappear.
476	 *  __dev_xmit_skb() might force a refcount if needed.
477	 */
478	skb_dst_set_noref(skb, &rt->dst);
479
480xmit:
481	/* Queue the packet to IP for output */
482	rc = ip_queue_xmit(skb, &inet->cork.fl);
483	rcu_read_unlock();
484
485error:
486	if (rc >= 0)
487		rc = len;
488
489out:
490	release_sock(sk);
491	return rc;
492
493no_route:
494	rcu_read_unlock();
495	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
496	kfree_skb(skb);
497	rc = -EHOSTUNREACH;
498	goto out;
499}
500
501static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
502			   size_t len, int noblock, int flags, int *addr_len)
503{
504	struct inet_sock *inet = inet_sk(sk);
505	size_t copied = 0;
506	int err = -EOPNOTSUPP;
507	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
508	struct sk_buff *skb;
509
510	if (flags & MSG_OOB)
511		goto out;
512
513	if (addr_len)
514		*addr_len = sizeof(*sin);
515
516	skb = skb_recv_datagram(sk, flags, noblock, &err);
517	if (!skb)
518		goto out;
519
520	copied = skb->len;
521	if (len < copied) {
522		msg->msg_flags |= MSG_TRUNC;
523		copied = len;
524	}
525
526	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
527	if (err)
528		goto done;
529
530	sock_recv_timestamp(msg, sk, skb);
531
532	/* Copy the address. */
533	if (sin) {
534		sin->sin_family = AF_INET;
535		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
536		sin->sin_port = 0;
537		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
538	}
539	if (inet->cmsg_flags)
540		ip_cmsg_recv(msg, skb);
541	if (flags & MSG_TRUNC)
542		copied = skb->len;
543done:
544	skb_free_datagram(sk, skb);
545out:
546	return err ? err : copied;
547}
548
549static struct proto l2tp_ip_prot = {
550	.name		   = "L2TP/IP",
551	.owner		   = THIS_MODULE,
552	.init		   = l2tp_ip_open,
553	.close		   = l2tp_ip_close,
554	.bind		   = l2tp_ip_bind,
555	.connect	   = l2tp_ip_connect,
556	.disconnect	   = l2tp_ip_disconnect,
557	.ioctl		   = udp_ioctl,
558	.destroy	   = l2tp_ip_destroy_sock,
559	.setsockopt	   = ip_setsockopt,
560	.getsockopt	   = ip_getsockopt,
561	.sendmsg	   = l2tp_ip_sendmsg,
562	.recvmsg	   = l2tp_ip_recvmsg,
563	.backlog_rcv	   = l2tp_ip_backlog_recv,
564	.hash		   = inet_hash,
565	.unhash		   = inet_unhash,
566	.obj_size	   = sizeof(struct l2tp_ip_sock),
567#ifdef CONFIG_COMPAT
568	.compat_setsockopt = compat_ip_setsockopt,
569	.compat_getsockopt = compat_ip_getsockopt,
570#endif
571};
572
573static const struct proto_ops l2tp_ip_ops = {
574	.family		   = PF_INET,
575	.owner		   = THIS_MODULE,
576	.release	   = inet_release,
577	.bind		   = inet_bind,
578	.connect	   = inet_dgram_connect,
579	.socketpair	   = sock_no_socketpair,
580	.accept		   = sock_no_accept,
581	.getname	   = l2tp_ip_getname,
582	.poll		   = datagram_poll,
583	.ioctl		   = inet_ioctl,
584	.listen		   = sock_no_listen,
585	.shutdown	   = inet_shutdown,
586	.setsockopt	   = sock_common_setsockopt,
587	.getsockopt	   = sock_common_getsockopt,
588	.sendmsg	   = inet_sendmsg,
589	.recvmsg	   = sock_common_recvmsg,
590	.mmap		   = sock_no_mmap,
591	.sendpage	   = sock_no_sendpage,
592#ifdef CONFIG_COMPAT
593	.compat_setsockopt = compat_sock_common_setsockopt,
594	.compat_getsockopt = compat_sock_common_getsockopt,
595#endif
596};
597
598static struct inet_protosw l2tp_ip_protosw = {
599	.type		= SOCK_DGRAM,
600	.protocol	= IPPROTO_L2TP,
601	.prot		= &l2tp_ip_prot,
602	.ops		= &l2tp_ip_ops,
603	.no_check	= 0,
604};
605
606static struct net_protocol l2tp_ip_protocol __read_mostly = {
607	.handler	= l2tp_ip_recv,
608};
609
610static int __init l2tp_ip_init(void)
611{
612	int err;
613
614	pr_info("L2TP IP encapsulation support (L2TPv3)\n");
615
616	err = proto_register(&l2tp_ip_prot, 1);
617	if (err != 0)
618		goto out;
619
620	err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
621	if (err)
622		goto out1;
623
624	inet_register_protosw(&l2tp_ip_protosw);
625	return 0;
626
627out1:
628	proto_unregister(&l2tp_ip_prot);
629out:
630	return err;
631}
632
633static void __exit l2tp_ip_exit(void)
634{
635	inet_unregister_protosw(&l2tp_ip_protosw);
636	inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
637	proto_unregister(&l2tp_ip_prot);
638}
639
640module_init(l2tp_ip_init);
641module_exit(l2tp_ip_exit);
642
643MODULE_LICENSE("GPL");
644MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
645MODULE_DESCRIPTION("L2TP over IP");
646MODULE_VERSION("1.0");
647
648/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
649 * enums
650 */
651MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);