v6.8
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/* L2TPv3 IP encapsulation support
  3 *
  4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
  5 */
  6
  7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8
  9#include <asm/ioctls.h>
 10#include <linux/icmp.h>
 11#include <linux/module.h>
 12#include <linux/skbuff.h>
 13#include <linux/random.h>
 14#include <linux/socket.h>
 15#include <linux/l2tp.h>
 16#include <linux/in.h>
 17#include <net/sock.h>
 18#include <net/ip.h>
 19#include <net/icmp.h>
 20#include <net/udp.h>
 21#include <net/inet_common.h>
 22#include <net/tcp_states.h>
 23#include <net/protocol.h>
 24#include <net/xfrm.h>
 25
 26#include "l2tp_core.h"
 27
 28struct l2tp_ip_sock {
 29	/* inet_sock has to be the first member of l2tp_ip_sock */
 30	struct inet_sock	inet;
 31
 32	u32			conn_id;
 33	u32			peer_conn_id;
 34};
 35
 36static DEFINE_RWLOCK(l2tp_ip_lock);
 37static struct hlist_head l2tp_ip_table;
 38static struct hlist_head l2tp_ip_bind_table;
 39
 40static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
 41{
 42	return (struct l2tp_ip_sock *)sk;
 43}
 44
 45static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
 46					  __be32 raddr, int dif, u32 tunnel_id)
 47{
 48	struct sock *sk;
 49
 50	sk_for_each_bound(sk, &l2tp_ip_bind_table) {
 51		const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
 52		const struct inet_sock *inet = inet_sk(sk);
 53		int bound_dev_if;
 54
 55		if (!net_eq(sock_net(sk), net))
 56			continue;
 57
 58		bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
 59		if (bound_dev_if && dif && bound_dev_if != dif)
 60			continue;
 61
 62		if (inet->inet_rcv_saddr && laddr &&
 63		    inet->inet_rcv_saddr != laddr)
 64			continue;
 65
 66		if (inet->inet_daddr && raddr && inet->inet_daddr != raddr)
 67			continue;
 68
 69		if (l2tp->conn_id != tunnel_id)
 70			continue;
 71
 72		goto found;
 73	}
 74
 75	sk = NULL;
 76found:
 77	return sk;
 78}
 79
 80/* When processing receive frames, there are two cases to
 81 * consider. Data frames consist of a non-zero session-id and an
 82 * optional cookie. Control frames consist of a regular L2TP header
 83 * preceded by 32-bits of zeros.
 84 *
 85 * L2TPv3 Session Header Over IP
 86 *
 87 *  0                   1                   2                   3
 88 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 89 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 90 * |                           Session ID                          |
 91 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 92 * |               Cookie (optional, maximum 64 bits)...
 93 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 94 *                                                                 |
 95 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 96 *
 97 * L2TPv3 Control Message Header Over IP
 98 *
 99 *  0                   1                   2                   3
100 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
101 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
102 * |                      (32 bits of zeros)                       |
103 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
104 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
105 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
106 * |                     Control Connection ID                     |
107 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
108 * |               Ns              |               Nr              |
109 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
110 *
111 * All control frames are passed to userspace.
112 */
113static int l2tp_ip_recv(struct sk_buff *skb)
114{
115	struct net *net = dev_net(skb->dev);
116	struct sock *sk;
117	u32 session_id;
118	u32 tunnel_id;
119	unsigned char *ptr, *optr;
120	struct l2tp_session *session;
121	struct l2tp_tunnel *tunnel = NULL;
122	struct iphdr *iph;
123
124	if (!pskb_may_pull(skb, 4))
125		goto discard;
126
127	/* Point to L2TP header */
128	optr = skb->data;
129	ptr = skb->data;
130	session_id = ntohl(*((__be32 *)ptr));
131	ptr += 4;
132
133	/* RFC3931: L2TP/IP packets have the first 4 bytes containing
134	 * the session_id. If it is 0, the packet is an L2TP control
135	 * frame and the session_id value can be discarded.
136	 */
137	if (session_id == 0) {
138		__skb_pull(skb, 4);
139		goto pass_up;
140	}
141
142	/* Ok, this is a data packet. Lookup the session. */
143	session = l2tp_session_get(net, session_id);
144	if (!session)
145		goto discard;
146
147	tunnel = session->tunnel;
148	if (!tunnel)
149		goto discard_sess;
150
151	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
152		goto discard_sess;
153
154	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
155	l2tp_session_dec_refcount(session);
156
157	return 0;
158
159pass_up:
160	/* Get the tunnel_id from the L2TP header */
161	if (!pskb_may_pull(skb, 12))
162		goto discard;
163
164	if ((skb->data[0] & 0xc0) != 0xc0)
165		goto discard;
166
167	tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
168	iph = (struct iphdr *)skb_network_header(skb);
169
170	read_lock_bh(&l2tp_ip_lock);
171	sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
172				   tunnel_id);
173	if (!sk) {
174		read_unlock_bh(&l2tp_ip_lock);
175		goto discard;
176	}
177	sock_hold(sk);
178	read_unlock_bh(&l2tp_ip_lock);
179
180	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
181		goto discard_put;
182
183	nf_reset_ct(skb);
184
185	return sk_receive_skb(sk, skb, 1);
186
187discard_sess:
188	l2tp_session_dec_refcount(session);
189	goto discard;
190
191discard_put:
192	sock_put(sk);
193
194discard:
195	kfree_skb(skb);
196	return 0;
197}
198
199static int l2tp_ip_hash(struct sock *sk)
200{
201	if (sk_unhashed(sk)) {
202		write_lock_bh(&l2tp_ip_lock);
203		sk_add_node(sk, &l2tp_ip_table);
204		write_unlock_bh(&l2tp_ip_lock);
205	}
206	return 0;
207}
208
209static void l2tp_ip_unhash(struct sock *sk)
210{
211	if (sk_unhashed(sk))
212		return;
213	write_lock_bh(&l2tp_ip_lock);
214	sk_del_node_init(sk);
215	write_unlock_bh(&l2tp_ip_lock);
216}
217
218static int l2tp_ip_open(struct sock *sk)
219{
220	/* Prevent autobind. We don't have ports. */
221	inet_sk(sk)->inet_num = IPPROTO_L2TP;
222
223	l2tp_ip_hash(sk);
224	return 0;
225}
226
227static void l2tp_ip_close(struct sock *sk, long timeout)
228{
229	write_lock_bh(&l2tp_ip_lock);
230	hlist_del_init(&sk->sk_bind_node);
231	sk_del_node_init(sk);
232	write_unlock_bh(&l2tp_ip_lock);
233	sk_common_release(sk);
234}
235
236static void l2tp_ip_destroy_sock(struct sock *sk)
237{
238	struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
239	struct sk_buff *skb;
240
241	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
242		kfree_skb(skb);
243
244	if (tunnel)
245		l2tp_tunnel_delete(tunnel);
246}
247
248static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
249{
250	struct inet_sock *inet = inet_sk(sk);
251	struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *)uaddr;
252	struct net *net = sock_net(sk);
253	int ret;
254	int chk_addr_ret;
255
256	if (addr_len < sizeof(struct sockaddr_l2tpip))
257		return -EINVAL;
258	if (addr->l2tp_family != AF_INET)
259		return -EINVAL;
260
261	lock_sock(sk);
262
263	ret = -EINVAL;
264	if (!sock_flag(sk, SOCK_ZAPPED))
265		goto out;
266
267	if (sk->sk_state != TCP_CLOSE)
268		goto out;
269
270	chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
271	ret = -EADDRNOTAVAIL;
272	if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
273	    chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
274		goto out;
275
276	if (addr->l2tp_addr.s_addr) {
277		inet->inet_rcv_saddr = addr->l2tp_addr.s_addr;
278		inet->inet_saddr = addr->l2tp_addr.s_addr;
279	}
280	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
281		inet->inet_saddr = 0;  /* Use device */
282
283	write_lock_bh(&l2tp_ip_lock);
284	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
285				  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
286		write_unlock_bh(&l2tp_ip_lock);
287		ret = -EADDRINUSE;
288		goto out;
289	}
290
291	sk_dst_reset(sk);
292	l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
293
294	sk_add_bind_node(sk, &l2tp_ip_bind_table);
295	sk_del_node_init(sk);
296	write_unlock_bh(&l2tp_ip_lock);
297
298	ret = 0;
299	sock_reset_flag(sk, SOCK_ZAPPED);
300
301out:
302	release_sock(sk);
303
304	return ret;
305}
306
307static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
308{
309	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
310	int rc;
311
312	if (addr_len < sizeof(*lsa))
313		return -EINVAL;
314
315	if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
316		return -EINVAL;
317
318	lock_sock(sk);
319
320	/* Must bind first - autobinding does not work */
321	if (sock_flag(sk, SOCK_ZAPPED)) {
322		rc = -EINVAL;
323		goto out_sk;
324	}
325
326	rc = __ip4_datagram_connect(sk, uaddr, addr_len);
327	if (rc < 0)
328		goto out_sk;
329
330	l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
331
332	write_lock_bh(&l2tp_ip_lock);
333	hlist_del_init(&sk->sk_bind_node);
334	sk_add_bind_node(sk, &l2tp_ip_bind_table);
335	write_unlock_bh(&l2tp_ip_lock);
336
337out_sk:
338	release_sock(sk);
339
340	return rc;
341}
342
343static int l2tp_ip_disconnect(struct sock *sk, int flags)
344{
345	if (sock_flag(sk, SOCK_ZAPPED))
346		return 0;
347
348	return __udp_disconnect(sk, flags);
349}
350
351static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
352			   int peer)
353{
354	struct sock *sk		= sock->sk;
355	struct inet_sock *inet	= inet_sk(sk);
356	struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
357	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
358
359	memset(lsa, 0, sizeof(*lsa));
360	lsa->l2tp_family = AF_INET;
361	if (peer) {
362		if (!inet->inet_dport)
363			return -ENOTCONN;
364		lsa->l2tp_conn_id = lsk->peer_conn_id;
365		lsa->l2tp_addr.s_addr = inet->inet_daddr;
366	} else {
367		__be32 addr = inet->inet_rcv_saddr;
368
369		if (!addr)
370			addr = inet->inet_saddr;
371		lsa->l2tp_conn_id = lsk->conn_id;
372		lsa->l2tp_addr.s_addr = addr;
373	}
374	return sizeof(*lsa);
375}
376
377static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
378{
379	int rc;
380
381	/* Charge it to the socket, dropping if the queue is full. */
382	rc = sock_queue_rcv_skb(sk, skb);
383	if (rc < 0)
384		goto drop;
385
386	return 0;
387
388drop:
389	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
390	kfree_skb(skb);
391	return 0;
392}
393
394/* Userspace will call sendmsg() on the tunnel socket to send L2TP
395 * control frames.
396 */
397static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
398{
399	struct sk_buff *skb;
400	int rc;
401	struct inet_sock *inet = inet_sk(sk);
402	struct rtable *rt = NULL;
403	struct flowi4 *fl4;
404	int connected = 0;
405	__be32 daddr;
406
407	lock_sock(sk);
408
409	rc = -ENOTCONN;
410	if (sock_flag(sk, SOCK_DEAD))
411		goto out;
412
413	/* Get and verify the address. */
414	if (msg->msg_name) {
415		DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name);
416
417		rc = -EINVAL;
418		if (msg->msg_namelen < sizeof(*lip))
419			goto out;
420
421		if (lip->l2tp_family != AF_INET) {
422			rc = -EAFNOSUPPORT;
423			if (lip->l2tp_family != AF_UNSPEC)
424				goto out;
425		}
426
427		daddr = lip->l2tp_addr.s_addr;
428	} else {
429		rc = -EDESTADDRREQ;
430		if (sk->sk_state != TCP_ESTABLISHED)
431			goto out;
432
433		daddr = inet->inet_daddr;
434		connected = 1;
435	}
436
437	/* Allocate a socket buffer */
438	rc = -ENOMEM;
439	skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
440			   4 + len, 0, GFP_KERNEL);
441	if (!skb)
442		goto error;
443
444	/* Reserve space for headers, putting IP header on 4-byte boundary. */
445	skb_reserve(skb, 2 + NET_SKB_PAD);
446	skb_reset_network_header(skb);
447	skb_reserve(skb, sizeof(struct iphdr));
448	skb_reset_transport_header(skb);
449
450	/* Insert 0 session_id */
451	*((__be32 *)skb_put(skb, 4)) = 0;
452
453	/* Copy user data into skb */
454	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
455	if (rc < 0) {
456		kfree_skb(skb);
457		goto error;
458	}
459
460	fl4 = &inet->cork.fl.u.ip4;
461	if (connected)
462		rt = (struct rtable *)__sk_dst_check(sk, 0);
463
464	rcu_read_lock();
465	if (!rt) {
466		const struct ip_options_rcu *inet_opt;
467
468		inet_opt = rcu_dereference(inet->inet_opt);
469
470		/* Use correct destination address if we have options. */
471		if (inet_opt && inet_opt->opt.srr)
472			daddr = inet_opt->opt.faddr;
473
474		/* If this fails, retransmit mechanism of transport layer will
475		 * keep trying until route appears or the connection times
476		 * itself out.
477		 */
478		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
479					   daddr, inet->inet_saddr,
480					   inet->inet_dport, inet->inet_sport,
481					   sk->sk_protocol, RT_CONN_FLAGS(sk),
482					   sk->sk_bound_dev_if);
483		if (IS_ERR(rt))
484			goto no_route;
485		if (connected) {
486			sk_setup_caps(sk, &rt->dst);
487		} else {
488			skb_dst_set(skb, &rt->dst);
489			goto xmit;
490		}
491	}
492
493	/* We don't need to clone dst here, it is guaranteed to not disappear.
494	 *  __dev_xmit_skb() might force a refcount if needed.
495	 */
496	skb_dst_set_noref(skb, &rt->dst);
497
498xmit:
499	/* Queue the packet to IP for output */
500	rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
501	rcu_read_unlock();
502
503error:
504	if (rc >= 0)
505		rc = len;
506
507out:
508	release_sock(sk);
509	return rc;
510
511no_route:
512	rcu_read_unlock();
513	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
514	kfree_skb(skb);
515	rc = -EHOSTUNREACH;
516	goto out;
517}
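The transmit direction is symmetric: userspace hands the kernel only the L2TP control header and payload, and l2tp_ip_sendmsg() above prepends the 32-bit zero session ID and builds the IP header before queuing the packet. A hedged sketch of the sending side; the helper name l2tp_send_ctrl(), the peer address string and the connection ID argument are illustrative, not part of this file.

/* Hypothetical helper: send one pre-built L2TP control message on an
 * L2TP/IP socket. The kernel adds the 32-bit zero session ID and the
 * IP header, so "msg" starts at the T|L|Ver flags word of the control
 * header shown in the diagram above.
 */
#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/l2tp.h>

static int l2tp_send_ctrl(int fd, const void *msg, size_t len,
			  const char *peer_ip, uint32_t peer_conn_id)
{
	struct sockaddr_l2tpip peer = {
		.l2tp_family  = AF_INET,
		.l2tp_conn_id = peer_conn_id,	/* peer's control connection ID */
	};

	if (inet_pton(AF_INET, peer_ip, &peer.l2tp_addr) != 1)
		return -1;

	return sendto(fd, msg, len, 0,
		      (struct sockaddr *)&peer, sizeof(peer)) < 0 ? -1 : 0;
}

Because the socket is SOCK_DGRAM, each sendto() produces exactly one control frame on the wire; no raw-socket handling is needed in userspace.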
518
519static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
520			   size_t len, int flags, int *addr_len)
521{
522	struct inet_sock *inet = inet_sk(sk);
523	size_t copied = 0;
524	int err = -EOPNOTSUPP;
525	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
526	struct sk_buff *skb;
527
528	if (flags & MSG_OOB)
529		goto out;
530
531	skb = skb_recv_datagram(sk, flags, &err);
532	if (!skb)
533		goto out;
534
535	copied = skb->len;
536	if (len < copied) {
537		msg->msg_flags |= MSG_TRUNC;
538		copied = len;
539	}
540
541	err = skb_copy_datagram_msg(skb, 0, msg, copied);
542	if (err)
543		goto done;
544
545	sock_recv_timestamp(msg, sk, skb);
546
547	/* Copy the address. */
548	if (sin) {
549		sin->sin_family = AF_INET;
550		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
551		sin->sin_port = 0;
552		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
553		*addr_len = sizeof(*sin);
554	}
555	if (inet_cmsg_flags(inet))
556		ip_cmsg_recv(msg, skb);
557	if (flags & MSG_TRUNC)
558		copied = skb->len;
559done:
560	skb_free_datagram(sk, skb);
561out:
562	return err ? err : copied;
563}
564
565int l2tp_ioctl(struct sock *sk, int cmd, int *karg)
566{
567	struct sk_buff *skb;
568
569	switch (cmd) {
570	case SIOCOUTQ:
571		*karg = sk_wmem_alloc_get(sk);
572		break;
573	case SIOCINQ:
574		spin_lock_bh(&sk->sk_receive_queue.lock);
575		skb = skb_peek(&sk->sk_receive_queue);
576		*karg = skb ? skb->len : 0;
577		spin_unlock_bh(&sk->sk_receive_queue.lock);
578		break;
579
580	default:
581		return -ENOIOCTLCMD;
582	}
583
584	return 0;
585}
586EXPORT_SYMBOL_GPL(l2tp_ioctl);
587
588static struct proto l2tp_ip_prot = {
589	.name		   = "L2TP/IP",
590	.owner		   = THIS_MODULE,
591	.init		   = l2tp_ip_open,
592	.close		   = l2tp_ip_close,
593	.bind		   = l2tp_ip_bind,
594	.connect	   = l2tp_ip_connect,
595	.disconnect	   = l2tp_ip_disconnect,
596	.ioctl		   = l2tp_ioctl,
597	.destroy	   = l2tp_ip_destroy_sock,
598	.setsockopt	   = ip_setsockopt,
599	.getsockopt	   = ip_getsockopt,
600	.sendmsg	   = l2tp_ip_sendmsg,
601	.recvmsg	   = l2tp_ip_recvmsg,
602	.backlog_rcv	   = l2tp_ip_backlog_recv,
603	.hash		   = l2tp_ip_hash,
604	.unhash		   = l2tp_ip_unhash,
605	.obj_size	   = sizeof(struct l2tp_ip_sock),
606};
607
608static const struct proto_ops l2tp_ip_ops = {
609	.family		   = PF_INET,
610	.owner		   = THIS_MODULE,
611	.release	   = inet_release,
612	.bind		   = inet_bind,
613	.connect	   = inet_dgram_connect,
614	.socketpair	   = sock_no_socketpair,
615	.accept		   = sock_no_accept,
616	.getname	   = l2tp_ip_getname,
617	.poll		   = datagram_poll,
618	.ioctl		   = inet_ioctl,
619	.gettstamp	   = sock_gettstamp,
620	.listen		   = sock_no_listen,
621	.shutdown	   = inet_shutdown,
622	.setsockopt	   = sock_common_setsockopt,
623	.getsockopt	   = sock_common_getsockopt,
624	.sendmsg	   = inet_sendmsg,
625	.recvmsg	   = sock_common_recvmsg,
626	.mmap		   = sock_no_mmap,
627};
628
629static struct inet_protosw l2tp_ip_protosw = {
630	.type		= SOCK_DGRAM,
631	.protocol	= IPPROTO_L2TP,
632	.prot		= &l2tp_ip_prot,
633	.ops		= &l2tp_ip_ops,
634};
635
636static struct net_protocol l2tp_ip_protocol __read_mostly = {
637	.handler	= l2tp_ip_recv,
638};
639
640static int __init l2tp_ip_init(void)
641{
642	int err;
643
644	pr_info("L2TP IP encapsulation support (L2TPv3)\n");
645
646	err = proto_register(&l2tp_ip_prot, 1);
647	if (err != 0)
648		goto out;
649
650	err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
651	if (err)
652		goto out1;
653
654	inet_register_protosw(&l2tp_ip_protosw);
655	return 0;
656
657out1:
658	proto_unregister(&l2tp_ip_prot);
659out:
660	return err;
661}
662
663static void __exit l2tp_ip_exit(void)
664{
665	inet_unregister_protosw(&l2tp_ip_protosw);
666	inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
667	proto_unregister(&l2tp_ip_prot);
668}
669
670module_init(l2tp_ip_init);
671module_exit(l2tp_ip_exit);
672
673MODULE_LICENSE("GPL");
674MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
675MODULE_DESCRIPTION("L2TP over IP");
676MODULE_VERSION("1.0");
677
678/* Use the values of SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol,
679 * because __stringify doesn't like enums
680 */
681MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 115, 2);
682MODULE_ALIAS_NET_PF_PROTO(PF_INET, 115);
v4.6
  1/*
  2 * L2TPv3 IP encapsulation support
  3 *
  4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
  5 *
  6 *	This program is free software; you can redistribute it and/or
  7 *	modify it under the terms of the GNU General Public License
  8 *	as published by the Free Software Foundation; either version
  9 *	2 of the License, or (at your option) any later version.
 10 */
 11
 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 13
 14#include <linux/icmp.h>
 15#include <linux/module.h>
 16#include <linux/skbuff.h>
 17#include <linux/random.h>
 18#include <linux/socket.h>
 19#include <linux/l2tp.h>
 20#include <linux/in.h>
 21#include <net/sock.h>
 22#include <net/ip.h>
 23#include <net/icmp.h>
 24#include <net/udp.h>
 25#include <net/inet_common.h>
 26#include <net/inet_hashtables.h>
 27#include <net/tcp_states.h>
 28#include <net/protocol.h>
 29#include <net/xfrm.h>
 30
 31#include "l2tp_core.h"
 32
 33struct l2tp_ip_sock {
 34	/* inet_sock has to be the first member of l2tp_ip_sock */
 35	struct inet_sock	inet;
 36
 37	u32			conn_id;
 38	u32			peer_conn_id;
 39};
 40
 41static DEFINE_RWLOCK(l2tp_ip_lock);
 42static struct hlist_head l2tp_ip_table;
 43static struct hlist_head l2tp_ip_bind_table;
 44
 45static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
 46{
 47	return (struct l2tp_ip_sock *)sk;
 48}
 49
 50static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
 51{
 52	struct sock *sk;
 53
 54	sk_for_each_bound(sk, &l2tp_ip_bind_table) {
 55		struct inet_sock *inet = inet_sk(sk);
 56		struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
 57
 58		if (l2tp == NULL)
 59			continue;
 60
 61		if ((l2tp->conn_id == tunnel_id) &&
 62		    net_eq(sock_net(sk), net) &&
 63		    !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
 64		    !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
 65			goto found;
 66	}
 67
 68	sk = NULL;
 69found:
 70	return sk;
 71}
 72
 73static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
 74{
 75	struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
 76	if (sk)
 77		sock_hold(sk);
 78
 79	return sk;
 80}
 81
 82/* When processing receive frames, there are two cases to
 83 * consider. Data frames consist of a non-zero session-id and an
 84 * optional cookie. Control frames consist of a regular L2TP header
 85 * preceded by 32-bits of zeros.
 86 *
 87 * L2TPv3 Session Header Over IP
 88 *
 89 *  0                   1                   2                   3
 90 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 91 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 92 * |                           Session ID                          |
 93 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 94 * |               Cookie (optional, maximum 64 bits)...
 95 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 96 *                                                                 |
 97 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 98 *
 99 * L2TPv3 Control Message Header Over IP
100 *
101 *  0                   1                   2                   3
102 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
103 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
104 * |                      (32 bits of zeros)                       |
105 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
106 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
107 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
108 * |                     Control Connection ID                     |
109 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
110 * |               Ns              |               Nr              |
111 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
112 *
113 * All control frames are passed to userspace.
114 */
115static int l2tp_ip_recv(struct sk_buff *skb)
116{
117	struct net *net = dev_net(skb->dev);
118	struct sock *sk;
119	u32 session_id;
120	u32 tunnel_id;
121	unsigned char *ptr, *optr;
122	struct l2tp_session *session;
123	struct l2tp_tunnel *tunnel = NULL;
124	int length;
125
126	if (!pskb_may_pull(skb, 4))
127		goto discard;
128
129	/* Point to L2TP header */
130	optr = ptr = skb->data;
131	session_id = ntohl(*((__be32 *) ptr));
132	ptr += 4;
133
134	/* RFC3931: L2TP/IP packets have the first 4 bytes containing
135	 * the session_id. If it is 0, the packet is an L2TP control
136	 * frame and the session_id value can be discarded.
137	 */
138	if (session_id == 0) {
139		__skb_pull(skb, 4);
140		goto pass_up;
141	}
142
143	/* Ok, this is a data packet. Lookup the session. */
144	session = l2tp_session_find(net, NULL, session_id);
145	if (session == NULL)
146		goto discard;
147
148	tunnel = session->tunnel;
149	if (tunnel == NULL)
150		goto discard;
151
152	/* Trace packet contents, if enabled */
153	if (tunnel->debug & L2TP_MSG_DATA) {
154		length = min(32u, skb->len);
155		if (!pskb_may_pull(skb, length))
156			goto discard;
157
158		/* Point to L2TP header */
159		optr = ptr = skb->data;
160		ptr += 4;
161		pr_debug("%s: ip recv\n", tunnel->name);
162		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
163	}
164
165	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
166
167	return 0;
168
169pass_up:
170	/* Get the tunnel_id from the L2TP header */
171	if (!pskb_may_pull(skb, 12))
172		goto discard;
173
174	if ((skb->data[0] & 0xc0) != 0xc0)
175		goto discard;
176
177	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
178	tunnel = l2tp_tunnel_find(net, tunnel_id);
179	if (tunnel != NULL)
180		sk = tunnel->sock;
181	else {
182		struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
183
184		read_lock_bh(&l2tp_ip_lock);
185		sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id);
186		read_unlock_bh(&l2tp_ip_lock);
187	}
188
189	if (sk == NULL)
190		goto discard;
191
192	sock_hold(sk);
193
194	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
195		goto discard_put;
196
197	nf_reset(skb);
198
199	return sk_receive_skb(sk, skb, 1);
200
201discard_put:
202	sock_put(sk);
203
204discard:
205	kfree_skb(skb);
206	return 0;
207}
208
209static int l2tp_ip_open(struct sock *sk)
210{
211	/* Prevent autobind. We don't have ports. */
212	inet_sk(sk)->inet_num = IPPROTO_L2TP;
213
214	write_lock_bh(&l2tp_ip_lock);
215	sk_add_node(sk, &l2tp_ip_table);
216	write_unlock_bh(&l2tp_ip_lock);
217
218	return 0;
219}
220
221static void l2tp_ip_close(struct sock *sk, long timeout)
222{
223	write_lock_bh(&l2tp_ip_lock);
224	hlist_del_init(&sk->sk_bind_node);
225	sk_del_node_init(sk);
226	write_unlock_bh(&l2tp_ip_lock);
227	sk_common_release(sk);
228}
229
230static void l2tp_ip_destroy_sock(struct sock *sk)
231{
232	struct sk_buff *skb;
233	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
234
235	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
236		kfree_skb(skb);
237
238	if (tunnel) {
239		l2tp_tunnel_closeall(tunnel);
240		sock_put(sk);
241	}
242
243	sk_refcnt_debug_dec(sk);
244}
245
246static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
247{
248	struct inet_sock *inet = inet_sk(sk);
249	struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
250	struct net *net = sock_net(sk);
251	int ret;
252	int chk_addr_ret;
253
254	if (!sock_flag(sk, SOCK_ZAPPED))
255		return -EINVAL;
256	if (addr_len < sizeof(struct sockaddr_l2tpip))
257		return -EINVAL;
258	if (addr->l2tp_family != AF_INET)
259		return -EINVAL;
260
261	ret = -EADDRINUSE;
262	read_lock_bh(&l2tp_ip_lock);
263	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
264				  sk->sk_bound_dev_if, addr->l2tp_conn_id))
265		goto out_in_use;
266
267	read_unlock_bh(&l2tp_ip_lock);
268
269	lock_sock(sk);
270	if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
271		goto out;
272
273	chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
274	ret = -EADDRNOTAVAIL;
275	if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
276	    chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
277		goto out;
278
279	if (addr->l2tp_addr.s_addr)
280		inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
281	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
282		inet->inet_saddr = 0;  /* Use device */
283	sk_dst_reset(sk);
284
285	l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
286
287	write_lock_bh(&l2tp_ip_lock);
288	sk_add_bind_node(sk, &l2tp_ip_bind_table);
289	sk_del_node_init(sk);
290	write_unlock_bh(&l2tp_ip_lock);
291	ret = 0;
292	sock_reset_flag(sk, SOCK_ZAPPED);
293
294out:
295	release_sock(sk);
296
297	return ret;
298
299out_in_use:
300	read_unlock_bh(&l2tp_ip_lock);
301
302	return ret;
303}
304
305static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
306{
307	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
308	int rc;
309
310	if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
311		return -EINVAL;
312
313	if (addr_len < sizeof(*lsa))
314		return -EINVAL;
315
316	if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
317		return -EINVAL;
318
319	rc = ip4_datagram_connect(sk, uaddr, addr_len);
320	if (rc < 0)
321		return rc;
322
323	lock_sock(sk);
324
325	l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
326
327	write_lock_bh(&l2tp_ip_lock);
328	hlist_del_init(&sk->sk_bind_node);
329	sk_add_bind_node(sk, &l2tp_ip_bind_table);
330	write_unlock_bh(&l2tp_ip_lock);
331
332	release_sock(sk);
333	return rc;
334}
335
336static int l2tp_ip_disconnect(struct sock *sk, int flags)
337{
338	if (sock_flag(sk, SOCK_ZAPPED))
339		return 0;
340
341	return udp_disconnect(sk, flags);
342}
343
344static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
345			   int *uaddr_len, int peer)
346{
347	struct sock *sk		= sock->sk;
348	struct inet_sock *inet	= inet_sk(sk);
349	struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
350	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
351
352	memset(lsa, 0, sizeof(*lsa));
353	lsa->l2tp_family = AF_INET;
354	if (peer) {
355		if (!inet->inet_dport)
356			return -ENOTCONN;
357		lsa->l2tp_conn_id = lsk->peer_conn_id;
358		lsa->l2tp_addr.s_addr = inet->inet_daddr;
359	} else {
360		__be32 addr = inet->inet_rcv_saddr;
361		if (!addr)
362			addr = inet->inet_saddr;
363		lsa->l2tp_conn_id = lsk->conn_id;
364		lsa->l2tp_addr.s_addr = addr;
365	}
366	*uaddr_len = sizeof(*lsa);
367	return 0;
368}
369
370static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
371{
372	int rc;
373
374	/* Charge it to the socket, dropping if the queue is full. */
375	rc = sock_queue_rcv_skb(sk, skb);
376	if (rc < 0)
377		goto drop;
378
379	return 0;
380
381drop:
382	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
383	kfree_skb(skb);
384	return -1;
385}
386
387/* Userspace will call sendmsg() on the tunnel socket to send L2TP
388 * control frames.
389 */
390static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
391{
392	struct sk_buff *skb;
393	int rc;
394	struct inet_sock *inet = inet_sk(sk);
395	struct rtable *rt = NULL;
396	struct flowi4 *fl4;
397	int connected = 0;
398	__be32 daddr;
399
400	lock_sock(sk);
401
402	rc = -ENOTCONN;
403	if (sock_flag(sk, SOCK_DEAD))
404		goto out;
405
406	/* Get and verify the address. */
407	if (msg->msg_name) {
408		DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name);
409		rc = -EINVAL;
410		if (msg->msg_namelen < sizeof(*lip))
411			goto out;
412
413		if (lip->l2tp_family != AF_INET) {
414			rc = -EAFNOSUPPORT;
415			if (lip->l2tp_family != AF_UNSPEC)
416				goto out;
417		}
418
419		daddr = lip->l2tp_addr.s_addr;
420	} else {
421		rc = -EDESTADDRREQ;
422		if (sk->sk_state != TCP_ESTABLISHED)
423			goto out;
424
425		daddr = inet->inet_daddr;
426		connected = 1;
427	}
428
429	/* Allocate a socket buffer */
430	rc = -ENOMEM;
431	skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
432			   4 + len, 0, GFP_KERNEL);
433	if (!skb)
434		goto error;
435
436	/* Reserve space for headers, putting IP header on 4-byte boundary. */
437	skb_reserve(skb, 2 + NET_SKB_PAD);
438	skb_reset_network_header(skb);
439	skb_reserve(skb, sizeof(struct iphdr));
440	skb_reset_transport_header(skb);
441
442	/* Insert 0 session_id */
443	*((__be32 *) skb_put(skb, 4)) = 0;
444
445	/* Copy user data into skb */
446	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
447	if (rc < 0) {
448		kfree_skb(skb);
449		goto error;
450	}
451
452	fl4 = &inet->cork.fl.u.ip4;
453	if (connected)
454		rt = (struct rtable *) __sk_dst_check(sk, 0);
455
456	rcu_read_lock();
457	if (rt == NULL) {
458		const struct ip_options_rcu *inet_opt;
459
460		inet_opt = rcu_dereference(inet->inet_opt);
461
462		/* Use correct destination address if we have options. */
463		if (inet_opt && inet_opt->opt.srr)
464			daddr = inet_opt->opt.faddr;
465
466		/* If this fails, retransmit mechanism of transport layer will
467		 * keep trying until route appears or the connection times
468		 * itself out.
469		 */
470		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
471					   daddr, inet->inet_saddr,
472					   inet->inet_dport, inet->inet_sport,
473					   sk->sk_protocol, RT_CONN_FLAGS(sk),
474					   sk->sk_bound_dev_if);
475		if (IS_ERR(rt))
476			goto no_route;
477		if (connected) {
478			sk_setup_caps(sk, &rt->dst);
479		} else {
480			skb_dst_set(skb, &rt->dst);
481			goto xmit;
482		}
483	}
484
485	/* We don't need to clone dst here, it is guaranteed to not disappear.
486	 *  __dev_xmit_skb() might force a refcount if needed.
487	 */
488	skb_dst_set_noref(skb, &rt->dst);
489
490xmit:
491	/* Queue the packet to IP for output */
492	rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
493	rcu_read_unlock();
494
495error:
496	if (rc >= 0)
497		rc = len;
498
499out:
500	release_sock(sk);
501	return rc;
502
503no_route:
504	rcu_read_unlock();
505	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
506	kfree_skb(skb);
507	rc = -EHOSTUNREACH;
508	goto out;
509}
510
511static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
512			   size_t len, int noblock, int flags, int *addr_len)
513{
514	struct inet_sock *inet = inet_sk(sk);
515	size_t copied = 0;
516	int err = -EOPNOTSUPP;
517	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
518	struct sk_buff *skb;
519
520	if (flags & MSG_OOB)
521		goto out;
522
523	skb = skb_recv_datagram(sk, flags, noblock, &err);
524	if (!skb)
525		goto out;
526
527	copied = skb->len;
528	if (len < copied) {
529		msg->msg_flags |= MSG_TRUNC;
530		copied = len;
531	}
532
533	err = skb_copy_datagram_msg(skb, 0, msg, copied);
534	if (err)
535		goto done;
536
537	sock_recv_timestamp(msg, sk, skb);
538
539	/* Copy the address. */
540	if (sin) {
541		sin->sin_family = AF_INET;
542		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
543		sin->sin_port = 0;
544		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
545		*addr_len = sizeof(*sin);
546	}
547	if (inet->cmsg_flags)
548		ip_cmsg_recv(msg, skb);
549	if (flags & MSG_TRUNC)
550		copied = skb->len;
551done:
552	skb_free_datagram(sk, skb);
553out:
554	return err ? err : copied;
555}
556
557static struct proto l2tp_ip_prot = {
558	.name		   = "L2TP/IP",
559	.owner		   = THIS_MODULE,
560	.init		   = l2tp_ip_open,
561	.close		   = l2tp_ip_close,
562	.bind		   = l2tp_ip_bind,
563	.connect	   = l2tp_ip_connect,
564	.disconnect	   = l2tp_ip_disconnect,
565	.ioctl		   = udp_ioctl,
566	.destroy	   = l2tp_ip_destroy_sock,
567	.setsockopt	   = ip_setsockopt,
568	.getsockopt	   = ip_getsockopt,
569	.sendmsg	   = l2tp_ip_sendmsg,
570	.recvmsg	   = l2tp_ip_recvmsg,
571	.backlog_rcv	   = l2tp_ip_backlog_recv,
572	.hash		   = inet_hash,
573	.unhash		   = inet_unhash,
574	.obj_size	   = sizeof(struct l2tp_ip_sock),
575#ifdef CONFIG_COMPAT
576	.compat_setsockopt = compat_ip_setsockopt,
577	.compat_getsockopt = compat_ip_getsockopt,
578#endif
579};
580
581static const struct proto_ops l2tp_ip_ops = {
582	.family		   = PF_INET,
583	.owner		   = THIS_MODULE,
584	.release	   = inet_release,
585	.bind		   = inet_bind,
586	.connect	   = inet_dgram_connect,
587	.socketpair	   = sock_no_socketpair,
588	.accept		   = sock_no_accept,
589	.getname	   = l2tp_ip_getname,
590	.poll		   = datagram_poll,
591	.ioctl		   = inet_ioctl,
592	.listen		   = sock_no_listen,
593	.shutdown	   = inet_shutdown,
594	.setsockopt	   = sock_common_setsockopt,
595	.getsockopt	   = sock_common_getsockopt,
596	.sendmsg	   = inet_sendmsg,
597	.recvmsg	   = sock_common_recvmsg,
598	.mmap		   = sock_no_mmap,
599	.sendpage	   = sock_no_sendpage,
600#ifdef CONFIG_COMPAT
601	.compat_setsockopt = compat_sock_common_setsockopt,
602	.compat_getsockopt = compat_sock_common_getsockopt,
603#endif
604};
605
606static struct inet_protosw l2tp_ip_protosw = {
607	.type		= SOCK_DGRAM,
608	.protocol	= IPPROTO_L2TP,
609	.prot		= &l2tp_ip_prot,
610	.ops		= &l2tp_ip_ops,
611};
612
613static struct net_protocol l2tp_ip_protocol __read_mostly = {
614	.handler	= l2tp_ip_recv,
615	.netns_ok	= 1,
616};
617
618static int __init l2tp_ip_init(void)
619{
620	int err;
621
622	pr_info("L2TP IP encapsulation support (L2TPv3)\n");
623
624	err = proto_register(&l2tp_ip_prot, 1);
625	if (err != 0)
626		goto out;
627
628	err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
629	if (err)
630		goto out1;
631
632	inet_register_protosw(&l2tp_ip_protosw);
633	return 0;
634
635out1:
636	proto_unregister(&l2tp_ip_prot);
637out:
638	return err;
639}
640
641static void __exit l2tp_ip_exit(void)
642{
643	inet_unregister_protosw(&l2tp_ip_protosw);
644	inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
645	proto_unregister(&l2tp_ip_prot);
646}
647
648module_init(l2tp_ip_init);
649module_exit(l2tp_ip_exit);
650
651MODULE_LICENSE("GPL");
652MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
653MODULE_DESCRIPTION("L2TP over IP");
654MODULE_VERSION("1.0");
655
656/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
657 * enums
658 */
659MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
660MODULE_ALIAS_NET_PF_PROTO(PF_INET, IPPROTO_L2TP);