// SPDX-License-Identifier: GPL-2.0-or-later
/* L2TPv3 IP encapsulation support
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/ioctls.h>
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "l2tp_core.h"

/* per-net private data for this module */
static unsigned int l2tp_ip_net_id;
struct l2tp_ip_net {
        rwlock_t l2tp_ip_lock;
        struct hlist_head l2tp_ip_table;
        struct hlist_head l2tp_ip_bind_table;
};

struct l2tp_ip_sock {
        /* inet_sock has to be the first member of l2tp_ip_sock */
        struct inet_sock inet;

        u32 conn_id;
        u32 peer_conn_id;
};

static struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
{
        return (struct l2tp_ip_sock *)sk;
}

static struct l2tp_ip_net *l2tp_ip_pernet(const struct net *net)
{
        return net_generic(net, l2tp_ip_net_id);
}

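/* Socket lookup for inbound control packets and bind conflict checks.
 *
 * A zero laddr, raddr or dif acts as a wildcard for the corresponding
 * field, but the tunnel connection ID must match exactly.  Callers hold
 * the per-net l2tp_ip_lock while walking the bind table.
 */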
static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
                                          __be32 raddr, int dif, u32 tunnel_id)
{
        struct l2tp_ip_net *pn = l2tp_ip_pernet(net);
        struct sock *sk;

        sk_for_each_bound(sk, &pn->l2tp_ip_bind_table) {
                const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
                const struct inet_sock *inet = inet_sk(sk);
                int bound_dev_if;

                if (!net_eq(sock_net(sk), net))
                        continue;

                bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
                if (bound_dev_if && dif && bound_dev_if != dif)
                        continue;

                if (inet->inet_rcv_saddr && laddr &&
                    inet->inet_rcv_saddr != laddr)
                        continue;

                if (inet->inet_daddr && raddr && inet->inet_daddr != raddr)
                        continue;

                if (l2tp->conn_id != tunnel_id)
                        continue;

                goto found;
        }

        sk = NULL;
found:
        return sk;
}

/* When processing receive frames, there are two cases to
 * consider. Data frames consist of a non-zero session-id and an
 * optional cookie. Control frames consist of a regular L2TP header
 * preceded by 32-bits of zeros.
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Control Message Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      (32 bits of zeros)                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Control Connection ID                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Ns              |               Nr              |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * All control frames are passed to userspace.
 */
static int l2tp_ip_recv(struct sk_buff *skb)
{
        struct net *net = dev_net(skb->dev);
        struct l2tp_ip_net *pn;
        struct sock *sk;
        u32 session_id;
        u32 tunnel_id;
        unsigned char *ptr, *optr;
        struct l2tp_session *session;
        struct l2tp_tunnel *tunnel = NULL;
        struct iphdr *iph;

        pn = l2tp_ip_pernet(net);

        if (!pskb_may_pull(skb, 4))
                goto discard;

        /* Point to L2TP header */
        optr = skb->data;
        ptr = skb->data;
        session_id = ntohl(*((__be32 *)ptr));
        ptr += 4;

        /* RFC3931: L2TP/IP packets have the first 4 bytes containing
         * the session_id. If it is 0, the packet is an L2TP control
         * frame and the session_id value can be discarded.
         */
        if (session_id == 0) {
                __skb_pull(skb, 4);
                goto pass_up;
        }

        /* Ok, this is a data packet. Lookup the session. */
        session = l2tp_v3_session_get(net, NULL, session_id);
        if (!session)
                goto discard;

        tunnel = session->tunnel;
        if (!tunnel)
                goto discard_sess;

        if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
                goto discard_sess;

        l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
        l2tp_session_put(session);

        return 0;

pass_up:
        /* Get the tunnel_id from the L2TP header */
        if (!pskb_may_pull(skb, 12))
                goto discard;

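        /* Control frames must have the T and L bits set (see the
         * header layout above).
         */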
        if ((skb->data[0] & 0xc0) != 0xc0)
                goto discard;

        tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
        iph = (struct iphdr *)skb_network_header(skb);

        read_lock_bh(&pn->l2tp_ip_lock);
        sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
                                   tunnel_id);
        if (!sk) {
                read_unlock_bh(&pn->l2tp_ip_lock);
                goto discard;
        }
        sock_hold(sk);
        read_unlock_bh(&pn->l2tp_ip_lock);

        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_put;

        nf_reset_ct(skb);

        return sk_receive_skb(sk, skb, 1);

discard_sess:
        l2tp_session_put(session);
        goto discard;

discard_put:
        sock_put(sk);

discard:
        kfree_skb(skb);
        return 0;
}

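/* All L2TP/IP sockets are tracked in the per-net l2tp_ip_table; the
 * hash/unhash callbacks add and remove them under l2tp_ip_lock.
 */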
static int l2tp_ip_hash(struct sock *sk)
{
        struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk));

        if (sk_unhashed(sk)) {
                write_lock_bh(&pn->l2tp_ip_lock);
                sk_add_node(sk, &pn->l2tp_ip_table);
                write_unlock_bh(&pn->l2tp_ip_lock);
        }
        return 0;
}

static void l2tp_ip_unhash(struct sock *sk)
{
        struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk));

        if (sk_unhashed(sk))
                return;
        write_lock_bh(&pn->l2tp_ip_lock);
        sk_del_node_init(sk);
        write_unlock_bh(&pn->l2tp_ip_lock);
}

static int l2tp_ip_open(struct sock *sk)
{
        /* Prevent autobind. We don't have ports. */
        inet_sk(sk)->inet_num = IPPROTO_L2TP;

        l2tp_ip_hash(sk);
        return 0;
}

static void l2tp_ip_close(struct sock *sk, long timeout)
{
        struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk));

        write_lock_bh(&pn->l2tp_ip_lock);
        hlist_del_init(&sk->sk_bind_node);
        sk_del_node_init(sk);
        write_unlock_bh(&pn->l2tp_ip_lock);
        sk_common_release(sk);
}

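/* Purge any queued transmit data and tear down the tunnel (if any) that
 * rides on this socket before the socket goes away.
 */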
static void l2tp_ip_destroy_sock(struct sock *sk)
{
        struct l2tp_tunnel *tunnel;

        __skb_queue_purge(&sk->sk_write_queue);

        tunnel = l2tp_sk_to_tunnel(sk);
        if (tunnel) {
                l2tp_tunnel_delete(tunnel);
                l2tp_tunnel_put(tunnel);
        }
}

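/* Bind to a local address and L2TP connection ID.  L2TP/IP has no port
 * numbers, so the connection ID is what distinguishes sockets bound to
 * the same address: an (address, connection ID) pair may be bound only
 * once, otherwise -EADDRINUSE is returned.
 */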
static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *)uaddr;
        struct net *net = sock_net(sk);
        struct l2tp_ip_net *pn;
        int ret;
        int chk_addr_ret;

        if (addr_len < sizeof(struct sockaddr_l2tpip))
                return -EINVAL;
        if (addr->l2tp_family != AF_INET)
                return -EINVAL;

        lock_sock(sk);

        ret = -EINVAL;
        if (!sock_flag(sk, SOCK_ZAPPED))
                goto out;

        if (sk->sk_state != TCP_CLOSE)
                goto out;

        chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
        ret = -EADDRNOTAVAIL;
        if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
            chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
                goto out;

        if (addr->l2tp_addr.s_addr) {
                inet->inet_rcv_saddr = addr->l2tp_addr.s_addr;
                inet->inet_saddr = addr->l2tp_addr.s_addr;
        }
        if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
                inet->inet_saddr = 0;  /* Use device */

        pn = l2tp_ip_pernet(net);
        write_lock_bh(&pn->l2tp_ip_lock);
        if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
                                  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
                write_unlock_bh(&pn->l2tp_ip_lock);
                ret = -EADDRINUSE;
                goto out;
        }

        sk_dst_reset(sk);
        l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;

        sk_add_bind_node(sk, &pn->l2tp_ip_bind_table);
        sk_del_node_init(sk);
        write_unlock_bh(&pn->l2tp_ip_lock);

        ret = 0;
        sock_reset_flag(sk, SOCK_ZAPPED);

out:
        release_sock(sk);

        return ret;
}

static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
        struct l2tp_ip_net *pn = l2tp_ip_pernet(sock_net(sk));
        int rc;

        if (addr_len < sizeof(*lsa))
                return -EINVAL;

        if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
                return -EINVAL;

        lock_sock(sk);

        /* Must bind first - autobinding does not work */
        if (sock_flag(sk, SOCK_ZAPPED)) {
                rc = -EINVAL;
                goto out_sk;
        }

        rc = __ip4_datagram_connect(sk, uaddr, addr_len);
        if (rc < 0)
                goto out_sk;

        l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;

        write_lock_bh(&pn->l2tp_ip_lock);
        hlist_del_init(&sk->sk_bind_node);
        sk_add_bind_node(sk, &pn->l2tp_ip_bind_table);
        write_unlock_bh(&pn->l2tp_ip_lock);

out_sk:
        release_sock(sk);

        return rc;
}

static int l2tp_ip_disconnect(struct sock *sk, int flags)
{
        if (sock_flag(sk, SOCK_ZAPPED))
                return 0;

        return __udp_disconnect(sk, flags);
}

static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
                           int peer)
{
        struct sock *sk = sock->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
        struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;

        memset(lsa, 0, sizeof(*lsa));
        lsa->l2tp_family = AF_INET;
        if (peer) {
                if (!inet->inet_dport)
                        return -ENOTCONN;
                lsa->l2tp_conn_id = lsk->peer_conn_id;
                lsa->l2tp_addr.s_addr = inet->inet_daddr;
        } else {
                __be32 addr = inet->inet_rcv_saddr;

                if (!addr)
                        addr = inet->inet_saddr;
                lsa->l2tp_conn_id = lsk->conn_id;
                lsa->l2tp_addr.s_addr = addr;
        }
        return sizeof(*lsa);
}

static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
        int rc;

        /* Charge it to the socket, dropping if the queue is full. */
        rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0)
                goto drop;

        return 0;

drop:
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
        kfree_skb(skb);
        return 0;
}

/* Userspace will call sendmsg() on the tunnel socket to send L2TP
 * control frames.
 */
static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
        struct sk_buff *skb;
        int rc;
        struct inet_sock *inet = inet_sk(sk);
        struct rtable *rt = NULL;
        struct flowi4 *fl4;
        int connected = 0;
        __be32 daddr;

        lock_sock(sk);

        rc = -ENOTCONN;
        if (sock_flag(sk, SOCK_DEAD))
                goto out;

        /* Get and verify the address. */
        if (msg->msg_name) {
                DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name);

                rc = -EINVAL;
                if (msg->msg_namelen < sizeof(*lip))
                        goto out;

                if (lip->l2tp_family != AF_INET) {
                        rc = -EAFNOSUPPORT;
                        if (lip->l2tp_family != AF_UNSPEC)
                                goto out;
                }

                daddr = lip->l2tp_addr.s_addr;
        } else {
                rc = -EDESTADDRREQ;
                if (sk->sk_state != TCP_ESTABLISHED)
                        goto out;

                daddr = inet->inet_daddr;
                connected = 1;
        }

        /* Allocate a socket buffer */
        rc = -ENOMEM;
        skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
                           4 + len, 0, GFP_KERNEL);
        if (!skb)
                goto error;

        /* Reserve space for headers, putting IP header on 4-byte boundary. */
        skb_reserve(skb, 2 + NET_SKB_PAD);
        skb_reset_network_header(skb);
        skb_reserve(skb, sizeof(struct iphdr));
        skb_reset_transport_header(skb);

        /* Insert 0 session_id */
        *((__be32 *)skb_put(skb, 4)) = 0;

        /* Copy user data into skb */
        rc = memcpy_from_msg(skb_put(skb, len), msg, len);
        if (rc < 0) {
                kfree_skb(skb);
                goto error;
        }

        fl4 = &inet->cork.fl.u.ip4;
        if (connected)
                rt = dst_rtable(__sk_dst_check(sk, 0));

        rcu_read_lock();
        if (!rt) {
                const struct ip_options_rcu *inet_opt;

                inet_opt = rcu_dereference(inet->inet_opt);

                /* Use correct destination address if we have options. */
                if (inet_opt && inet_opt->opt.srr)
                        daddr = inet_opt->opt.faddr;

                /* If this fails, the transport layer's retransmit mechanism
                 * will keep trying until a route appears or the connection
                 * times itself out.
                 */
                rt = ip_route_output_ports(sock_net(sk), fl4, sk,
                                           daddr, inet->inet_saddr,
                                           inet->inet_dport, inet->inet_sport,
                                           sk->sk_protocol, ip_sock_rt_tos(sk),
                                           sk->sk_bound_dev_if);
                if (IS_ERR(rt))
                        goto no_route;
                if (connected) {
                        sk_setup_caps(sk, &rt->dst);
                } else {
                        skb_dst_set(skb, &rt->dst);
                        goto xmit;
                }
        }

        /* We don't need to clone dst here, it is guaranteed to not disappear.
         * __dev_xmit_skb() might force a refcount if needed.
         */
        skb_dst_set_noref(skb, &rt->dst);

xmit:
        /* Queue the packet to IP for output */
        rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
        rcu_read_unlock();

error:
        if (rc >= 0)
                rc = len;

out:
        release_sock(sk);
        return rc;

no_route:
        rcu_read_unlock();
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        rc = -EHOSTUNREACH;
        goto out;
}
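
/* For reference, a minimal userspace sketch (not part of this file; the
 * connection ID value below is only an example) for creating and binding
 * an L2TP/IP control socket:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
 *	struct sockaddr_l2tpip sa = {
 *		.l2tp_family = AF_INET,
 *		.l2tp_addr.s_addr = htonl(INADDR_ANY),
 *		.l2tp_conn_id = 42,
 *	};
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * Control frames are then exchanged with sendmsg()/recvmsg() on fd;
 * connect() may be used to fix the peer address and peer connection ID.
 */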

static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
                           size_t len, int flags, int *addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        size_t copied = 0;
        int err = -EOPNOTSUPP;
        DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
        struct sk_buff *skb;

        if (flags & MSG_OOB)
                goto out;

        skb = skb_recv_datagram(sk, flags, &err);
        if (!skb)
                goto out;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        err = skb_copy_datagram_msg(skb, 0, msg, copied);
        if (err)
                goto done;

        sock_recv_timestamp(msg, sk, skb);

        /* Copy the address. */
        if (sin) {
                sin->sin_family = AF_INET;
                sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
                sin->sin_port = 0;
                memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
                *addr_len = sizeof(*sin);
        }
        if (inet_cmsg_flags(inet))
                ip_cmsg_recv(msg, skb);
        if (flags & MSG_TRUNC)
                copied = skb->len;
done:
        skb_free_datagram(sk, skb);
out:
        return err ? err : copied;
}

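/* SIOCOUTQ reports the amount of transmit data queued on the socket;
 * SIOCINQ reports the size of the next pending packet in the receive
 * queue (zero if the queue is empty).
 */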
int l2tp_ioctl(struct sock *sk, int cmd, int *karg)
{
        struct sk_buff *skb;

        switch (cmd) {
        case SIOCOUTQ:
                *karg = sk_wmem_alloc_get(sk);
                break;
        case SIOCINQ:
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
                *karg = skb ? skb->len : 0;
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                break;

        default:
                return -ENOIOCTLCMD;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(l2tp_ioctl);

static struct proto l2tp_ip_prot = {
        .name = "L2TP/IP",
        .owner = THIS_MODULE,
        .init = l2tp_ip_open,
        .close = l2tp_ip_close,
        .bind = l2tp_ip_bind,
        .connect = l2tp_ip_connect,
        .disconnect = l2tp_ip_disconnect,
        .ioctl = l2tp_ioctl,
        .destroy = l2tp_ip_destroy_sock,
        .setsockopt = ip_setsockopt,
        .getsockopt = ip_getsockopt,
        .sendmsg = l2tp_ip_sendmsg,
        .recvmsg = l2tp_ip_recvmsg,
        .backlog_rcv = l2tp_ip_backlog_recv,
        .hash = l2tp_ip_hash,
        .unhash = l2tp_ip_unhash,
        .obj_size = sizeof(struct l2tp_ip_sock),
};

static const struct proto_ops l2tp_ip_ops = {
        .family = PF_INET,
        .owner = THIS_MODULE,
        .release = inet_release,
        .bind = inet_bind,
        .connect = inet_dgram_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = l2tp_ip_getname,
        .poll = datagram_poll,
        .ioctl = inet_ioctl,
        .gettstamp = sock_gettstamp,
        .listen = sock_no_listen,
        .shutdown = inet_shutdown,
        .setsockopt = sock_common_setsockopt,
        .getsockopt = sock_common_getsockopt,
        .sendmsg = inet_sendmsg,
        .recvmsg = sock_common_recvmsg,
        .mmap = sock_no_mmap,
};

static struct inet_protosw l2tp_ip_protosw = {
        .type = SOCK_DGRAM,
        .protocol = IPPROTO_L2TP,
        .prot = &l2tp_ip_prot,
        .ops = &l2tp_ip_ops,
};

static struct net_protocol l2tp_ip_protocol __read_mostly = {
        .handler = l2tp_ip_recv,
};

static __net_init int l2tp_ip_init_net(struct net *net)
{
        struct l2tp_ip_net *pn = net_generic(net, l2tp_ip_net_id);

        rwlock_init(&pn->l2tp_ip_lock);
        INIT_HLIST_HEAD(&pn->l2tp_ip_table);
        INIT_HLIST_HEAD(&pn->l2tp_ip_bind_table);
        return 0;
}

static __net_exit void l2tp_ip_exit_net(struct net *net)
{
        struct l2tp_ip_net *pn = l2tp_ip_pernet(net);

        write_lock_bh(&pn->l2tp_ip_lock);
        WARN_ON_ONCE(hlist_count_nodes(&pn->l2tp_ip_table) != 0);
        WARN_ON_ONCE(hlist_count_nodes(&pn->l2tp_ip_bind_table) != 0);
        write_unlock_bh(&pn->l2tp_ip_lock);
}

static struct pernet_operations l2tp_ip_net_ops = {
        .init = l2tp_ip_init_net,
        .exit = l2tp_ip_exit_net,
        .id = &l2tp_ip_net_id,
        .size = sizeof(struct l2tp_ip_net),
};

static int __init l2tp_ip_init(void)
{
        int err;

        pr_info("L2TP IP encapsulation support (L2TPv3)\n");

        err = register_pernet_device(&l2tp_ip_net_ops);
        if (err)
                goto out;

        err = proto_register(&l2tp_ip_prot, 1);
        if (err != 0)
                goto out1;

        err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
        if (err)
                goto out2;

        inet_register_protosw(&l2tp_ip_protosw);
        return 0;

out2:
        proto_unregister(&l2tp_ip_prot);
out1:
        unregister_pernet_device(&l2tp_ip_net_ops);
out:
        return err;
}

static void __exit l2tp_ip_exit(void)
{
        inet_unregister_protosw(&l2tp_ip_protosw);
        inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
        proto_unregister(&l2tp_ip_prot);
        unregister_pernet_device(&l2tp_ip_net_ops);
}

module_init(l2tp_ip_init);
module_exit(l2tp_ip_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP over IP");
MODULE_VERSION("1.0");

/* Use the values of SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol,
 * because __stringify doesn't like enums
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 115, 2);
MODULE_ALIAS_NET_PF_PROTO(PF_INET, 115);