Linux Audio

Check our new training course

Loading...
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* L2TP core.
   3 *
   4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
   5 *
   6 * This file contains some code of the original L2TPv2 pppol2tp
   7 * driver, which has the following copyright:
   8 *
   9 * Authors:	Martijn van Oosterhout <kleptog@svana.org>
  10 *		James Chapman (jchapman@katalix.com)
  11 * Contributors:
  12 *		Michal Ostrowski <mostrows@speakeasy.net>
  13 *		Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
  14 *		David S. Miller (davem@redhat.com)
  15 */
  16
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18
  19#include <linux/module.h>
  20#include <linux/string.h>
  21#include <linux/list.h>
  22#include <linux/rculist.h>
  23#include <linux/uaccess.h>
  24
  25#include <linux/kernel.h>
  26#include <linux/spinlock.h>
  27#include <linux/kthread.h>
  28#include <linux/sched.h>
  29#include <linux/slab.h>
  30#include <linux/errno.h>
  31#include <linux/jiffies.h>
  32
  33#include <linux/netdevice.h>
  34#include <linux/net.h>
  35#include <linux/inetdevice.h>
  36#include <linux/skbuff.h>
  37#include <linux/init.h>
  38#include <linux/in.h>
  39#include <linux/ip.h>
  40#include <linux/udp.h>
  41#include <linux/l2tp.h>
 
  42#include <linux/sort.h>
  43#include <linux/file.h>
  44#include <linux/nsproxy.h>
  45#include <net/net_namespace.h>
  46#include <net/netns/generic.h>
  47#include <net/dst.h>
  48#include <net/ip.h>
  49#include <net/udp.h>
  50#include <net/udp_tunnel.h>
  51#include <net/inet_common.h>
  52#include <net/xfrm.h>
  53#include <net/protocol.h>
  54#include <net/inet6_connection_sock.h>
  55#include <net/inet_ecn.h>
  56#include <net/ip6_route.h>
  57#include <net/ip6_checksum.h>
  58
  59#include <asm/byteorder.h>
  60#include <linux/atomic.h>
  61
  62#include "l2tp_core.h"
 
  63
  64#define CREATE_TRACE_POINTS
  65#include "trace.h"
  66
  67#define L2TP_DRV_VERSION	"V2.0"
  68
  69/* L2TP header constants */
  70#define L2TP_HDRFLAG_T	   0x8000
  71#define L2TP_HDRFLAG_L	   0x4000
  72#define L2TP_HDRFLAG_S	   0x0800
  73#define L2TP_HDRFLAG_O	   0x0200
  74#define L2TP_HDRFLAG_P	   0x0100
  75
  76#define L2TP_HDR_VER_MASK  0x000F
  77#define L2TP_HDR_VER_2	   0x0002
  78#define L2TP_HDR_VER_3	   0x0003
  79
  80/* L2TPv3 default L2-specific sublayer */
  81#define L2TP_SLFLAG_S	   0x40000000
  82#define L2TP_SL_SEQ_MASK   0x00ffffff
  83
  84#define L2TP_HDR_SIZE_MAX		14
  85
  86/* Default trace flags */
  87#define L2TP_DEFAULT_DEBUG_FLAGS	0
  88
  89#define L2TP_DEPTH_NESTING		2
  90#if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
  91#error "L2TP requires its own lockdep subclass"
  92#endif
  93
  94/* Private data stored for received packets in the skb.
  95 */
  96struct l2tp_skb_cb {
  97	u32			ns;
  98	u16			has_seq;
  99	u16			length;
 100	unsigned long		expires;
 101};
 102
 103#define L2TP_SKB_CB(skb)	((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)])
 104
 105static struct workqueue_struct *l2tp_wq;
 106
 107/* per-net private data for this module */
 108static unsigned int l2tp_net_id;
 109struct l2tp_net {
 110	/* Lock for write access to l2tp_tunnel_idr */
 111	spinlock_t l2tp_tunnel_idr_lock;
 112	struct idr l2tp_tunnel_idr;
 113	/* Lock for write access to l2tp_v[23]_session_idr/htable */
 114	spinlock_t l2tp_session_idr_lock;
 115	struct idr l2tp_v2_session_idr;
 116	struct idr l2tp_v3_session_idr;
 117	struct hlist_head l2tp_v3_session_htable[16];
 118};
 119
 120static u32 l2tp_v2_session_key(u16 tunnel_id, u16 session_id)
 121{
 122	return ((u32)tunnel_id) << 16 | session_id;
 123}
 124
 125static unsigned long l2tp_v3_session_hashkey(struct sock *sk, u32 session_id)
 126{
 127	return ((unsigned long)sk) + session_id;
 128}
 129
 130#if IS_ENABLED(CONFIG_IPV6)
 131static bool l2tp_sk_is_v6(struct sock *sk)
 132{
 133	return sk->sk_family == PF_INET6 &&
 134	       !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
 135}
 136#endif
 137
 138static struct l2tp_net *l2tp_pernet(const struct net *net)
 139{
 140	return net_generic(net, l2tp_net_id);
 141}
 142
 143static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
 
 
 
 
 
 
 144{
 145	struct sock *sk = tunnel->sock;
 146
 147	trace_free_tunnel(tunnel);
 148
 149	if (sk) {
 150		/* Disable udp encapsulation */
 151		switch (tunnel->encap) {
 152		case L2TP_ENCAPTYPE_UDP:
 153			/* No longer an encapsulation socket. See net/ipv4/udp.c */
 154			WRITE_ONCE(udp_sk(sk)->encap_type, 0);
 155			udp_sk(sk)->encap_rcv = NULL;
 156			udp_sk(sk)->encap_destroy = NULL;
 157			break;
 158		case L2TP_ENCAPTYPE_IP:
 159			break;
 160		}
 161
 162		tunnel->sock = NULL;
 163		sock_put(sk);
 164	}
 
 
 
 
 
 
 
 
 165
 166	kfree_rcu(tunnel, rcu);
 
 
 
 
 167}
 168
 169static void l2tp_session_free(struct l2tp_session *session)
 170{
 171	trace_free_session(session);
 172	if (session->tunnel)
 173		l2tp_tunnel_put(session->tunnel);
 174	kfree_rcu(session, rcu);
 175}
 176
 177struct l2tp_tunnel *l2tp_sk_to_tunnel(const struct sock *sk)
 178{
 179	const struct net *net = sock_net(sk);
 180	unsigned long tunnel_id, tmp;
 181	struct l2tp_tunnel *tunnel;
 182	struct l2tp_net *pn;
 183
 184	rcu_read_lock_bh();
 185	pn = l2tp_pernet(net);
 186	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
 187		if (tunnel &&
 188		    tunnel->sock == sk &&
 189		    refcount_inc_not_zero(&tunnel->ref_count)) {
 190			rcu_read_unlock_bh();
 191			return tunnel;
 192		}
 193	}
 194	rcu_read_unlock_bh();
 195
 196	return NULL;
 197}
 198EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);
 199
 200void l2tp_tunnel_put(struct l2tp_tunnel *tunnel)
 
 
 
 
 
 
 201{
 202	if (refcount_dec_and_test(&tunnel->ref_count))
 203		l2tp_tunnel_free(tunnel);
 204}
 205EXPORT_SYMBOL_GPL(l2tp_tunnel_put);
 206
 207void l2tp_session_put(struct l2tp_session *session)
 
 
 
 
 
 
 208{
 209	if (refcount_dec_and_test(&session->ref_count))
 210		l2tp_session_free(session);
 211}
 212EXPORT_SYMBOL_GPL(l2tp_session_put);
 213
 214/* Lookup a tunnel. A new reference is held on the returned tunnel. */
 215struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
 216{
 217	const struct l2tp_net *pn = l2tp_pernet(net);
 218	struct l2tp_tunnel *tunnel;
 219
 220	rcu_read_lock_bh();
 221	tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id);
 222	if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) {
 223		rcu_read_unlock_bh();
 224		return tunnel;
 225	}
 226	rcu_read_unlock_bh();
 227
 228	return NULL;
 229}
 230EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
 231
 232struct l2tp_tunnel *l2tp_tunnel_get_next(const struct net *net, unsigned long *key)
 233{
 234	struct l2tp_net *pn = l2tp_pernet(net);
 235	struct l2tp_tunnel *tunnel = NULL;
 
 
 236
 237	rcu_read_lock_bh();
 238again:
 239	tunnel = idr_get_next_ul(&pn->l2tp_tunnel_idr, key);
 240	if (tunnel) {
 241		if (refcount_inc_not_zero(&tunnel->ref_count)) {
 242			rcu_read_unlock_bh();
 243			return tunnel;
 244		}
 245		(*key)++;
 246		goto again;
 247	}
 248	rcu_read_unlock_bh();
 249
 250	return NULL;
 251}
 252EXPORT_SYMBOL_GPL(l2tp_tunnel_get_next);
 253
 254struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk, u32 session_id)
 
 255{
 256	const struct l2tp_net *pn = l2tp_pernet(net);
 257	struct l2tp_session *session;
 258
 259	rcu_read_lock_bh();
 260	session = idr_find(&pn->l2tp_v3_session_idr, session_id);
 261	if (session && !hash_hashed(&session->hlist) &&
 262	    refcount_inc_not_zero(&session->ref_count)) {
 263		rcu_read_unlock_bh();
 264		return session;
 265	}
 266
 267	/* If we get here and session is non-NULL, the session_id
 268	 * collides with one in another tunnel. If sk is non-NULL,
 269	 * find the session matching sk.
 270	 */
 271	if (session && sk) {
 272		unsigned long key = l2tp_v3_session_hashkey(sk, session->session_id);
 273
 274		hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
 275					   hlist, key) {
 276			/* session->tunnel may be NULL if another thread is in
 277			 * l2tp_session_register and has added an item to
 278			 * l2tp_v3_session_htable but hasn't yet added the
 279			 * session to its tunnel's session_list.
 280			 */
 281			struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
 282
 283			if (session->session_id == session_id &&
 284			    tunnel && tunnel->sock == sk &&
 285			    refcount_inc_not_zero(&session->ref_count)) {
 286				rcu_read_unlock_bh();
 287				return session;
 288			}
 289		}
 290	}
 291	rcu_read_unlock_bh();
 292
 293	return NULL;
 294}
 295EXPORT_SYMBOL_GPL(l2tp_v3_session_get);
 296
 297struct l2tp_session *l2tp_v2_session_get(const struct net *net, u16 tunnel_id, u16 session_id)
 298{
 299	u32 session_key = l2tp_v2_session_key(tunnel_id, session_id);
 300	const struct l2tp_net *pn = l2tp_pernet(net);
 301	struct l2tp_session *session;
 302
 303	rcu_read_lock_bh();
 304	session = idr_find(&pn->l2tp_v2_session_idr, session_key);
 305	if (session && refcount_inc_not_zero(&session->ref_count)) {
 306		rcu_read_unlock_bh();
 307		return session;
 308	}
 309	rcu_read_unlock_bh();
 310
 311	return NULL;
 312}
 313EXPORT_SYMBOL_GPL(l2tp_v2_session_get);
 314
 315struct l2tp_session *l2tp_session_get(const struct net *net, struct sock *sk, int pver,
 316				      u32 tunnel_id, u32 session_id)
 317{
 318	if (pver == L2TP_HDR_VER_2)
 319		return l2tp_v2_session_get(net, tunnel_id, session_id);
 320	else
 321		return l2tp_v3_session_get(net, sk, session_id);
 322}
 323EXPORT_SYMBOL_GPL(l2tp_session_get);
 324
 325static struct l2tp_session *l2tp_v2_session_get_next(const struct net *net,
 326						     u16 tid,
 327						     unsigned long *key)
 328{
 329	struct l2tp_net *pn = l2tp_pernet(net);
 330	struct l2tp_session *session = NULL;
 331
 332	/* Start searching within the range of the tid */
 333	if (*key == 0)
 334		*key = l2tp_v2_session_key(tid, 0);
 335
 336	rcu_read_lock_bh();
 337again:
 338	session = idr_get_next_ul(&pn->l2tp_v2_session_idr, key);
 339	if (session) {
 340		struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
 341
 342		/* ignore sessions with id 0 as they are internal for pppol2tp */
 343		if (session->session_id == 0) {
 344			(*key)++;
 345			goto again;
 346		}
 347
 348		if (tunnel->tunnel_id == tid &&
 349		    refcount_inc_not_zero(&session->ref_count)) {
 350			rcu_read_unlock_bh();
 
 351			return session;
 352		}
 353
 354		(*key)++;
 355		if (tunnel->tunnel_id == tid)
 356			goto again;
 357	}
 358	rcu_read_unlock_bh();
 359
 360	return NULL;
 361}
 
 362
 363static struct l2tp_session *l2tp_v3_session_get_next(const struct net *net,
 364						     u32 tid, struct sock *sk,
 365						     unsigned long *key)
 366{
 367	struct l2tp_net *pn = l2tp_pernet(net);
 368	struct l2tp_session *session = NULL;
 
 369
 370	rcu_read_lock_bh();
 371again:
 372	session = idr_get_next_ul(&pn->l2tp_v3_session_idr, key);
 373	if (session && !hash_hashed(&session->hlist)) {
 374		struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
 375
 376		if (tunnel && tunnel->tunnel_id == tid &&
 377		    refcount_inc_not_zero(&session->ref_count)) {
 378			rcu_read_unlock_bh();
 379			return session;
 380		}
 381
 382		(*key)++;
 383		goto again;
 384	}
 385
 386	/* If we get here and session is non-NULL, the IDR entry may be one
 387	 * where the session_id collides with one in another tunnel. Check
 388	 * session_htable for a match. There can only be one session of a given
 389	 * ID per tunnel so we can return as soon as a match is found.
 390	 */
 391	if (session && hash_hashed(&session->hlist)) {
 392		unsigned long hkey = l2tp_v3_session_hashkey(sk, session->session_id);
 393		u32 sid = session->session_id;
 394
 395		hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
 396					   hlist, hkey) {
 397			struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);
 398
 399			if (session->session_id == sid &&
 400			    tunnel && tunnel->tunnel_id == tid &&
 401			    refcount_inc_not_zero(&session->ref_count)) {
 402				rcu_read_unlock_bh();
 403				return session;
 404			}
 405		}
 406
 407		/* If no match found, the colliding session ID isn't in our
 408		 * tunnel so try the next session ID.
 409		 */
 410		(*key)++;
 411		goto again;
 412	}
 413
 414	rcu_read_unlock_bh();
 415
 416	return NULL;
 417}
 418
 419struct l2tp_session *l2tp_session_get_next(const struct net *net, struct sock *sk, int pver,
 420					   u32 tunnel_id, unsigned long *key)
 421{
 422	if (pver == L2TP_HDR_VER_2)
 423		return l2tp_v2_session_get_next(net, tunnel_id, key);
 424	else
 425		return l2tp_v3_session_get_next(net, tunnel_id, sk, key);
 426}
 427EXPORT_SYMBOL_GPL(l2tp_session_get_next);
 428
 429/* Lookup a session by interface name.
 430 * This is very inefficient but is only used by management interfaces.
 431 */
 432struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
 433						const char *ifname)
 434{
 435	struct l2tp_net *pn = l2tp_pernet(net);
 436	unsigned long tunnel_id, tmp;
 437	struct l2tp_session *session;
 438	struct l2tp_tunnel *tunnel;
 439
 440	rcu_read_lock_bh();
 441	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
 442		if (tunnel) {
 443			list_for_each_entry_rcu(session, &tunnel->session_list, list) {
 444				if (!strcmp(session->ifname, ifname)) {
 445					refcount_inc(&session->ref_count);
 446					rcu_read_unlock_bh();
 447
 448					return session;
 449				}
 450			}
 451		}
 452	}
 
 453	rcu_read_unlock_bh();
 454
 455	return NULL;
 456}
 457EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
 458
 459static void l2tp_session_coll_list_add(struct l2tp_session_coll_list *clist,
 460				       struct l2tp_session *session)
 461{
 462	refcount_inc(&session->ref_count);
 463	WARN_ON_ONCE(session->coll_list);
 464	session->coll_list = clist;
 465	spin_lock(&clist->lock);
 466	list_add(&session->clist, &clist->list);
 467	spin_unlock(&clist->lock);
 468}
 469
 470static int l2tp_session_collision_add(struct l2tp_net *pn,
 471				      struct l2tp_session *session1,
 472				      struct l2tp_session *session2)
 473{
 474	struct l2tp_session_coll_list *clist;
 475
 476	lockdep_assert_held(&pn->l2tp_session_idr_lock);
 477
 478	if (!session2)
 479		return -EEXIST;
 480
 481	/* If existing session is in IP-encap tunnel, refuse new session */
 482	if (session2->tunnel->encap == L2TP_ENCAPTYPE_IP)
 483		return -EEXIST;
 484
 485	clist = session2->coll_list;
 486	if (!clist) {
 487		/* First collision. Allocate list to manage the collided sessions
 488		 * and add the existing session to the list.
 489		 */
 490		clist = kmalloc(sizeof(*clist), GFP_ATOMIC);
 491		if (!clist)
 492			return -ENOMEM;
 493
 494		spin_lock_init(&clist->lock);
 495		INIT_LIST_HEAD(&clist->list);
 496		refcount_set(&clist->ref_count, 1);
 497		l2tp_session_coll_list_add(clist, session2);
 498	}
 499
 500	/* If existing session isn't already in the session hlist, add it. */
 501	if (!hash_hashed(&session2->hlist))
 502		hash_add_rcu(pn->l2tp_v3_session_htable, &session2->hlist,
 503			     session2->hlist_key);
 504
 505	/* Add new session to the hlist and collision list */
 506	hash_add_rcu(pn->l2tp_v3_session_htable, &session1->hlist,
 507		     session1->hlist_key);
 508	refcount_inc(&clist->ref_count);
 509	l2tp_session_coll_list_add(clist, session1);
 510
 511	return 0;
 512}
 513
 514static void l2tp_session_collision_del(struct l2tp_net *pn,
 515				       struct l2tp_session *session)
 516{
 517	struct l2tp_session_coll_list *clist = session->coll_list;
 518	unsigned long session_key = session->session_id;
 519	struct l2tp_session *session2;
 520
 521	lockdep_assert_held(&pn->l2tp_session_idr_lock);
 522
 523	hash_del_rcu(&session->hlist);
 524
 525	if (clist) {
 526		/* Remove session from its collision list. If there
 527		 * are other sessions with the same ID, replace this
 528		 * session's IDR entry with that session, otherwise
 529		 * remove the IDR entry. If this is the last session,
 530		 * the collision list data is freed.
 531		 */
 532		spin_lock(&clist->lock);
 533		list_del_init(&session->clist);
 534		session2 = list_first_entry_or_null(&clist->list, struct l2tp_session, clist);
 535		if (session2) {
 536			void *old = idr_replace(&pn->l2tp_v3_session_idr, session2, session_key);
 537
 538			WARN_ON_ONCE(IS_ERR_VALUE(old));
 539		} else {
 540			void *removed = idr_remove(&pn->l2tp_v3_session_idr, session_key);
 541
 542			WARN_ON_ONCE(removed != session);
 543		}
 544		session->coll_list = NULL;
 545		spin_unlock(&clist->lock);
 546		if (refcount_dec_and_test(&clist->ref_count))
 547			kfree(clist);
 548		l2tp_session_put(session);
 549	}
 550}
 551
 552int l2tp_session_register(struct l2tp_session *session,
 553			  struct l2tp_tunnel *tunnel)
 554{
 555	struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
 556	struct l2tp_session *other_session = NULL;
 557	void *old = NULL;
 558	u32 session_key;
 559	int err;
 560
 561	spin_lock_bh(&tunnel->list_lock);
 562	spin_lock_bh(&pn->l2tp_session_idr_lock);
 563
 
 564	if (!tunnel->acpt_newsess) {
 565		err = -ENODEV;
 566		goto out;
 567	}
 568
 
 
 
 
 
 
 569	if (tunnel->version == L2TP_HDR_VER_3) {
 570		session_key = session->session_id;
 571		err = idr_alloc_u32(&pn->l2tp_v3_session_idr, NULL,
 572				    &session_key, session_key, GFP_ATOMIC);
 
 
 573		/* IP encap expects session IDs to be globally unique, while
 574		 * UDP encap doesn't. This isn't per the RFC, which says that
 575		 * sessions are identified only by the session ID, but is to
 576		 * support existing userspace which depends on it.
 577		 */
 578		if (err == -ENOSPC && tunnel->encap == L2TP_ENCAPTYPE_UDP) {
 579			other_session = idr_find(&pn->l2tp_v3_session_idr,
 580						 session_key);
 581			err = l2tp_session_collision_add(pn, session,
 582							 other_session);
 583		}
 584	} else {
 585		session_key = l2tp_v2_session_key(tunnel->tunnel_id,
 586						  session->session_id);
 587		err = idr_alloc_u32(&pn->l2tp_v2_session_idr, NULL,
 588				    &session_key, session_key, GFP_ATOMIC);
 589	}
 590
 591	if (err) {
 592		if (err == -ENOSPC)
 593			err = -EEXIST;
 594		goto out;
 595	}
 596
 597	refcount_inc(&tunnel->ref_count);
 598	WRITE_ONCE(session->tunnel, tunnel);
 599	list_add_rcu(&session->list, &tunnel->session_list);
 600
 601	/* this makes session available to lockless getters */
 602	if (tunnel->version == L2TP_HDR_VER_3) {
 603		if (!other_session)
 604			old = idr_replace(&pn->l2tp_v3_session_idr, session, session_key);
 605	} else {
 606		old = idr_replace(&pn->l2tp_v2_session_idr, session, session_key);
 607	}
 608
 609	/* old should be NULL, unless something removed or modified
 610	 * the IDR entry after our idr_alloc_32 above (which shouldn't
 611	 * happen).
 612	 */
 613	WARN_ON_ONCE(old);
 614out:
 615	spin_unlock_bh(&pn->l2tp_session_idr_lock);
 616	spin_unlock_bh(&tunnel->list_lock);
 617
 618	if (!err)
 619		trace_register_session(session);
 
 
 
 
 620
 621	return err;
 622}
 623EXPORT_SYMBOL_GPL(l2tp_session_register);
 624
 625/*****************************************************************************
 626 * Receive data handling
 627 *****************************************************************************/
 628
 629/* Queue a skb in order. We come here only if the skb has an L2TP sequence
 630 * number.
 631 */
 632static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
 633{
 634	struct sk_buff *skbp;
 635	struct sk_buff *tmp;
 636	u32 ns = L2TP_SKB_CB(skb)->ns;
 637
 638	spin_lock_bh(&session->reorder_q.lock);
 639	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
 640		if (L2TP_SKB_CB(skbp)->ns > ns) {
 641			__skb_queue_before(&session->reorder_q, skbp, skb);
 642			atomic_long_inc(&session->stats.rx_oos_packets);
 643			goto out;
 644		}
 645	}
 646
 647	__skb_queue_tail(&session->reorder_q, skb);
 648
 649out:
 650	spin_unlock_bh(&session->reorder_q.lock);
 651}
 652
 653/* Dequeue a single skb.
 654 */
 655static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
 656{
 657	struct l2tp_tunnel *tunnel = session->tunnel;
 658	int length = L2TP_SKB_CB(skb)->length;
 659
 660	/* We're about to requeue the skb, so return resources
 661	 * to its current owner (a socket receive buffer).
 662	 */
 663	skb_orphan(skb);
 664
 665	atomic_long_inc(&tunnel->stats.rx_packets);
 666	atomic_long_add(length, &tunnel->stats.rx_bytes);
 667	atomic_long_inc(&session->stats.rx_packets);
 668	atomic_long_add(length, &session->stats.rx_bytes);
 669
 670	if (L2TP_SKB_CB(skb)->has_seq) {
 671		/* Bump our Nr */
 672		session->nr++;
 673		session->nr &= session->nr_max;
 674		trace_session_seqnum_update(session);
 675	}
 676
 677	/* call private receive handler */
 678	if (session->recv_skb)
 679		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
 680	else
 681		kfree_skb(skb);
 682}
 683
 684/* Dequeue skbs from the session's reorder_q, subject to packet order.
 685 * Skbs that have been in the queue for too long are simply discarded.
 686 */
 687static void l2tp_recv_dequeue(struct l2tp_session *session)
 688{
 689	struct sk_buff *skb;
 690	struct sk_buff *tmp;
 691
 692	/* If the pkt at the head of the queue has the nr that we
 693	 * expect to send up next, dequeue it and any other
 694	 * in-sequence packets behind it.
 695	 */
 696start:
 697	spin_lock_bh(&session->reorder_q.lock);
 698	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
 699		struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
 700
 701		/* If the packet has been pending on the queue for too long, discard it */
 702		if (time_after(jiffies, cb->expires)) {
 703			atomic_long_inc(&session->stats.rx_seq_discards);
 704			atomic_long_inc(&session->stats.rx_errors);
 705			trace_session_pkt_expired(session, cb->ns);
 706			session->reorder_skip = 1;
 707			__skb_unlink(skb, &session->reorder_q);
 708			kfree_skb(skb);
 709			continue;
 710		}
 711
 712		if (cb->has_seq) {
 713			if (session->reorder_skip) {
 714				session->reorder_skip = 0;
 715				session->nr = cb->ns;
 716				trace_session_seqnum_reset(session);
 717			}
 718			if (cb->ns != session->nr)
 719				goto out;
 720		}
 721		__skb_unlink(skb, &session->reorder_q);
 722
 723		/* Process the skb. We release the queue lock while we
 724		 * do so to let other contexts process the queue.
 725		 */
 726		spin_unlock_bh(&session->reorder_q.lock);
 727		l2tp_recv_dequeue_skb(session, skb);
 728		goto start;
 729	}
 730
 731out:
 732	spin_unlock_bh(&session->reorder_q.lock);
 733}
 734
 735static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
 736{
 737	u32 nws;
 738
 739	if (nr >= session->nr)
 740		nws = nr - session->nr;
 741	else
 742		nws = (session->nr_max + 1) - (session->nr - nr);
 743
 744	return nws < session->nr_window_size;
 745}
 746
 747/* If packet has sequence numbers, queue it if acceptable. Returns 0 if
 748 * acceptable, else non-zero.
 749 */
 750static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
 751{
 752	struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
 753
 754	if (!l2tp_seq_check_rx_window(session, cb->ns)) {
 755		/* Packet sequence number is outside allowed window.
 756		 * Discard it.
 757		 */
 758		trace_session_pkt_outside_rx_window(session, cb->ns);
 759		goto discard;
 760	}
 761
 762	if (session->reorder_timeout != 0) {
 763		/* Packet reordering enabled. Add skb to session's
 764		 * reorder queue, in order of ns.
 765		 */
 766		l2tp_recv_queue_skb(session, skb);
 767		goto out;
 768	}
 769
 770	/* Packet reordering disabled. Discard out-of-sequence packets, while
 771	 * tracking the number if in-sequence packets after the first OOS packet
 772	 * is seen. After nr_oos_count_max in-sequence packets, reset the
 773	 * sequence number to re-enable packet reception.
 774	 */
 775	if (cb->ns == session->nr) {
 776		skb_queue_tail(&session->reorder_q, skb);
 777	} else {
 778		u32 nr_oos = cb->ns;
 779		u32 nr_next = (session->nr_oos + 1) & session->nr_max;
 780
 781		if (nr_oos == nr_next)
 782			session->nr_oos_count++;
 783		else
 784			session->nr_oos_count = 0;
 785
 786		session->nr_oos = nr_oos;
 787		if (session->nr_oos_count > session->nr_oos_count_max) {
 788			session->reorder_skip = 1;
 789		}
 790		if (!session->reorder_skip) {
 791			atomic_long_inc(&session->stats.rx_seq_discards);
 792			trace_session_pkt_oos(session, cb->ns);
 793			goto discard;
 794		}
 795		skb_queue_tail(&session->reorder_q, skb);
 796	}
 797
 798out:
 799	return 0;
 800
 801discard:
 802	return 1;
 803}
 804
 805/* Do receive processing of L2TP data frames. We handle both L2TPv2
 806 * and L2TPv3 data frames here.
 807 *
 808 * L2TPv2 Data Message Header
 809 *
 810 *  0                   1                   2                   3
 811 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 812 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 813 * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
 814 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 815 * |           Tunnel ID           |           Session ID          |
 816 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 817 * |             Ns (opt)          |             Nr (opt)          |
 818 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 819 * |      Offset Size (opt)        |    Offset pad... (opt)
 820 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 821 *
 822 * Data frames are marked by T=0. All other fields are the same as
 823 * those in L2TP control frames.
 824 *
 825 * L2TPv3 Data Message Header
 826 *
 827 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 828 * |                      L2TP Session Header                      |
 829 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 830 * |                      L2-Specific Sublayer                     |
 831 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 832 * |                        Tunnel Payload                      ...
 833 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 834 *
 835 * L2TPv3 Session Header Over IP
 836 *
 837 *  0                   1                   2                   3
 838 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 839 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 840 * |                           Session ID                          |
 841 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 842 * |               Cookie (optional, maximum 64 bits)...
 843 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 844 *                                                                 |
 845 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 846 *
 847 * L2TPv3 L2-Specific Sublayer Format
 848 *
 849 *  0                   1                   2                   3
 850 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 851 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 852 * |x|S|x|x|x|x|x|x|              Sequence Number                  |
 853 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 854 *
 855 * Cookie value and sublayer format are negotiated with the peer when
 856 * the session is set up. Unlike L2TPv2, we do not need to parse the
 857 * packet header to determine if optional fields are present.
 858 *
 859 * Caller must already have parsed the frame and determined that it is
 860 * a data (not control) frame before coming here. Fields up to the
 861 * session-id have already been parsed and ptr points to the data
 862 * after the session-id.
 863 */
 864void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 865		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
 866		      int length)
 867{
 868	struct l2tp_tunnel *tunnel = session->tunnel;
 869	int offset;
 870
 871	/* Parse and check optional cookie */
 872	if (session->peer_cookie_len > 0) {
 873		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
 874			pr_debug_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
 875					     tunnel->name, tunnel->tunnel_id,
 876					     session->session_id);
 877			atomic_long_inc(&session->stats.rx_cookie_discards);
 878			goto discard;
 879		}
 880		ptr += session->peer_cookie_len;
 881	}
 882
 883	/* Handle the optional sequence numbers. Sequence numbers are
 884	 * in different places for L2TPv2 and L2TPv3.
 885	 *
 886	 * If we are the LAC, enable/disable sequence numbers under
 887	 * the control of the LNS.  If no sequence numbers present but
 888	 * we were expecting them, discard frame.
 889	 */
 890	L2TP_SKB_CB(skb)->has_seq = 0;
 891	if (tunnel->version == L2TP_HDR_VER_2) {
 892		if (hdrflags & L2TP_HDRFLAG_S) {
 893			/* Store L2TP info in the skb */
 894			L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr);
 895			L2TP_SKB_CB(skb)->has_seq = 1;
 896			ptr += 2;
 897			/* Skip past nr in the header */
 898			ptr += 2;
 899
 900		}
 901	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
 902		u32 l2h = ntohl(*(__be32 *)ptr);
 903
 904		if (l2h & 0x40000000) {
 905			/* Store L2TP info in the skb */
 906			L2TP_SKB_CB(skb)->ns = l2h & 0x00ffffff;
 907			L2TP_SKB_CB(skb)->has_seq = 1;
 908		}
 909		ptr += 4;
 910	}
 911
 912	if (L2TP_SKB_CB(skb)->has_seq) {
 913		/* Received a packet with sequence numbers. If we're the LAC,
 914		 * check if we sre sending sequence numbers and if not,
 915		 * configure it so.
 916		 */
 917		if (!session->lns_mode && !session->send_seq) {
 918			trace_session_seqnum_lns_enable(session);
 919			session->send_seq = 1;
 920			l2tp_session_set_header_len(session, tunnel->version,
 921						    tunnel->encap);
 922		}
 923	} else {
 924		/* No sequence numbers.
 925		 * If user has configured mandatory sequence numbers, discard.
 926		 */
 927		if (session->recv_seq) {
 928			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
 929					     session->name);
 930			atomic_long_inc(&session->stats.rx_seq_discards);
 931			goto discard;
 932		}
 933
 934		/* If we're the LAC and we're sending sequence numbers, the
 935		 * LNS has requested that we no longer send sequence numbers.
 936		 * If we're the LNS and we're sending sequence numbers, the
 937		 * LAC is broken. Discard the frame.
 938		 */
 939		if (!session->lns_mode && session->send_seq) {
 940			trace_session_seqnum_lns_disable(session);
 941			session->send_seq = 0;
 942			l2tp_session_set_header_len(session, tunnel->version,
 943						    tunnel->encap);
 944		} else if (session->send_seq) {
 945			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
 946					     session->name);
 947			atomic_long_inc(&session->stats.rx_seq_discards);
 948			goto discard;
 949		}
 950	}
 951
 952	/* Session data offset is defined only for L2TPv2 and is
 953	 * indicated by an optional 16-bit value in the header.
 954	 */
 955	if (tunnel->version == L2TP_HDR_VER_2) {
 956		/* If offset bit set, skip it. */
 957		if (hdrflags & L2TP_HDRFLAG_O) {
 958			offset = ntohs(*(__be16 *)ptr);
 959			ptr += 2 + offset;
 960		}
 961	}
 962
 963	offset = ptr - optr;
 964	if (!pskb_may_pull(skb, offset))
 965		goto discard;
 966
 967	__skb_pull(skb, offset);
 968
 969	/* Prepare skb for adding to the session's reorder_q.  Hold
 970	 * packets for max reorder_timeout or 1 second if not
 971	 * reordering.
 972	 */
 973	L2TP_SKB_CB(skb)->length = length;
 974	L2TP_SKB_CB(skb)->expires = jiffies +
 975		(session->reorder_timeout ? session->reorder_timeout : HZ);
 976
 977	/* Add packet to the session's receive queue. Reordering is done here, if
 978	 * enabled. Saved L2TP protocol info is stored in skb->sb[].
 979	 */
 980	if (L2TP_SKB_CB(skb)->has_seq) {
 981		if (l2tp_recv_data_seq(session, skb))
 982			goto discard;
 983	} else {
 984		/* No sequence numbers. Add the skb to the tail of the
 985		 * reorder queue. This ensures that it will be
 986		 * delivered after all previous sequenced skbs.
 987		 */
 988		skb_queue_tail(&session->reorder_q, skb);
 989	}
 990
 991	/* Try to dequeue as many skbs from reorder_q as we can. */
 992	l2tp_recv_dequeue(session);
 993
 994	return;
 995
 996discard:
 997	atomic_long_inc(&session->stats.rx_errors);
 998	kfree_skb(skb);
 999}
1000EXPORT_SYMBOL_GPL(l2tp_recv_common);
1001
1002/* Drop skbs from the session's reorder_q
1003 */
1004static void l2tp_session_queue_purge(struct l2tp_session *session)
1005{
1006	struct sk_buff *skb = NULL;
1007
1008	while ((skb = skb_dequeue(&session->reorder_q))) {
1009		atomic_long_inc(&session->stats.rx_errors);
1010		kfree_skb(skb);
1011	}
1012}
1013
1014/* UDP encapsulation receive handler. See net/ipv4/udp.c for details. */
1015int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 
 
 
 
 
1016{
1017	struct l2tp_session *session = NULL;
1018	struct l2tp_tunnel *tunnel = NULL;
1019	struct net *net = sock_net(sk);
1020	unsigned char *ptr, *optr;
1021	u16 hdrflags;
 
1022	u16 version;
1023	int length;
1024
1025	/* UDP has verified checksum */
1026
1027	/* UDP always verifies the packet length. */
1028	__skb_pull(skb, sizeof(struct udphdr));
1029
1030	/* Short packet? */
1031	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX))
1032		goto pass;
 
 
 
1033
1034	/* Point to L2TP header */
1035	optr = skb->data;
1036	ptr = skb->data;
1037
1038	/* Get L2TP header flags */
1039	hdrflags = ntohs(*(__be16 *)ptr);
1040
1041	/* Get protocol version */
1042	version = hdrflags & L2TP_HDR_VER_MASK;
 
 
 
 
 
1043
1044	/* Get length of L2TP packet */
1045	length = skb->len;
1046
1047	/* If type is control packet, it is handled by userspace. */
1048	if (hdrflags & L2TP_HDRFLAG_T)
1049		goto pass;
1050
1051	/* Skip flags */
1052	ptr += 2;
1053
1054	if (version == L2TP_HDR_VER_2) {
1055		u16 tunnel_id, session_id;
1056
1057		/* If length is present, skip it */
1058		if (hdrflags & L2TP_HDRFLAG_L)
1059			ptr += 2;
1060
1061		/* Extract tunnel and session ID */
1062		tunnel_id = ntohs(*(__be16 *)ptr);
1063		ptr += 2;
1064		session_id = ntohs(*(__be16 *)ptr);
1065		ptr += 2;
1066
1067		session = l2tp_v2_session_get(net, tunnel_id, session_id);
1068	} else {
1069		u32 session_id;
1070
1071		ptr += 2;	/* skip reserved bits */
 
1072		session_id = ntohl(*(__be32 *)ptr);
1073		ptr += 4;
1074
1075		session = l2tp_v3_session_get(net, sk, session_id);
1076	}
1077
 
 
1078	if (!session || !session->recv_skb) {
1079		if (session)
1080			l2tp_session_put(session);
1081
1082		/* Not found? Pass to userspace to deal with */
 
 
1083		goto pass;
1084	}
1085
1086	tunnel = session->tunnel;
1087
1088	/* Check protocol version */
1089	if (version != tunnel->version)
1090		goto invalid;
1091
1092	if (version == L2TP_HDR_VER_3 &&
1093	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
1094		l2tp_session_put(session);
1095		goto invalid;
1096	}
1097
1098	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
1099	l2tp_session_put(session);
1100
1101	return 0;
1102
1103invalid:
1104	atomic_long_inc(&tunnel->stats.rx_invalid);
1105
1106pass:
1107	/* Put UDP header back */
1108	__skb_push(skb, sizeof(struct udphdr));
1109
1110	return 1;
1111}
1112EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
1113
1114/* UDP encapsulation receive error handler. See net/ipv4/udp.c for details. */
1115static void l2tp_udp_encap_err_recv(struct sock *sk, struct sk_buff *skb, int err,
1116				    __be16 port, u32 info, u8 *payload)
1117{
1118	sk->sk_err = err;
1119	sk_error_report(sk);
1120
1121	if (ip_hdr(skb)->version == IPVERSION) {
1122		if (inet_test_bit(RECVERR, sk))
1123			return ip_icmp_error(sk, skb, err, port, info, payload);
1124#if IS_ENABLED(CONFIG_IPV6)
1125	} else {
1126		if (inet6_test_bit(RECVERR6, sk))
1127			return ipv6_icmp_error(sk, skb, err, port, info, payload);
1128#endif
1129	}
 
 
 
 
 
 
 
 
 
 
 
 
 
1130}
 
1131
1132/************************************************************************
1133 * Transmit handling
1134 ***********************************************************************/
1135
1136/* Build an L2TP header for the session into the buffer provided.
1137 */
1138static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
1139{
1140	struct l2tp_tunnel *tunnel = session->tunnel;
1141	__be16 *bufp = buf;
1142	__be16 *optr = buf;
1143	u16 flags = L2TP_HDR_VER_2;
1144	u32 tunnel_id = tunnel->peer_tunnel_id;
1145	u32 session_id = session->peer_session_id;
1146
1147	if (session->send_seq)
1148		flags |= L2TP_HDRFLAG_S;
1149
1150	/* Setup L2TP header. */
1151	*bufp++ = htons(flags);
1152	*bufp++ = htons(tunnel_id);
1153	*bufp++ = htons(session_id);
1154	if (session->send_seq) {
1155		*bufp++ = htons(session->ns);
1156		*bufp++ = 0;
1157		session->ns++;
1158		session->ns &= 0xffff;
1159		trace_session_seqnum_update(session);
1160	}
1161
1162	return bufp - optr;
1163}
1164
1165static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
1166{
1167	struct l2tp_tunnel *tunnel = session->tunnel;
1168	char *bufp = buf;
1169	char *optr = bufp;
1170
1171	/* Setup L2TP header. The header differs slightly for UDP and
1172	 * IP encapsulations. For UDP, there is 4 bytes of flags.
1173	 */
1174	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1175		u16 flags = L2TP_HDR_VER_3;
1176		*((__be16 *)bufp) = htons(flags);
1177		bufp += 2;
1178		*((__be16 *)bufp) = 0;
1179		bufp += 2;
1180	}
1181
1182	*((__be32 *)bufp) = htonl(session->peer_session_id);
1183	bufp += 4;
1184	if (session->cookie_len) {
1185		memcpy(bufp, &session->cookie[0], session->cookie_len);
1186		bufp += session->cookie_len;
1187	}
1188	if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
1189		u32 l2h = 0;
1190
1191		if (session->send_seq) {
1192			l2h = 0x40000000 | session->ns;
1193			session->ns++;
1194			session->ns &= 0xffffff;
1195			trace_session_seqnum_update(session);
1196		}
1197
1198		*((__be32 *)bufp) = htonl(l2h);
1199		bufp += 4;
1200	}
1201
1202	return bufp - optr;
1203}
1204
1205/* Queue the packet to IP for output: tunnel socket lock must be held */
1206static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl)
1207{
1208	int err;
1209
1210	skb->ignore_df = 1;
1211	skb_dst_drop(skb);
1212#if IS_ENABLED(CONFIG_IPV6)
1213	if (l2tp_sk_is_v6(tunnel->sock))
1214		err = inet6_csk_xmit(tunnel->sock, skb, NULL);
1215	else
1216#endif
1217		err = ip_queue_xmit(tunnel->sock, skb, fl);
1218
1219	return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
1220}
1221
1222static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len)
1223{
1224	struct l2tp_tunnel *tunnel = session->tunnel;
1225	unsigned int data_len = skb->len;
1226	struct sock *sk = tunnel->sock;
1227	int headroom, uhlen, udp_len;
1228	int ret = NET_XMIT_SUCCESS;
1229	struct inet_sock *inet;
1230	struct udphdr *uh;
1231
1232	/* Check that there's enough headroom in the skb to insert IP,
1233	 * UDP and L2TP headers. If not enough, expand it to
1234	 * make room. Adjust truesize.
1235	 */
1236	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(*uh) : 0;
1237	headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len;
1238	if (skb_cow_head(skb, headroom)) {
1239		kfree_skb(skb);
1240		return NET_XMIT_DROP;
1241	}
1242
1243	/* Setup L2TP header */
1244	if (tunnel->version == L2TP_HDR_VER_2)
1245		l2tp_build_l2tpv2_header(session, __skb_push(skb, session->hdr_len));
1246	else
1247		l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));
1248
1249	/* Reset skb netfilter state */
1250	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1251	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
1252	nf_reset_ct(skb);
1253
1254	/* L2TP uses its own lockdep subclass to avoid lockdep splats caused by
1255	 * nested socket calls on the same lockdep socket class. This can
1256	 * happen when data from a user socket is routed over l2tp, which uses
1257	 * another userspace socket.
1258	 */
1259	spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING);
1260
1261	if (sock_owned_by_user(sk)) {
1262		kfree_skb(skb);
1263		ret = NET_XMIT_DROP;
1264		goto out_unlock;
1265	}
1266
1267	/* The user-space may change the connection status for the user-space
1268	 * provided socket at run time: we must check it under the socket lock
1269	 */
1270	if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
1271		kfree_skb(skb);
1272		ret = NET_XMIT_DROP;
1273		goto out_unlock;
1274	}
1275
1276	/* Report transmitted length before we add encap header, which keeps
1277	 * statistics consistent for both UDP and IP encap tx/rx paths.
1278	 */
1279	*len = skb->len;
1280
1281	inet = inet_sk(sk);
1282	switch (tunnel->encap) {
1283	case L2TP_ENCAPTYPE_UDP:
1284		/* Setup UDP header */
1285		__skb_push(skb, sizeof(*uh));
1286		skb_reset_transport_header(skb);
1287		uh = udp_hdr(skb);
1288		uh->source = inet->inet_sport;
1289		uh->dest = inet->inet_dport;
1290		udp_len = uhlen + session->hdr_len + data_len;
1291		uh->len = htons(udp_len);
1292
1293		/* Calculate UDP checksum if configured to do so */
1294#if IS_ENABLED(CONFIG_IPV6)
1295		if (l2tp_sk_is_v6(sk))
1296			udp6_set_csum(udp_get_no_check6_tx(sk),
1297				      skb, &inet6_sk(sk)->saddr,
1298				      &sk->sk_v6_daddr, udp_len);
1299		else
1300#endif
1301			udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
1302				     inet->inet_daddr, udp_len);
1303		break;
1304
1305	case L2TP_ENCAPTYPE_IP:
1306		break;
1307	}
1308
1309	ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);
1310
1311out_unlock:
1312	spin_unlock(&sk->sk_lock.slock);
1313
1314	return ret;
1315}
1316
1317/* If caller requires the skb to have a ppp header, the header must be
1318 * inserted in the skb data before calling this function.
1319 */
1320int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
1321{
1322	unsigned int len = 0;
1323	int ret;
1324
1325	ret = l2tp_xmit_core(session, skb, &len);
1326	if (ret == NET_XMIT_SUCCESS) {
1327		atomic_long_inc(&session->tunnel->stats.tx_packets);
1328		atomic_long_add(len, &session->tunnel->stats.tx_bytes);
1329		atomic_long_inc(&session->stats.tx_packets);
1330		atomic_long_add(len, &session->stats.tx_bytes);
1331	} else {
1332		atomic_long_inc(&session->tunnel->stats.tx_errors);
1333		atomic_long_inc(&session->stats.tx_errors);
1334	}
1335	return ret;
1336}
1337EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1338
1339/*****************************************************************************
1340 * Tinnel and session create/destroy.
1341 *****************************************************************************/
1342
1343/* Remove an l2tp session from l2tp_core's lists. */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1344static void l2tp_session_unhash(struct l2tp_session *session)
1345{
1346	struct l2tp_tunnel *tunnel = session->tunnel;
1347
 
1348	if (tunnel) {
1349		struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1350		struct l2tp_session *removed = session;
 
 
1351
1352		spin_lock_bh(&tunnel->list_lock);
1353		spin_lock_bh(&pn->l2tp_session_idr_lock);
 
1354
1355		/* Remove from the per-tunnel list */
1356		list_del_init(&session->list);
1357
1358		/* Remove from per-net IDR */
1359		if (tunnel->version == L2TP_HDR_VER_3) {
1360			if (hash_hashed(&session->hlist))
1361				l2tp_session_collision_del(pn, session);
1362			else
1363				removed = idr_remove(&pn->l2tp_v3_session_idr,
1364						     session->session_id);
1365		} else {
1366			u32 session_key = l2tp_v2_session_key(tunnel->tunnel_id,
1367							      session->session_id);
1368			removed = idr_remove(&pn->l2tp_v2_session_idr,
1369					     session_key);
1370		}
1371		WARN_ON_ONCE(removed && removed != session);
1372
1373		spin_unlock_bh(&pn->l2tp_session_idr_lock);
1374		spin_unlock_bh(&tunnel->list_lock);
1375	}
1376}
1377
1378/* When the tunnel is closed, all the attached sessions need to go too.
1379 */
1380static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1381{
1382	struct l2tp_session *session;
 
1383
1384	spin_lock_bh(&tunnel->list_lock);
1385	tunnel->acpt_newsess = false;
1386	list_for_each_entry(session, &tunnel->session_list, list)
1387		l2tp_session_delete(session);
1388	spin_unlock_bh(&tunnel->list_lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1389}
1390
1391/* Tunnel socket destroy hook for UDP encapsulation */
1392static void l2tp_udp_encap_destroy(struct sock *sk)
1393{
1394	struct l2tp_tunnel *tunnel;
1395
1396	tunnel = l2tp_sk_to_tunnel(sk);
1397	if (tunnel) {
1398		l2tp_tunnel_delete(tunnel);
1399		l2tp_tunnel_put(tunnel);
1400	}
1401}
1402
1403static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
1404{
1405	struct l2tp_net *pn = l2tp_pernet(net);
1406
1407	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1408	idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id);
1409	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1410}
1411
1412/* Workqueue tunnel deletion function */
1413static void l2tp_tunnel_del_work(struct work_struct *work)
1414{
1415	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
1416						  del_work);
1417	struct sock *sk = tunnel->sock;
1418	struct socket *sock = sk->sk_socket;
1419
1420	l2tp_tunnel_closeall(tunnel);
1421
1422	/* If the tunnel socket was created within the kernel, use
1423	 * the sk API to release it here.
1424	 */
1425	if (tunnel->fd < 0) {
1426		if (sock) {
1427			kernel_sock_shutdown(sock, SHUT_RDWR);
1428			sock_release(sock);
1429		}
1430	}
1431
1432	l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
1433	/* drop initial ref */
1434	l2tp_tunnel_put(tunnel);
1435
1436	/* drop workqueue ref */
1437	l2tp_tunnel_put(tunnel);
1438}
1439
1440/* Create a socket for the tunnel, if one isn't set up by
1441 * userspace. This is used for static tunnels where there is no
1442 * managing L2TP daemon.
1443 *
1444 * Since we don't want these sockets to keep a namespace alive by
1445 * themselves, we drop the socket's namespace refcount after creation.
1446 * These sockets are freed when the namespace exits using the pernet
1447 * exit hook.
1448 */
1449static int l2tp_tunnel_sock_create(struct net *net,
1450				   u32 tunnel_id,
1451				   u32 peer_tunnel_id,
1452				   struct l2tp_tunnel_cfg *cfg,
1453				   struct socket **sockp)
1454{
1455	int err = -EINVAL;
1456	struct socket *sock = NULL;
1457	struct udp_port_cfg udp_conf;
1458
1459	switch (cfg->encap) {
1460	case L2TP_ENCAPTYPE_UDP:
1461		memset(&udp_conf, 0, sizeof(udp_conf));
1462
1463#if IS_ENABLED(CONFIG_IPV6)
1464		if (cfg->local_ip6 && cfg->peer_ip6) {
1465			udp_conf.family = AF_INET6;
1466			memcpy(&udp_conf.local_ip6, cfg->local_ip6,
1467			       sizeof(udp_conf.local_ip6));
1468			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
1469			       sizeof(udp_conf.peer_ip6));
1470			udp_conf.use_udp6_tx_checksums =
1471			  !cfg->udp6_zero_tx_checksums;
1472			udp_conf.use_udp6_rx_checksums =
1473			  !cfg->udp6_zero_rx_checksums;
1474		} else
1475#endif
1476		{
1477			udp_conf.family = AF_INET;
1478			udp_conf.local_ip = cfg->local_ip;
1479			udp_conf.peer_ip = cfg->peer_ip;
1480			udp_conf.use_udp_checksums = cfg->use_udp_checksums;
1481		}
1482
1483		udp_conf.local_udp_port = htons(cfg->local_udp_port);
1484		udp_conf.peer_udp_port = htons(cfg->peer_udp_port);
1485
1486		err = udp_sock_create(net, &udp_conf, &sock);
1487		if (err < 0)
1488			goto out;
1489
1490		break;
1491
1492	case L2TP_ENCAPTYPE_IP:
1493#if IS_ENABLED(CONFIG_IPV6)
1494		if (cfg->local_ip6 && cfg->peer_ip6) {
1495			struct sockaddr_l2tpip6 ip6_addr = {0};
1496
1497			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
1498					       IPPROTO_L2TP, &sock);
1499			if (err < 0)
1500				goto out;
1501
1502			ip6_addr.l2tp_family = AF_INET6;
1503			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
1504			       sizeof(ip6_addr.l2tp_addr));
1505			ip6_addr.l2tp_conn_id = tunnel_id;
1506			err = kernel_bind(sock, (struct sockaddr *)&ip6_addr,
1507					  sizeof(ip6_addr));
1508			if (err < 0)
1509				goto out;
1510
1511			ip6_addr.l2tp_family = AF_INET6;
1512			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
1513			       sizeof(ip6_addr.l2tp_addr));
1514			ip6_addr.l2tp_conn_id = peer_tunnel_id;
1515			err = kernel_connect(sock,
1516					     (struct sockaddr *)&ip6_addr,
1517					     sizeof(ip6_addr), 0);
1518			if (err < 0)
1519				goto out;
1520		} else
1521#endif
1522		{
1523			struct sockaddr_l2tpip ip_addr = {0};
1524
1525			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
1526					       IPPROTO_L2TP, &sock);
1527			if (err < 0)
1528				goto out;
1529
1530			ip_addr.l2tp_family = AF_INET;
1531			ip_addr.l2tp_addr = cfg->local_ip;
1532			ip_addr.l2tp_conn_id = tunnel_id;
1533			err = kernel_bind(sock, (struct sockaddr *)&ip_addr,
1534					  sizeof(ip_addr));
1535			if (err < 0)
1536				goto out;
1537
1538			ip_addr.l2tp_family = AF_INET;
1539			ip_addr.l2tp_addr = cfg->peer_ip;
1540			ip_addr.l2tp_conn_id = peer_tunnel_id;
1541			err = kernel_connect(sock, (struct sockaddr *)&ip_addr,
1542					     sizeof(ip_addr), 0);
1543			if (err < 0)
1544				goto out;
1545		}
1546		break;
1547
1548	default:
1549		goto out;
1550	}
1551
1552out:
1553	*sockp = sock;
1554	if (err < 0 && sock) {
1555		kernel_sock_shutdown(sock, SHUT_RDWR);
1556		sock_release(sock);
1557		*sockp = NULL;
1558	}
1559
1560	return err;
1561}
1562
1563int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
1564		       struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1565{
1566	struct l2tp_tunnel *tunnel = NULL;
1567	int err;
1568	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1569
1570	if (cfg)
1571		encap = cfg->encap;
1572
1573	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
1574	if (!tunnel) {
1575		err = -ENOMEM;
1576		goto err;
1577	}
1578
1579	tunnel->version = version;
1580	tunnel->tunnel_id = tunnel_id;
1581	tunnel->peer_tunnel_id = peer_tunnel_id;
1582
 
1583	sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1584	spin_lock_init(&tunnel->list_lock);
1585	tunnel->acpt_newsess = true;
1586	INIT_LIST_HEAD(&tunnel->session_list);
1587
1588	tunnel->encap = encap;
1589
1590	refcount_set(&tunnel->ref_count, 1);
1591	tunnel->fd = fd;
1592
1593	/* Init delete workqueue struct */
1594	INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1595
 
 
1596	err = 0;
1597err:
1598	if (tunnelp)
1599		*tunnelp = tunnel;
1600
1601	return err;
1602}
1603EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1604
1605static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
1606				enum l2tp_encap_type encap)
1607{
1608	struct l2tp_tunnel *tunnel;
1609
1610	if (!net_eq(sock_net(sk), net))
1611		return -EINVAL;
1612
1613	if (sk->sk_type != SOCK_DGRAM)
1614		return -EPROTONOSUPPORT;
1615
1616	if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
1617		return -EPROTONOSUPPORT;
1618
1619	if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
1620	    (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
1621		return -EPROTONOSUPPORT;
1622
1623	if (encap == L2TP_ENCAPTYPE_UDP && sk->sk_user_data)
1624		return -EBUSY;
1625
1626	tunnel = l2tp_sk_to_tunnel(sk);
1627	if (tunnel) {
1628		l2tp_tunnel_put(tunnel);
1629		return -EBUSY;
1630	}
1631
1632	return 0;
1633}
1634
1635int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
1636			 struct l2tp_tunnel_cfg *cfg)
1637{
1638	struct l2tp_net *pn = l2tp_pernet(net);
1639	u32 tunnel_id = tunnel->tunnel_id;
1640	struct socket *sock;
1641	struct sock *sk;
1642	int ret;
1643
1644	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1645	ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
1646			    GFP_ATOMIC);
1647	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1648	if (ret)
1649		return ret == -ENOSPC ? -EEXIST : ret;
1650
1651	if (tunnel->fd < 0) {
1652		ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
1653					      tunnel->peer_tunnel_id, cfg,
1654					      &sock);
1655		if (ret < 0)
1656			goto err;
1657	} else {
1658		sock = sockfd_lookup(tunnel->fd, &ret);
1659		if (!sock)
1660			goto err;
1661	}
1662
1663	sk = sock->sk;
1664	lock_sock(sk);
1665	write_lock_bh(&sk->sk_callback_lock);
1666	ret = l2tp_validate_socket(sk, net, tunnel->encap);
1667	if (ret < 0)
1668		goto err_inval_sock;
 
1669	write_unlock_bh(&sk->sk_callback_lock);
1670
1671	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1672		struct udp_tunnel_sock_cfg udp_cfg = {
 
1673			.encap_type = UDP_ENCAP_L2TPINUDP,
1674			.encap_rcv = l2tp_udp_encap_recv,
1675			.encap_err_rcv = l2tp_udp_encap_err_recv,
1676			.encap_destroy = l2tp_udp_encap_destroy,
1677		};
1678
1679		setup_udp_tunnel_sock(net, sock, &udp_cfg);
1680	}
1681
 
 
1682	sk->sk_allocation = GFP_ATOMIC;
1683	release_sock(sk);
1684
1685	sock_hold(sk);
1686	tunnel->sock = sk;
1687	tunnel->l2tp_net = net;
1688
1689	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1690	idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
1691	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1692
1693	trace_register_tunnel(tunnel);
1694
1695	if (tunnel->fd >= 0)
1696		sockfd_put(sock);
1697
1698	return 0;
1699
1700err_inval_sock:
1701	write_unlock_bh(&sk->sk_callback_lock);
1702	release_sock(sk);
1703
1704	if (tunnel->fd < 0)
1705		sock_release(sock);
1706	else
1707		sockfd_put(sock);
1708err:
1709	l2tp_tunnel_remove(net, tunnel);
1710	return ret;
1711}
1712EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
1713
1714/* This function is used by the netlink TUNNEL_DELETE command.
1715 */
1716void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1717{
1718	if (!test_and_set_bit(0, &tunnel->dead)) {
1719		trace_delete_tunnel(tunnel);
1720		refcount_inc(&tunnel->ref_count);
1721		queue_work(l2tp_wq, &tunnel->del_work);
1722	}
1723}
1724EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1725
1726void l2tp_session_delete(struct l2tp_session *session)
1727{
1728	if (!test_and_set_bit(0, &session->dead)) {
1729		trace_delete_session(session);
1730		refcount_inc(&session->ref_count);
1731		queue_work(l2tp_wq, &session->del_work);
1732	}
1733}
1734EXPORT_SYMBOL_GPL(l2tp_session_delete);
1735
1736/* Workqueue session deletion function */
1737static void l2tp_session_del_work(struct work_struct *work)
1738{
1739	struct l2tp_session *session = container_of(work, struct l2tp_session,
1740						    del_work);
1741
 
1742	l2tp_session_unhash(session);
1743	l2tp_session_queue_purge(session);
1744	if (session->session_close)
1745		(*session->session_close)(session);
1746
1747	/* drop initial ref */
1748	l2tp_session_put(session);
1749
1750	/* drop workqueue ref */
1751	l2tp_session_put(session);
1752}
 
1753
1754/* We come here whenever a session's send_seq, cookie_len or
1755 * l2specific_type parameters are set.
1756 */
1757void l2tp_session_set_header_len(struct l2tp_session *session, int version,
1758				 enum l2tp_encap_type encap)
1759{
1760	if (version == L2TP_HDR_VER_2) {
1761		session->hdr_len = 6;
1762		if (session->send_seq)
1763			session->hdr_len += 4;
1764	} else {
1765		session->hdr_len = 4 + session->cookie_len;
1766		session->hdr_len += l2tp_get_l2specific_len(session);
1767		if (encap == L2TP_ENCAPTYPE_UDP)
1768			session->hdr_len += 4;
1769	}
1770}
1771EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
1772
1773struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id,
1774					 u32 peer_session_id, struct l2tp_session_cfg *cfg)
1775{
1776	struct l2tp_session *session;
1777
1778	session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
1779	if (session) {
1780		session->magic = L2TP_SESSION_MAGIC;
 
1781
1782		session->session_id = session_id;
1783		session->peer_session_id = peer_session_id;
1784		session->nr = 0;
1785		if (tunnel->version == L2TP_HDR_VER_2)
1786			session->nr_max = 0xffff;
1787		else
1788			session->nr_max = 0xffffff;
1789		session->nr_window_size = session->nr_max / 2;
1790		session->nr_oos_count_max = 4;
1791
1792		/* Use NR of first received packet */
1793		session->reorder_skip = 1;
1794
1795		sprintf(&session->name[0], "sess %u/%u",
1796			tunnel->tunnel_id, session->session_id);
1797
1798		skb_queue_head_init(&session->reorder_q);
1799
1800		session->hlist_key = l2tp_v3_session_hashkey(tunnel->sock, session->session_id);
1801		INIT_HLIST_NODE(&session->hlist);
1802		INIT_LIST_HEAD(&session->clist);
1803		INIT_LIST_HEAD(&session->list);
1804		INIT_WORK(&session->del_work, l2tp_session_del_work);
1805
1806		if (cfg) {
1807			session->pwtype = cfg->pw_type;
1808			session->send_seq = cfg->send_seq;
1809			session->recv_seq = cfg->recv_seq;
1810			session->lns_mode = cfg->lns_mode;
1811			session->reorder_timeout = cfg->reorder_timeout;
1812			session->l2specific_type = cfg->l2specific_type;
1813			session->cookie_len = cfg->cookie_len;
1814			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
1815			session->peer_cookie_len = cfg->peer_cookie_len;
1816			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
1817		}
1818
1819		l2tp_session_set_header_len(session, tunnel->version, tunnel->encap);
1820
1821		refcount_set(&session->ref_count, 1);
1822
1823		return session;
1824	}
1825
1826	return ERR_PTR(-ENOMEM);
1827}
1828EXPORT_SYMBOL_GPL(l2tp_session_create);
1829
1830/*****************************************************************************
1831 * Init and cleanup
1832 *****************************************************************************/
1833
1834static __net_init int l2tp_init_net(struct net *net)
1835{
1836	struct l2tp_net *pn = net_generic(net, l2tp_net_id);
 
1837
1838	idr_init(&pn->l2tp_tunnel_idr);
1839	spin_lock_init(&pn->l2tp_tunnel_idr_lock);
1840
1841	idr_init(&pn->l2tp_v2_session_idr);
1842	idr_init(&pn->l2tp_v3_session_idr);
1843	spin_lock_init(&pn->l2tp_session_idr_lock);
 
1844
1845	return 0;
1846}
1847
1848static __net_exit void l2tp_pre_exit_net(struct net *net)
1849{
1850	struct l2tp_net *pn = l2tp_pernet(net);
1851	struct l2tp_tunnel *tunnel = NULL;
1852	unsigned long tunnel_id, tmp;
 
1853
1854	rcu_read_lock_bh();
1855	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
1856		if (tunnel)
1857			l2tp_tunnel_delete(tunnel);
1858	}
1859	rcu_read_unlock_bh();
1860
1861	if (l2tp_wq) {
1862		/* Run all TUNNEL_DELETE work items just queued. */
1863		__flush_workqueue(l2tp_wq);
1864
1865		/* Each TUNNEL_DELETE work item will queue a SESSION_DELETE
1866		 * work item for each session in the tunnel. Flush the
1867		 * workqueue again to process these.
1868		 */
1869		__flush_workqueue(l2tp_wq);
1870	}
1871}
1872
1873static int l2tp_idr_item_unexpected(int id, void *p, void *data)
1874{
1875	const char *idr_name = data;
1876
1877	pr_err("l2tp: %s IDR not empty at net %d exit\n", idr_name, id);
1878	WARN_ON_ONCE(1);
1879	return 1;
1880}
1881
1882static __net_exit void l2tp_exit_net(struct net *net)
1883{
1884	struct l2tp_net *pn = l2tp_pernet(net);
1885
1886	/* Our per-net IDRs should be empty. Check that is so, to
1887	 * help catch cleanup races or refcnt leaks.
1888	 */
1889	idr_for_each(&pn->l2tp_v2_session_idr, l2tp_idr_item_unexpected,
1890		     "v2_session");
1891	idr_for_each(&pn->l2tp_v3_session_idr, l2tp_idr_item_unexpected,
1892		     "v3_session");
1893	idr_for_each(&pn->l2tp_tunnel_idr, l2tp_idr_item_unexpected,
1894		     "tunnel");
1895
1896	idr_destroy(&pn->l2tp_v2_session_idr);
1897	idr_destroy(&pn->l2tp_v3_session_idr);
1898	idr_destroy(&pn->l2tp_tunnel_idr);
1899}
1900
1901static struct pernet_operations l2tp_net_ops = {
1902	.init = l2tp_init_net,
1903	.exit = l2tp_exit_net,
1904	.pre_exit = l2tp_pre_exit_net,
1905	.id   = &l2tp_net_id,
1906	.size = sizeof(struct l2tp_net),
1907};
1908
1909static int __init l2tp_init(void)
1910{
1911	int rc = 0;
1912
1913	rc = register_pernet_device(&l2tp_net_ops);
1914	if (rc)
1915		goto out;
1916
1917	l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
1918	if (!l2tp_wq) {
1919		pr_err("alloc_workqueue failed\n");
1920		unregister_pernet_device(&l2tp_net_ops);
1921		rc = -ENOMEM;
1922		goto out;
1923	}
1924
1925	pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1926
1927out:
1928	return rc;
1929}
1930
1931static void __exit l2tp_exit(void)
1932{
1933	unregister_pernet_device(&l2tp_net_ops);
1934	if (l2tp_wq) {
1935		destroy_workqueue(l2tp_wq);
1936		l2tp_wq = NULL;
1937	}
1938}
1939
1940module_init(l2tp_init);
1941module_exit(l2tp_exit);
1942
1943MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
1944MODULE_DESCRIPTION("L2TP core");
1945MODULE_LICENSE("GPL");
1946MODULE_VERSION(L2TP_DRV_VERSION);
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* L2TP core.
   3 *
   4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
   5 *
   6 * This file contains some code of the original L2TPv2 pppol2tp
   7 * driver, which has the following copyright:
   8 *
   9 * Authors:	Martijn van Oosterhout <kleptog@svana.org>
  10 *		James Chapman (jchapman@katalix.com)
  11 * Contributors:
  12 *		Michal Ostrowski <mostrows@speakeasy.net>
  13 *		Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
  14 *		David S. Miller (davem@redhat.com)
  15 */
  16
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18
  19#include <linux/module.h>
  20#include <linux/string.h>
  21#include <linux/list.h>
  22#include <linux/rculist.h>
  23#include <linux/uaccess.h>
  24
  25#include <linux/kernel.h>
  26#include <linux/spinlock.h>
  27#include <linux/kthread.h>
  28#include <linux/sched.h>
  29#include <linux/slab.h>
  30#include <linux/errno.h>
  31#include <linux/jiffies.h>
  32
  33#include <linux/netdevice.h>
  34#include <linux/net.h>
  35#include <linux/inetdevice.h>
  36#include <linux/skbuff.h>
  37#include <linux/init.h>
  38#include <linux/in.h>
  39#include <linux/ip.h>
  40#include <linux/udp.h>
  41#include <linux/l2tp.h>
  42#include <linux/hash.h>
  43#include <linux/sort.h>
  44#include <linux/file.h>
  45#include <linux/nsproxy.h>
  46#include <net/net_namespace.h>
  47#include <net/netns/generic.h>
  48#include <net/dst.h>
  49#include <net/ip.h>
  50#include <net/udp.h>
  51#include <net/udp_tunnel.h>
  52#include <net/inet_common.h>
  53#include <net/xfrm.h>
  54#include <net/protocol.h>
  55#include <net/inet6_connection_sock.h>
  56#include <net/inet_ecn.h>
  57#include <net/ip6_route.h>
  58#include <net/ip6_checksum.h>
  59
  60#include <asm/byteorder.h>
  61#include <linux/atomic.h>
  62
  63#include "l2tp_core.h"
  64#include "trace.h"
  65
  66#define CREATE_TRACE_POINTS
  67#include "trace.h"
  68
  69#define L2TP_DRV_VERSION	"V2.0"
  70
  71/* L2TP header constants */
  72#define L2TP_HDRFLAG_T	   0x8000
  73#define L2TP_HDRFLAG_L	   0x4000
  74#define L2TP_HDRFLAG_S	   0x0800
  75#define L2TP_HDRFLAG_O	   0x0200
  76#define L2TP_HDRFLAG_P	   0x0100
  77
  78#define L2TP_HDR_VER_MASK  0x000F
  79#define L2TP_HDR_VER_2	   0x0002
  80#define L2TP_HDR_VER_3	   0x0003
  81
  82/* L2TPv3 default L2-specific sublayer */
  83#define L2TP_SLFLAG_S	   0x40000000
  84#define L2TP_SL_SEQ_MASK   0x00ffffff
  85
  86#define L2TP_HDR_SIZE_MAX		14
  87
  88/* Default trace flags */
  89#define L2TP_DEFAULT_DEBUG_FLAGS	0
  90
 
 
 
 
 
  91/* Private data stored for received packets in the skb.
  92 */
  93struct l2tp_skb_cb {
  94	u32			ns;
  95	u16			has_seq;
  96	u16			length;
  97	unsigned long		expires;
  98};
  99
 100#define L2TP_SKB_CB(skb)	((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)])
 101
 102static struct workqueue_struct *l2tp_wq;
 103
 104/* per-net private data for this module */
 105static unsigned int l2tp_net_id;
 106struct l2tp_net {
 107	/* Lock for write access to l2tp_tunnel_idr */
 108	spinlock_t l2tp_tunnel_idr_lock;
 109	struct idr l2tp_tunnel_idr;
 110	struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
 111	/* Lock for write access to l2tp_session_hlist */
 112	spinlock_t l2tp_session_hlist_lock;
 
 
 113};
 114
 
 
 
 
 
 
 
 
 
 
 115#if IS_ENABLED(CONFIG_IPV6)
 116static bool l2tp_sk_is_v6(struct sock *sk)
 117{
 118	return sk->sk_family == PF_INET6 &&
 119	       !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
 120}
 121#endif
 122
 123static inline struct l2tp_net *l2tp_pernet(const struct net *net)
 124{
 125	return net_generic(net, l2tp_net_id);
 126}
 127
 128/* Session hash global list for L2TPv3.
 129 * The session_id SHOULD be random according to RFC3931, but several
 130 * L2TP implementations use incrementing session_ids.  So we do a real
 131 * hash on the session_id, rather than a simple bitmask.
 132 */
 133static inline struct hlist_head *
 134l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
 135{
 136	return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
 137}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 138
 139/* Session hash list.
 140 * The session_id SHOULD be random according to RFC2661, but several
 141 * L2TP implementations (Cisco and Microsoft) use incrementing
 142 * session_ids.  So we do a real hash on the session_id, rather than a
 143 * simple bitmask.
 144 */
 145static inline struct hlist_head *
 146l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
 147{
 148	return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
 149}
 150
 151static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
 152{
 153	trace_free_tunnel(tunnel);
 154	sock_put(tunnel->sock);
 155	/* the tunnel is freed in the socket destructor */
 156}
 157
 158static void l2tp_session_free(struct l2tp_session *session)
 159{
 160	trace_free_session(session);
 161	if (session->tunnel)
 162		l2tp_tunnel_dec_refcount(session->tunnel);
 163	kfree(session);
 164}
 165
 166struct l2tp_tunnel *l2tp_sk_to_tunnel(struct sock *sk)
 167{
 168	struct l2tp_tunnel *tunnel = sk->sk_user_data;
 
 
 
 169
 170	if (tunnel)
 171		if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
 172			return NULL;
 
 
 
 
 
 
 
 
 173
 174	return tunnel;
 175}
 176EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);
 177
 178void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
 179{
 180	refcount_inc(&tunnel->ref_count);
 181}
 182EXPORT_SYMBOL_GPL(l2tp_tunnel_inc_refcount);
 183
 184void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
 185{
 186	if (refcount_dec_and_test(&tunnel->ref_count))
 187		l2tp_tunnel_free(tunnel);
 188}
 189EXPORT_SYMBOL_GPL(l2tp_tunnel_dec_refcount);
 190
 191void l2tp_session_inc_refcount(struct l2tp_session *session)
 192{
 193	refcount_inc(&session->ref_count);
 194}
 195EXPORT_SYMBOL_GPL(l2tp_session_inc_refcount);
 196
 197void l2tp_session_dec_refcount(struct l2tp_session *session)
 198{
 199	if (refcount_dec_and_test(&session->ref_count))
 200		l2tp_session_free(session);
 201}
 202EXPORT_SYMBOL_GPL(l2tp_session_dec_refcount);
 203
 204/* Lookup a tunnel. A new reference is held on the returned tunnel. */
 205struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
 206{
 207	const struct l2tp_net *pn = l2tp_pernet(net);
 208	struct l2tp_tunnel *tunnel;
 209
 210	rcu_read_lock_bh();
 211	tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id);
 212	if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) {
 213		rcu_read_unlock_bh();
 214		return tunnel;
 215	}
 216	rcu_read_unlock_bh();
 217
 218	return NULL;
 219}
 220EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
 221
 222struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
 223{
 224	struct l2tp_net *pn = l2tp_pernet(net);
 225	unsigned long tunnel_id, tmp;
 226	struct l2tp_tunnel *tunnel;
 227	int count = 0;
 228
 229	rcu_read_lock_bh();
 230	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
 231		if (tunnel && ++count > nth &&
 232		    refcount_inc_not_zero(&tunnel->ref_count)) {
 
 233			rcu_read_unlock_bh();
 234			return tunnel;
 235		}
 
 
 236	}
 237	rcu_read_unlock_bh();
 238
 239	return NULL;
 240}
 241EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
 242
 243struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
 244					     u32 session_id)
 245{
 246	struct hlist_head *session_list;
 247	struct l2tp_session *session;
 248
 249	session_list = l2tp_session_id_hash(tunnel, session_id);
 
 
 
 
 
 
 
 
 
 
 
 
 
 250
 251	rcu_read_lock_bh();
 252	hlist_for_each_entry_rcu(session, session_list, hlist)
 253		if (session->session_id == session_id) {
 254			l2tp_session_inc_refcount(session);
 255			rcu_read_unlock_bh();
 
 
 
 256
 257			return session;
 
 
 
 
 
 258		}
 
 259	rcu_read_unlock_bh();
 260
 261	return NULL;
 262}
 263EXPORT_SYMBOL_GPL(l2tp_tunnel_get_session);
 264
 265struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id)
 266{
 267	struct hlist_head *session_list;
 
 268	struct l2tp_session *session;
 269
 270	session_list = l2tp_session_id_hash_2(l2tp_pernet(net), session_id);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 271
 272	rcu_read_lock_bh();
 273	hlist_for_each_entry_rcu(session, session_list, global_hlist)
 274		if (session->session_id == session_id) {
 275			l2tp_session_inc_refcount(session);
 
 
 
 
 
 
 
 
 
 
 276			rcu_read_unlock_bh();
 277
 278			return session;
 279		}
 
 
 
 
 
 280	rcu_read_unlock_bh();
 281
 282	return NULL;
 283}
 284EXPORT_SYMBOL_GPL(l2tp_session_get);
 285
 286struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
 
 
 287{
 288	int hash;
 289	struct l2tp_session *session;
 290	int count = 0;
 291
 292	rcu_read_lock_bh();
 293	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
 294		hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
 295			if (++count > nth) {
 296				l2tp_session_inc_refcount(session);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 297				rcu_read_unlock_bh();
 298				return session;
 299			}
 300		}
 
 
 
 
 
 
 301	}
 302
 303	rcu_read_unlock_bh();
 304
 305	return NULL;
 306}
 307EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
 
 
 
 
 
 
 
 
 
 308
 309/* Lookup a session by interface name.
 310 * This is very inefficient but is only used by management interfaces.
 311 */
 312struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
 313						const char *ifname)
 314{
 315	struct l2tp_net *pn = l2tp_pernet(net);
 316	int hash;
 317	struct l2tp_session *session;
 
 318
 319	rcu_read_lock_bh();
 320	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
 321		hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
 322			if (!strcmp(session->ifname, ifname)) {
 323				l2tp_session_inc_refcount(session);
 324				rcu_read_unlock_bh();
 
 325
 326				return session;
 
 327			}
 328		}
 329	}
 330
 331	rcu_read_unlock_bh();
 332
 333	return NULL;
 334}
 335EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
 336
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 337int l2tp_session_register(struct l2tp_session *session,
 338			  struct l2tp_tunnel *tunnel)
 339{
 340	struct l2tp_session *session_walk;
 341	struct hlist_head *g_head;
 342	struct hlist_head *head;
 343	struct l2tp_net *pn;
 344	int err;
 345
 346	head = l2tp_session_id_hash(tunnel, session->session_id);
 
 347
 348	spin_lock_bh(&tunnel->hlist_lock);
 349	if (!tunnel->acpt_newsess) {
 350		err = -ENODEV;
 351		goto err_tlock;
 352	}
 353
 354	hlist_for_each_entry(session_walk, head, hlist)
 355		if (session_walk->session_id == session->session_id) {
 356			err = -EEXIST;
 357			goto err_tlock;
 358		}
 359
 360	if (tunnel->version == L2TP_HDR_VER_3) {
 361		pn = l2tp_pernet(tunnel->l2tp_net);
 362		g_head = l2tp_session_id_hash_2(pn, session->session_id);
 363
 364		spin_lock_bh(&pn->l2tp_session_hlist_lock);
 365
 366		/* IP encap expects session IDs to be globally unique, while
 367		 * UDP encap doesn't.
 
 
 368		 */
 369		hlist_for_each_entry(session_walk, g_head, global_hlist)
 370			if (session_walk->session_id == session->session_id &&
 371			    (session_walk->tunnel->encap == L2TP_ENCAPTYPE_IP ||
 372			     tunnel->encap == L2TP_ENCAPTYPE_IP)) {
 373				err = -EEXIST;
 374				goto err_tlock_pnlock;
 375			}
 
 
 
 
 
 
 
 
 
 
 
 376
 377		l2tp_tunnel_inc_refcount(tunnel);
 378		hlist_add_head_rcu(&session->global_hlist, g_head);
 
 379
 380		spin_unlock_bh(&pn->l2tp_session_hlist_lock);
 
 
 
 381	} else {
 382		l2tp_tunnel_inc_refcount(tunnel);
 383	}
 384
 385	hlist_add_head_rcu(&session->hlist, head);
 386	spin_unlock_bh(&tunnel->hlist_lock);
 387
 388	trace_register_session(session);
 
 
 
 
 389
 390	return 0;
 391
 392err_tlock_pnlock:
 393	spin_unlock_bh(&pn->l2tp_session_hlist_lock);
 394err_tlock:
 395	spin_unlock_bh(&tunnel->hlist_lock);
 396
 397	return err;
 398}
 399EXPORT_SYMBOL_GPL(l2tp_session_register);
 400
 401/*****************************************************************************
 402 * Receive data handling
 403 *****************************************************************************/
 404
 405/* Queue a skb in order. We come here only if the skb has an L2TP sequence
 406 * number.
 407 */
 408static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
 409{
 410	struct sk_buff *skbp;
 411	struct sk_buff *tmp;
 412	u32 ns = L2TP_SKB_CB(skb)->ns;
 413
 414	spin_lock_bh(&session->reorder_q.lock);
 415	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
 416		if (L2TP_SKB_CB(skbp)->ns > ns) {
 417			__skb_queue_before(&session->reorder_q, skbp, skb);
 418			atomic_long_inc(&session->stats.rx_oos_packets);
 419			goto out;
 420		}
 421	}
 422
 423	__skb_queue_tail(&session->reorder_q, skb);
 424
 425out:
 426	spin_unlock_bh(&session->reorder_q.lock);
 427}
 428
 429/* Dequeue a single skb.
 430 */
 431static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
 432{
 433	struct l2tp_tunnel *tunnel = session->tunnel;
 434	int length = L2TP_SKB_CB(skb)->length;
 435
 436	/* We're about to requeue the skb, so return resources
 437	 * to its current owner (a socket receive buffer).
 438	 */
 439	skb_orphan(skb);
 440
 441	atomic_long_inc(&tunnel->stats.rx_packets);
 442	atomic_long_add(length, &tunnel->stats.rx_bytes);
 443	atomic_long_inc(&session->stats.rx_packets);
 444	atomic_long_add(length, &session->stats.rx_bytes);
 445
 446	if (L2TP_SKB_CB(skb)->has_seq) {
 447		/* Bump our Nr */
 448		session->nr++;
 449		session->nr &= session->nr_max;
 450		trace_session_seqnum_update(session);
 451	}
 452
 453	/* call private receive handler */
 454	if (session->recv_skb)
 455		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
 456	else
 457		kfree_skb(skb);
 458}
 459
 460/* Dequeue skbs from the session's reorder_q, subject to packet order.
 461 * Skbs that have been in the queue for too long are simply discarded.
 462 */
 463static void l2tp_recv_dequeue(struct l2tp_session *session)
 464{
 465	struct sk_buff *skb;
 466	struct sk_buff *tmp;
 467
 468	/* If the pkt at the head of the queue has the nr that we
 469	 * expect to send up next, dequeue it and any other
 470	 * in-sequence packets behind it.
 471	 */
 472start:
 473	spin_lock_bh(&session->reorder_q.lock);
 474	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
 475		struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
 476
 477		/* If the packet has been pending on the queue for too long, discard it */
 478		if (time_after(jiffies, cb->expires)) {
 479			atomic_long_inc(&session->stats.rx_seq_discards);
 480			atomic_long_inc(&session->stats.rx_errors);
 481			trace_session_pkt_expired(session, cb->ns);
 482			session->reorder_skip = 1;
 483			__skb_unlink(skb, &session->reorder_q);
 484			kfree_skb(skb);
 485			continue;
 486		}
 487
 488		if (cb->has_seq) {
 489			if (session->reorder_skip) {
 490				session->reorder_skip = 0;
 491				session->nr = cb->ns;
 492				trace_session_seqnum_reset(session);
 493			}
 494			if (cb->ns != session->nr)
 495				goto out;
 496		}
 497		__skb_unlink(skb, &session->reorder_q);
 498
 499		/* Process the skb. We release the queue lock while we
 500		 * do so to let other contexts process the queue.
 501		 */
 502		spin_unlock_bh(&session->reorder_q.lock);
 503		l2tp_recv_dequeue_skb(session, skb);
 504		goto start;
 505	}
 506
 507out:
 508	spin_unlock_bh(&session->reorder_q.lock);
 509}
 510
 511static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
 512{
 513	u32 nws;
 514
 515	if (nr >= session->nr)
 516		nws = nr - session->nr;
 517	else
 518		nws = (session->nr_max + 1) - (session->nr - nr);
 519
 520	return nws < session->nr_window_size;
 521}
 522
 523/* If packet has sequence numbers, queue it if acceptable. Returns 0 if
 524 * acceptable, else non-zero.
 525 */
 526static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
 527{
 528	struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
 529
 530	if (!l2tp_seq_check_rx_window(session, cb->ns)) {
 531		/* Packet sequence number is outside allowed window.
 532		 * Discard it.
 533		 */
 534		trace_session_pkt_outside_rx_window(session, cb->ns);
 535		goto discard;
 536	}
 537
 538	if (session->reorder_timeout != 0) {
 539		/* Packet reordering enabled. Add skb to session's
 540		 * reorder queue, in order of ns.
 541		 */
 542		l2tp_recv_queue_skb(session, skb);
 543		goto out;
 544	}
 545
 546	/* Packet reordering disabled. Discard out-of-sequence packets, while
 547	 * tracking the number if in-sequence packets after the first OOS packet
 548	 * is seen. After nr_oos_count_max in-sequence packets, reset the
 549	 * sequence number to re-enable packet reception.
 550	 */
 551	if (cb->ns == session->nr) {
 552		skb_queue_tail(&session->reorder_q, skb);
 553	} else {
 554		u32 nr_oos = cb->ns;
 555		u32 nr_next = (session->nr_oos + 1) & session->nr_max;
 556
 557		if (nr_oos == nr_next)
 558			session->nr_oos_count++;
 559		else
 560			session->nr_oos_count = 0;
 561
 562		session->nr_oos = nr_oos;
 563		if (session->nr_oos_count > session->nr_oos_count_max) {
 564			session->reorder_skip = 1;
 565		}
 566		if (!session->reorder_skip) {
 567			atomic_long_inc(&session->stats.rx_seq_discards);
 568			trace_session_pkt_oos(session, cb->ns);
 569			goto discard;
 570		}
 571		skb_queue_tail(&session->reorder_q, skb);
 572	}
 573
 574out:
 575	return 0;
 576
 577discard:
 578	return 1;
 579}
 580
 581/* Do receive processing of L2TP data frames. We handle both L2TPv2
 582 * and L2TPv3 data frames here.
 583 *
 584 * L2TPv2 Data Message Header
 585 *
 586 *  0                   1                   2                   3
 587 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 588 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 589 * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
 590 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 591 * |           Tunnel ID           |           Session ID          |
 592 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 593 * |             Ns (opt)          |             Nr (opt)          |
 594 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 595 * |      Offset Size (opt)        |    Offset pad... (opt)
 596 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 597 *
 598 * Data frames are marked by T=0. All other fields are the same as
 599 * those in L2TP control frames.
 600 *
 601 * L2TPv3 Data Message Header
 602 *
 603 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 604 * |                      L2TP Session Header                      |
 605 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 606 * |                      L2-Specific Sublayer                     |
 607 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 608 * |                        Tunnel Payload                      ...
 609 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 610 *
 611 * L2TPv3 Session Header Over IP
 612 *
 613 *  0                   1                   2                   3
 614 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 615 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 616 * |                           Session ID                          |
 617 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 618 * |               Cookie (optional, maximum 64 bits)...
 619 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 620 *                                                                 |
 621 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 622 *
 623 * L2TPv3 L2-Specific Sublayer Format
 624 *
 625 *  0                   1                   2                   3
 626 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 627 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 628 * |x|S|x|x|x|x|x|x|              Sequence Number                  |
 629 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 630 *
 631 * Cookie value and sublayer format are negotiated with the peer when
 632 * the session is set up. Unlike L2TPv2, we do not need to parse the
 633 * packet header to determine if optional fields are present.
 634 *
 635 * Caller must already have parsed the frame and determined that it is
 636 * a data (not control) frame before coming here. Fields up to the
 637 * session-id have already been parsed and ptr points to the data
 638 * after the session-id.
 639 */
 640void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 641		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
 642		      int length)
 643{
 644	struct l2tp_tunnel *tunnel = session->tunnel;
 645	int offset;
 646
 647	/* Parse and check optional cookie */
 648	if (session->peer_cookie_len > 0) {
 649		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
 650			pr_debug_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
 651					     tunnel->name, tunnel->tunnel_id,
 652					     session->session_id);
 653			atomic_long_inc(&session->stats.rx_cookie_discards);
 654			goto discard;
 655		}
 656		ptr += session->peer_cookie_len;
 657	}
 658
 659	/* Handle the optional sequence numbers. Sequence numbers are
 660	 * in different places for L2TPv2 and L2TPv3.
 661	 *
 662	 * If we are the LAC, enable/disable sequence numbers under
 663	 * the control of the LNS.  If no sequence numbers present but
 664	 * we were expecting them, discard frame.
 665	 */
 666	L2TP_SKB_CB(skb)->has_seq = 0;
 667	if (tunnel->version == L2TP_HDR_VER_2) {
 668		if (hdrflags & L2TP_HDRFLAG_S) {
 669			/* Store L2TP info in the skb */
 670			L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr);
 671			L2TP_SKB_CB(skb)->has_seq = 1;
 672			ptr += 2;
 673			/* Skip past nr in the header */
 674			ptr += 2;
 675
 676		}
 677	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
 678		u32 l2h = ntohl(*(__be32 *)ptr);
 679
 680		if (l2h & 0x40000000) {
 681			/* Store L2TP info in the skb */
 682			L2TP_SKB_CB(skb)->ns = l2h & 0x00ffffff;
 683			L2TP_SKB_CB(skb)->has_seq = 1;
 684		}
 685		ptr += 4;
 686	}
 687
 688	if (L2TP_SKB_CB(skb)->has_seq) {
 689		/* Received a packet with sequence numbers. If we're the LAC,
 690		 * check if we sre sending sequence numbers and if not,
 691		 * configure it so.
 692		 */
 693		if (!session->lns_mode && !session->send_seq) {
 694			trace_session_seqnum_lns_enable(session);
 695			session->send_seq = 1;
 696			l2tp_session_set_header_len(session, tunnel->version);
 
 697		}
 698	} else {
 699		/* No sequence numbers.
 700		 * If user has configured mandatory sequence numbers, discard.
 701		 */
 702		if (session->recv_seq) {
 703			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
 704					     session->name);
 705			atomic_long_inc(&session->stats.rx_seq_discards);
 706			goto discard;
 707		}
 708
 709		/* If we're the LAC and we're sending sequence numbers, the
 710		 * LNS has requested that we no longer send sequence numbers.
 711		 * If we're the LNS and we're sending sequence numbers, the
 712		 * LAC is broken. Discard the frame.
 713		 */
 714		if (!session->lns_mode && session->send_seq) {
 715			trace_session_seqnum_lns_disable(session);
 716			session->send_seq = 0;
 717			l2tp_session_set_header_len(session, tunnel->version);
 
 718		} else if (session->send_seq) {
 719			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
 720					     session->name);
 721			atomic_long_inc(&session->stats.rx_seq_discards);
 722			goto discard;
 723		}
 724	}
 725
 726	/* Session data offset is defined only for L2TPv2 and is
 727	 * indicated by an optional 16-bit value in the header.
 728	 */
 729	if (tunnel->version == L2TP_HDR_VER_2) {
 730		/* If offset bit set, skip it. */
 731		if (hdrflags & L2TP_HDRFLAG_O) {
 732			offset = ntohs(*(__be16 *)ptr);
 733			ptr += 2 + offset;
 734		}
 735	}
 736
 737	offset = ptr - optr;
 738	if (!pskb_may_pull(skb, offset))
 739		goto discard;
 740
 741	__skb_pull(skb, offset);
 742
 743	/* Prepare skb for adding to the session's reorder_q.  Hold
 744	 * packets for max reorder_timeout or 1 second if not
 745	 * reordering.
 746	 */
 747	L2TP_SKB_CB(skb)->length = length;
 748	L2TP_SKB_CB(skb)->expires = jiffies +
 749		(session->reorder_timeout ? session->reorder_timeout : HZ);
 750
 751	/* Add packet to the session's receive queue. Reordering is done here, if
 752	 * enabled. Saved L2TP protocol info is stored in skb->sb[].
 753	 */
 754	if (L2TP_SKB_CB(skb)->has_seq) {
 755		if (l2tp_recv_data_seq(session, skb))
 756			goto discard;
 757	} else {
 758		/* No sequence numbers. Add the skb to the tail of the
 759		 * reorder queue. This ensures that it will be
 760		 * delivered after all previous sequenced skbs.
 761		 */
 762		skb_queue_tail(&session->reorder_q, skb);
 763	}
 764
 765	/* Try to dequeue as many skbs from reorder_q as we can. */
 766	l2tp_recv_dequeue(session);
 767
 768	return;
 769
 770discard:
 771	atomic_long_inc(&session->stats.rx_errors);
 772	kfree_skb(skb);
 773}
 774EXPORT_SYMBOL_GPL(l2tp_recv_common);
 775
 776/* Drop skbs from the session's reorder_q
 777 */
 778static void l2tp_session_queue_purge(struct l2tp_session *session)
 779{
 780	struct sk_buff *skb = NULL;
 781
 782	while ((skb = skb_dequeue(&session->reorder_q))) {
 783		atomic_long_inc(&session->stats.rx_errors);
 784		kfree_skb(skb);
 785	}
 786}
 787
 788/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
 789 * here. The skb is not on a list when we get here.
 790 * Returns 0 if the packet was a data packet and was successfully passed on.
 791 * Returns 1 if the packet was not a good data packet and could not be
 792 * forwarded.  All such packets are passed up to userspace to deal with.
 793 */
 794static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
 795{
 796	struct l2tp_session *session = NULL;
 
 
 797	unsigned char *ptr, *optr;
 798	u16 hdrflags;
 799	u32 tunnel_id, session_id;
 800	u16 version;
 801	int length;
 802
 803	/* UDP has verified checksum */
 804
 805	/* UDP always verifies the packet length. */
 806	__skb_pull(skb, sizeof(struct udphdr));
 807
 808	/* Short packet? */
 809	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
 810		pr_debug_ratelimited("%s: recv short packet (len=%d)\n",
 811				     tunnel->name, skb->len);
 812		goto invalid;
 813	}
 814
 815	/* Point to L2TP header */
 816	optr = skb->data;
 817	ptr = skb->data;
 818
 819	/* Get L2TP header flags */
 820	hdrflags = ntohs(*(__be16 *)ptr);
 821
 822	/* Check protocol version */
 823	version = hdrflags & L2TP_HDR_VER_MASK;
 824	if (version != tunnel->version) {
 825		pr_debug_ratelimited("%s: recv protocol version mismatch: got %d expected %d\n",
 826				     tunnel->name, version, tunnel->version);
 827		goto invalid;
 828	}
 829
 830	/* Get length of L2TP packet */
 831	length = skb->len;
 832
 833	/* If type is control packet, it is handled by userspace. */
 834	if (hdrflags & L2TP_HDRFLAG_T)
 835		goto pass;
 836
 837	/* Skip flags */
 838	ptr += 2;
 839
 840	if (tunnel->version == L2TP_HDR_VER_2) {
 
 
 841		/* If length is present, skip it */
 842		if (hdrflags & L2TP_HDRFLAG_L)
 843			ptr += 2;
 844
 845		/* Extract tunnel and session ID */
 846		tunnel_id = ntohs(*(__be16 *)ptr);
 847		ptr += 2;
 848		session_id = ntohs(*(__be16 *)ptr);
 849		ptr += 2;
 
 
 850	} else {
 
 
 851		ptr += 2;	/* skip reserved bits */
 852		tunnel_id = tunnel->tunnel_id;
 853		session_id = ntohl(*(__be32 *)ptr);
 854		ptr += 4;
 
 
 855	}
 856
 857	/* Find the session context */
 858	session = l2tp_tunnel_get_session(tunnel, session_id);
 859	if (!session || !session->recv_skb) {
 860		if (session)
 861			l2tp_session_dec_refcount(session);
 862
 863		/* Not found? Pass to userspace to deal with */
 864		pr_debug_ratelimited("%s: no session found (%u/%u). Passing up.\n",
 865				     tunnel->name, tunnel_id, session_id);
 866		goto pass;
 867	}
 868
 869	if (tunnel->version == L2TP_HDR_VER_3 &&
 
 
 
 
 
 
 870	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
 871		l2tp_session_dec_refcount(session);
 872		goto invalid;
 873	}
 874
 875	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
 876	l2tp_session_dec_refcount(session);
 877
 878	return 0;
 879
 880invalid:
 881	atomic_long_inc(&tunnel->stats.rx_invalid);
 882
 883pass:
 884	/* Put UDP header back */
 885	__skb_push(skb, sizeof(struct udphdr));
 886
 887	return 1;
 888}
 
 889
 890/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 891 * Return codes:
 892 * 0 : success.
 893 * <0: error
 894 * >0: skb should be passed up to userspace as UDP.
 895 */
 896int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 897{
 898	struct l2tp_tunnel *tunnel;
 899
 900	/* Note that this is called from the encap_rcv hook inside an
 901	 * RCU-protected region, but without the socket being locked.
 902	 * Hence we use rcu_dereference_sk_user_data to access the
 903	 * tunnel data structure rather the usual l2tp_sk_to_tunnel
 904	 * accessor function.
 905	 */
 906	tunnel = rcu_dereference_sk_user_data(sk);
 907	if (!tunnel)
 908		goto pass_up;
 909	if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
 910		goto pass_up;
 911
 912	if (l2tp_udp_recv_core(tunnel, skb))
 913		goto pass_up;
 914
 915	return 0;
 916
 917pass_up:
 918	return 1;
 919}
 920EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
 921
 922/************************************************************************
 923 * Transmit handling
 924 ***********************************************************************/
 925
 926/* Build an L2TP header for the session into the buffer provided.
 927 */
 928static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
 929{
 930	struct l2tp_tunnel *tunnel = session->tunnel;
 931	__be16 *bufp = buf;
 932	__be16 *optr = buf;
 933	u16 flags = L2TP_HDR_VER_2;
 934	u32 tunnel_id = tunnel->peer_tunnel_id;
 935	u32 session_id = session->peer_session_id;
 936
 937	if (session->send_seq)
 938		flags |= L2TP_HDRFLAG_S;
 939
 940	/* Setup L2TP header. */
 941	*bufp++ = htons(flags);
 942	*bufp++ = htons(tunnel_id);
 943	*bufp++ = htons(session_id);
 944	if (session->send_seq) {
 945		*bufp++ = htons(session->ns);
 946		*bufp++ = 0;
 947		session->ns++;
 948		session->ns &= 0xffff;
 949		trace_session_seqnum_update(session);
 950	}
 951
 952	return bufp - optr;
 953}
 954
 955static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
 956{
 957	struct l2tp_tunnel *tunnel = session->tunnel;
 958	char *bufp = buf;
 959	char *optr = bufp;
 960
 961	/* Setup L2TP header. The header differs slightly for UDP and
 962	 * IP encapsulations. For UDP, there is 4 bytes of flags.
 963	 */
 964	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
 965		u16 flags = L2TP_HDR_VER_3;
 966		*((__be16 *)bufp) = htons(flags);
 967		bufp += 2;
 968		*((__be16 *)bufp) = 0;
 969		bufp += 2;
 970	}
 971
 972	*((__be32 *)bufp) = htonl(session->peer_session_id);
 973	bufp += 4;
 974	if (session->cookie_len) {
 975		memcpy(bufp, &session->cookie[0], session->cookie_len);
 976		bufp += session->cookie_len;
 977	}
 978	if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
 979		u32 l2h = 0;
 980
 981		if (session->send_seq) {
 982			l2h = 0x40000000 | session->ns;
 983			session->ns++;
 984			session->ns &= 0xffffff;
 985			trace_session_seqnum_update(session);
 986		}
 987
 988		*((__be32 *)bufp) = htonl(l2h);
 989		bufp += 4;
 990	}
 991
 992	return bufp - optr;
 993}
 994
 995/* Queue the packet to IP for output: tunnel socket lock must be held */
 996static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl)
 997{
 998	int err;
 999
1000	skb->ignore_df = 1;
1001	skb_dst_drop(skb);
1002#if IS_ENABLED(CONFIG_IPV6)
1003	if (l2tp_sk_is_v6(tunnel->sock))
1004		err = inet6_csk_xmit(tunnel->sock, skb, NULL);
1005	else
1006#endif
1007		err = ip_queue_xmit(tunnel->sock, skb, fl);
1008
1009	return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
1010}
1011
1012static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len)
1013{
1014	struct l2tp_tunnel *tunnel = session->tunnel;
1015	unsigned int data_len = skb->len;
1016	struct sock *sk = tunnel->sock;
1017	int headroom, uhlen, udp_len;
1018	int ret = NET_XMIT_SUCCESS;
1019	struct inet_sock *inet;
1020	struct udphdr *uh;
1021
1022	/* Check that there's enough headroom in the skb to insert IP,
1023	 * UDP and L2TP headers. If not enough, expand it to
1024	 * make room. Adjust truesize.
1025	 */
1026	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(*uh) : 0;
1027	headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len;
1028	if (skb_cow_head(skb, headroom)) {
1029		kfree_skb(skb);
1030		return NET_XMIT_DROP;
1031	}
1032
1033	/* Setup L2TP header */
1034	if (tunnel->version == L2TP_HDR_VER_2)
1035		l2tp_build_l2tpv2_header(session, __skb_push(skb, session->hdr_len));
1036	else
1037		l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));
1038
1039	/* Reset skb netfilter state */
1040	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1041	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
1042	nf_reset_ct(skb);
1043
1044	bh_lock_sock_nested(sk);
 
 
 
 
 
 
1045	if (sock_owned_by_user(sk)) {
1046		kfree_skb(skb);
1047		ret = NET_XMIT_DROP;
1048		goto out_unlock;
1049	}
1050
1051	/* The user-space may change the connection status for the user-space
1052	 * provided socket at run time: we must check it under the socket lock
1053	 */
1054	if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
1055		kfree_skb(skb);
1056		ret = NET_XMIT_DROP;
1057		goto out_unlock;
1058	}
1059
1060	/* Report transmitted length before we add encap header, which keeps
1061	 * statistics consistent for both UDP and IP encap tx/rx paths.
1062	 */
1063	*len = skb->len;
1064
1065	inet = inet_sk(sk);
1066	switch (tunnel->encap) {
1067	case L2TP_ENCAPTYPE_UDP:
1068		/* Setup UDP header */
1069		__skb_push(skb, sizeof(*uh));
1070		skb_reset_transport_header(skb);
1071		uh = udp_hdr(skb);
1072		uh->source = inet->inet_sport;
1073		uh->dest = inet->inet_dport;
1074		udp_len = uhlen + session->hdr_len + data_len;
1075		uh->len = htons(udp_len);
1076
1077		/* Calculate UDP checksum if configured to do so */
1078#if IS_ENABLED(CONFIG_IPV6)
1079		if (l2tp_sk_is_v6(sk))
1080			udp6_set_csum(udp_get_no_check6_tx(sk),
1081				      skb, &inet6_sk(sk)->saddr,
1082				      &sk->sk_v6_daddr, udp_len);
1083		else
1084#endif
1085			udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
1086				     inet->inet_daddr, udp_len);
1087		break;
1088
1089	case L2TP_ENCAPTYPE_IP:
1090		break;
1091	}
1092
1093	ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);
1094
1095out_unlock:
1096	bh_unlock_sock(sk);
1097
1098	return ret;
1099}
1100
1101/* If caller requires the skb to have a ppp header, the header must be
1102 * inserted in the skb data before calling this function.
1103 */
1104int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
1105{
1106	unsigned int len = 0;
1107	int ret;
1108
1109	ret = l2tp_xmit_core(session, skb, &len);
1110	if (ret == NET_XMIT_SUCCESS) {
1111		atomic_long_inc(&session->tunnel->stats.tx_packets);
1112		atomic_long_add(len, &session->tunnel->stats.tx_bytes);
1113		atomic_long_inc(&session->stats.tx_packets);
1114		atomic_long_add(len, &session->stats.tx_bytes);
1115	} else {
1116		atomic_long_inc(&session->tunnel->stats.tx_errors);
1117		atomic_long_inc(&session->stats.tx_errors);
1118	}
1119	return ret;
1120}
1121EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1122
1123/*****************************************************************************
1124 * Tinnel and session create/destroy.
1125 *****************************************************************************/
1126
1127/* Tunnel socket destruct hook.
1128 * The tunnel context is deleted only when all session sockets have been
1129 * closed.
1130 */
1131static void l2tp_tunnel_destruct(struct sock *sk)
1132{
1133	struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
1134
1135	if (!tunnel)
1136		goto end;
1137
1138	/* Disable udp encapsulation */
1139	switch (tunnel->encap) {
1140	case L2TP_ENCAPTYPE_UDP:
1141		/* No longer an encapsulation socket. See net/ipv4/udp.c */
1142		WRITE_ONCE(udp_sk(sk)->encap_type, 0);
1143		udp_sk(sk)->encap_rcv = NULL;
1144		udp_sk(sk)->encap_destroy = NULL;
1145		break;
1146	case L2TP_ENCAPTYPE_IP:
1147		break;
1148	}
1149
1150	/* Remove hooks into tunnel socket */
1151	write_lock_bh(&sk->sk_callback_lock);
1152	sk->sk_destruct = tunnel->old_sk_destruct;
1153	sk->sk_user_data = NULL;
1154	write_unlock_bh(&sk->sk_callback_lock);
1155
1156	/* Call the original destructor */
1157	if (sk->sk_destruct)
1158		(*sk->sk_destruct)(sk);
1159
1160	kfree_rcu(tunnel, rcu);
1161end:
1162	return;
1163}
1164
1165/* Remove an l2tp session from l2tp_core's hash lists. */
1166static void l2tp_session_unhash(struct l2tp_session *session)
1167{
1168	struct l2tp_tunnel *tunnel = session->tunnel;
1169
1170	/* Remove the session from core hashes */
1171	if (tunnel) {
1172		/* Remove from the per-tunnel hash */
1173		spin_lock_bh(&tunnel->hlist_lock);
1174		hlist_del_init_rcu(&session->hlist);
1175		spin_unlock_bh(&tunnel->hlist_lock);
1176
1177		/* For L2TPv3 we have a per-net hash: remove from there, too */
1178		if (tunnel->version != L2TP_HDR_VER_2) {
1179			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1180
1181			spin_lock_bh(&pn->l2tp_session_hlist_lock);
1182			hlist_del_init_rcu(&session->global_hlist);
1183			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
 
 
 
 
 
 
 
 
 
 
 
 
1184		}
 
1185
1186		synchronize_rcu();
 
1187	}
1188}
1189
1190/* When the tunnel is closed, all the attached sessions need to go too.
1191 */
1192static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1193{
1194	struct l2tp_session *session;
1195	int hash;
1196
1197	spin_lock_bh(&tunnel->hlist_lock);
1198	tunnel->acpt_newsess = false;
1199	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
1200again:
1201		hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
1202			hlist_del_init_rcu(&session->hlist);
1203
1204			spin_unlock_bh(&tunnel->hlist_lock);
1205			l2tp_session_delete(session);
1206			spin_lock_bh(&tunnel->hlist_lock);
1207
1208			/* Now restart from the beginning of this hash
1209			 * chain.  We always remove a session from the
1210			 * list so we are guaranteed to make forward
1211			 * progress.
1212			 */
1213			goto again;
1214		}
1215	}
1216	spin_unlock_bh(&tunnel->hlist_lock);
1217}
1218
1219/* Tunnel socket destroy hook for UDP encapsulation */
1220static void l2tp_udp_encap_destroy(struct sock *sk)
1221{
1222	struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
1223
1224	if (tunnel)
 
1225		l2tp_tunnel_delete(tunnel);
 
 
1226}
1227
1228static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
1229{
1230	struct l2tp_net *pn = l2tp_pernet(net);
1231
1232	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1233	idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id);
1234	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1235}
1236
1237/* Workqueue tunnel deletion function */
1238static void l2tp_tunnel_del_work(struct work_struct *work)
1239{
1240	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
1241						  del_work);
1242	struct sock *sk = tunnel->sock;
1243	struct socket *sock = sk->sk_socket;
1244
1245	l2tp_tunnel_closeall(tunnel);
1246
1247	/* If the tunnel socket was created within the kernel, use
1248	 * the sk API to release it here.
1249	 */
1250	if (tunnel->fd < 0) {
1251		if (sock) {
1252			kernel_sock_shutdown(sock, SHUT_RDWR);
1253			sock_release(sock);
1254		}
1255	}
1256
1257	l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
1258	/* drop initial ref */
1259	l2tp_tunnel_dec_refcount(tunnel);
1260
1261	/* drop workqueue ref */
1262	l2tp_tunnel_dec_refcount(tunnel);
1263}
1264
1265/* Create a socket for the tunnel, if one isn't set up by
1266 * userspace. This is used for static tunnels where there is no
1267 * managing L2TP daemon.
1268 *
1269 * Since we don't want these sockets to keep a namespace alive by
1270 * themselves, we drop the socket's namespace refcount after creation.
1271 * These sockets are freed when the namespace exits using the pernet
1272 * exit hook.
1273 */
1274static int l2tp_tunnel_sock_create(struct net *net,
1275				   u32 tunnel_id,
1276				   u32 peer_tunnel_id,
1277				   struct l2tp_tunnel_cfg *cfg,
1278				   struct socket **sockp)
1279{
1280	int err = -EINVAL;
1281	struct socket *sock = NULL;
1282	struct udp_port_cfg udp_conf;
1283
1284	switch (cfg->encap) {
1285	case L2TP_ENCAPTYPE_UDP:
1286		memset(&udp_conf, 0, sizeof(udp_conf));
1287
1288#if IS_ENABLED(CONFIG_IPV6)
1289		if (cfg->local_ip6 && cfg->peer_ip6) {
1290			udp_conf.family = AF_INET6;
1291			memcpy(&udp_conf.local_ip6, cfg->local_ip6,
1292			       sizeof(udp_conf.local_ip6));
1293			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
1294			       sizeof(udp_conf.peer_ip6));
1295			udp_conf.use_udp6_tx_checksums =
1296			  !cfg->udp6_zero_tx_checksums;
1297			udp_conf.use_udp6_rx_checksums =
1298			  !cfg->udp6_zero_rx_checksums;
1299		} else
1300#endif
1301		{
1302			udp_conf.family = AF_INET;
1303			udp_conf.local_ip = cfg->local_ip;
1304			udp_conf.peer_ip = cfg->peer_ip;
1305			udp_conf.use_udp_checksums = cfg->use_udp_checksums;
1306		}
1307
1308		udp_conf.local_udp_port = htons(cfg->local_udp_port);
1309		udp_conf.peer_udp_port = htons(cfg->peer_udp_port);
1310
1311		err = udp_sock_create(net, &udp_conf, &sock);
1312		if (err < 0)
1313			goto out;
1314
1315		break;
1316
1317	case L2TP_ENCAPTYPE_IP:
1318#if IS_ENABLED(CONFIG_IPV6)
1319		if (cfg->local_ip6 && cfg->peer_ip6) {
1320			struct sockaddr_l2tpip6 ip6_addr = {0};
1321
1322			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
1323					       IPPROTO_L2TP, &sock);
1324			if (err < 0)
1325				goto out;
1326
1327			ip6_addr.l2tp_family = AF_INET6;
1328			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
1329			       sizeof(ip6_addr.l2tp_addr));
1330			ip6_addr.l2tp_conn_id = tunnel_id;
1331			err = kernel_bind(sock, (struct sockaddr *)&ip6_addr,
1332					  sizeof(ip6_addr));
1333			if (err < 0)
1334				goto out;
1335
1336			ip6_addr.l2tp_family = AF_INET6;
1337			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
1338			       sizeof(ip6_addr.l2tp_addr));
1339			ip6_addr.l2tp_conn_id = peer_tunnel_id;
1340			err = kernel_connect(sock,
1341					     (struct sockaddr *)&ip6_addr,
1342					     sizeof(ip6_addr), 0);
1343			if (err < 0)
1344				goto out;
1345		} else
1346#endif
1347		{
1348			struct sockaddr_l2tpip ip_addr = {0};
1349
1350			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
1351					       IPPROTO_L2TP, &sock);
1352			if (err < 0)
1353				goto out;
1354
1355			ip_addr.l2tp_family = AF_INET;
1356			ip_addr.l2tp_addr = cfg->local_ip;
1357			ip_addr.l2tp_conn_id = tunnel_id;
1358			err = kernel_bind(sock, (struct sockaddr *)&ip_addr,
1359					  sizeof(ip_addr));
1360			if (err < 0)
1361				goto out;
1362
1363			ip_addr.l2tp_family = AF_INET;
1364			ip_addr.l2tp_addr = cfg->peer_ip;
1365			ip_addr.l2tp_conn_id = peer_tunnel_id;
1366			err = kernel_connect(sock, (struct sockaddr *)&ip_addr,
1367					     sizeof(ip_addr), 0);
1368			if (err < 0)
1369				goto out;
1370		}
1371		break;
1372
1373	default:
1374		goto out;
1375	}
1376
1377out:
1378	*sockp = sock;
1379	if (err < 0 && sock) {
1380		kernel_sock_shutdown(sock, SHUT_RDWR);
1381		sock_release(sock);
1382		*sockp = NULL;
1383	}
1384
1385	return err;
1386}
1387
1388int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
1389		       struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1390{
1391	struct l2tp_tunnel *tunnel = NULL;
1392	int err;
1393	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1394
1395	if (cfg)
1396		encap = cfg->encap;
1397
1398	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
1399	if (!tunnel) {
1400		err = -ENOMEM;
1401		goto err;
1402	}
1403
1404	tunnel->version = version;
1405	tunnel->tunnel_id = tunnel_id;
1406	tunnel->peer_tunnel_id = peer_tunnel_id;
1407
1408	tunnel->magic = L2TP_TUNNEL_MAGIC;
1409	sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1410	spin_lock_init(&tunnel->hlist_lock);
1411	tunnel->acpt_newsess = true;
 
1412
1413	tunnel->encap = encap;
1414
1415	refcount_set(&tunnel->ref_count, 1);
1416	tunnel->fd = fd;
1417
1418	/* Init delete workqueue struct */
1419	INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1420
1421	INIT_LIST_HEAD(&tunnel->list);
1422
1423	err = 0;
1424err:
1425	if (tunnelp)
1426		*tunnelp = tunnel;
1427
1428	return err;
1429}
1430EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1431
1432static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
1433				enum l2tp_encap_type encap)
1434{
 
 
1435	if (!net_eq(sock_net(sk), net))
1436		return -EINVAL;
1437
1438	if (sk->sk_type != SOCK_DGRAM)
1439		return -EPROTONOSUPPORT;
1440
1441	if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
1442		return -EPROTONOSUPPORT;
1443
1444	if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
1445	    (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
1446		return -EPROTONOSUPPORT;
1447
1448	if (sk->sk_user_data)
 
 
 
 
 
1449		return -EBUSY;
 
1450
1451	return 0;
1452}
1453
1454int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
1455			 struct l2tp_tunnel_cfg *cfg)
1456{
1457	struct l2tp_net *pn = l2tp_pernet(net);
1458	u32 tunnel_id = tunnel->tunnel_id;
1459	struct socket *sock;
1460	struct sock *sk;
1461	int ret;
1462
1463	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1464	ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
1465			    GFP_ATOMIC);
1466	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1467	if (ret)
1468		return ret == -ENOSPC ? -EEXIST : ret;
1469
1470	if (tunnel->fd < 0) {
1471		ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
1472					      tunnel->peer_tunnel_id, cfg,
1473					      &sock);
1474		if (ret < 0)
1475			goto err;
1476	} else {
1477		sock = sockfd_lookup(tunnel->fd, &ret);
1478		if (!sock)
1479			goto err;
1480	}
1481
1482	sk = sock->sk;
1483	lock_sock(sk);
1484	write_lock_bh(&sk->sk_callback_lock);
1485	ret = l2tp_validate_socket(sk, net, tunnel->encap);
1486	if (ret < 0)
1487		goto err_inval_sock;
1488	rcu_assign_sk_user_data(sk, tunnel);
1489	write_unlock_bh(&sk->sk_callback_lock);
1490
1491	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1492		struct udp_tunnel_sock_cfg udp_cfg = {
1493			.sk_user_data = tunnel,
1494			.encap_type = UDP_ENCAP_L2TPINUDP,
1495			.encap_rcv = l2tp_udp_encap_recv,
 
1496			.encap_destroy = l2tp_udp_encap_destroy,
1497		};
1498
1499		setup_udp_tunnel_sock(net, sock, &udp_cfg);
1500	}
1501
1502	tunnel->old_sk_destruct = sk->sk_destruct;
1503	sk->sk_destruct = &l2tp_tunnel_destruct;
1504	sk->sk_allocation = GFP_ATOMIC;
1505	release_sock(sk);
1506
1507	sock_hold(sk);
1508	tunnel->sock = sk;
1509	tunnel->l2tp_net = net;
1510
1511	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1512	idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
1513	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1514
1515	trace_register_tunnel(tunnel);
1516
1517	if (tunnel->fd >= 0)
1518		sockfd_put(sock);
1519
1520	return 0;
1521
1522err_inval_sock:
1523	write_unlock_bh(&sk->sk_callback_lock);
1524	release_sock(sk);
1525
1526	if (tunnel->fd < 0)
1527		sock_release(sock);
1528	else
1529		sockfd_put(sock);
1530err:
1531	l2tp_tunnel_remove(net, tunnel);
1532	return ret;
1533}
1534EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
1535
1536/* This function is used by the netlink TUNNEL_DELETE command.
1537 */
1538void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1539{
1540	if (!test_and_set_bit(0, &tunnel->dead)) {
1541		trace_delete_tunnel(tunnel);
1542		l2tp_tunnel_inc_refcount(tunnel);
1543		queue_work(l2tp_wq, &tunnel->del_work);
1544	}
1545}
1546EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1547
1548void l2tp_session_delete(struct l2tp_session *session)
1549{
1550	if (test_and_set_bit(0, &session->dead))
1551		return;
 
 
 
 
 
 
 
 
 
 
 
1552
1553	trace_delete_session(session);
1554	l2tp_session_unhash(session);
1555	l2tp_session_queue_purge(session);
1556	if (session->session_close)
1557		(*session->session_close)(session);
1558
1559	l2tp_session_dec_refcount(session);
 
 
 
 
1560}
1561EXPORT_SYMBOL_GPL(l2tp_session_delete);
1562
1563/* We come here whenever a session's send_seq, cookie_len or
1564 * l2specific_type parameters are set.
1565 */
1566void l2tp_session_set_header_len(struct l2tp_session *session, int version)
 
1567{
1568	if (version == L2TP_HDR_VER_2) {
1569		session->hdr_len = 6;
1570		if (session->send_seq)
1571			session->hdr_len += 4;
1572	} else {
1573		session->hdr_len = 4 + session->cookie_len;
1574		session->hdr_len += l2tp_get_l2specific_len(session);
1575		if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
1576			session->hdr_len += 4;
1577	}
1578}
1579EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
1580
1581struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id,
1582					 u32 peer_session_id, struct l2tp_session_cfg *cfg)
1583{
1584	struct l2tp_session *session;
1585
1586	session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
1587	if (session) {
1588		session->magic = L2TP_SESSION_MAGIC;
1589		session->tunnel = tunnel;
1590
1591		session->session_id = session_id;
1592		session->peer_session_id = peer_session_id;
1593		session->nr = 0;
1594		if (tunnel->version == L2TP_HDR_VER_2)
1595			session->nr_max = 0xffff;
1596		else
1597			session->nr_max = 0xffffff;
1598		session->nr_window_size = session->nr_max / 2;
1599		session->nr_oos_count_max = 4;
1600
1601		/* Use NR of first received packet */
1602		session->reorder_skip = 1;
1603
1604		sprintf(&session->name[0], "sess %u/%u",
1605			tunnel->tunnel_id, session->session_id);
1606
1607		skb_queue_head_init(&session->reorder_q);
1608
 
1609		INIT_HLIST_NODE(&session->hlist);
1610		INIT_HLIST_NODE(&session->global_hlist);
 
 
1611
1612		if (cfg) {
1613			session->pwtype = cfg->pw_type;
1614			session->send_seq = cfg->send_seq;
1615			session->recv_seq = cfg->recv_seq;
1616			session->lns_mode = cfg->lns_mode;
1617			session->reorder_timeout = cfg->reorder_timeout;
1618			session->l2specific_type = cfg->l2specific_type;
1619			session->cookie_len = cfg->cookie_len;
1620			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
1621			session->peer_cookie_len = cfg->peer_cookie_len;
1622			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
1623		}
1624
1625		l2tp_session_set_header_len(session, tunnel->version);
1626
1627		refcount_set(&session->ref_count, 1);
1628
1629		return session;
1630	}
1631
1632	return ERR_PTR(-ENOMEM);
1633}
1634EXPORT_SYMBOL_GPL(l2tp_session_create);
1635
1636/*****************************************************************************
1637 * Init and cleanup
1638 *****************************************************************************/
1639
1640static __net_init int l2tp_init_net(struct net *net)
1641{
1642	struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1643	int hash;
1644
1645	idr_init(&pn->l2tp_tunnel_idr);
1646	spin_lock_init(&pn->l2tp_tunnel_idr_lock);
1647
1648	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1649		INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
1650
1651	spin_lock_init(&pn->l2tp_session_hlist_lock);
1652
1653	return 0;
1654}
1655
1656static __net_exit void l2tp_exit_net(struct net *net)
1657{
1658	struct l2tp_net *pn = l2tp_pernet(net);
1659	struct l2tp_tunnel *tunnel = NULL;
1660	unsigned long tunnel_id, tmp;
1661	int hash;
1662
1663	rcu_read_lock_bh();
1664	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
1665		if (tunnel)
1666			l2tp_tunnel_delete(tunnel);
1667	}
1668	rcu_read_unlock_bh();
1669
1670	if (l2tp_wq)
1671		flush_workqueue(l2tp_wq);
1672	rcu_barrier();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1673
1674	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1675		WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
1676	idr_destroy(&pn->l2tp_tunnel_idr);
1677}
1678
1679static struct pernet_operations l2tp_net_ops = {
1680	.init = l2tp_init_net,
1681	.exit = l2tp_exit_net,
 
1682	.id   = &l2tp_net_id,
1683	.size = sizeof(struct l2tp_net),
1684};
1685
1686static int __init l2tp_init(void)
1687{
1688	int rc = 0;
1689
1690	rc = register_pernet_device(&l2tp_net_ops);
1691	if (rc)
1692		goto out;
1693
1694	l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
1695	if (!l2tp_wq) {
1696		pr_err("alloc_workqueue failed\n");
1697		unregister_pernet_device(&l2tp_net_ops);
1698		rc = -ENOMEM;
1699		goto out;
1700	}
1701
1702	pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1703
1704out:
1705	return rc;
1706}
1707
1708static void __exit l2tp_exit(void)
1709{
1710	unregister_pernet_device(&l2tp_net_ops);
1711	if (l2tp_wq) {
1712		destroy_workqueue(l2tp_wq);
1713		l2tp_wq = NULL;
1714	}
1715}
1716
1717module_init(l2tp_init);
1718module_exit(l2tp_exit);
1719
1720MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
1721MODULE_DESCRIPTION("L2TP core");
1722MODULE_LICENSE("GPL");
1723MODULE_VERSION(L2TP_DRV_VERSION);