// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic PPP layer for Linux.
 *
 * Copyright 1999-2002 Paul Mackerras.
 *
 * The generic PPP layer handles the PPP network interfaces, the
 * /dev/ppp device, packet and VJ compression, and multilink.
 * It talks to PPP `channels' via the interface defined in
 * include/linux/ppp_channel.h.  Channels provide the basic means for
 * sending and receiving PPP frames on some kind of communications
 * channel.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20041108==
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/filter.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/stddef.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/unaligned.h>
#include <net/slhc_vj.h>
#include <linux/atomic.h>
#include <linux/refcount.h>

#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define PPP_VERSION	"2.4.2"

/*
 * Network protocols we support.
 */
#define NP_IP	0		/* Internet Protocol V4 */
#define NP_IPV6	1		/* Internet Protocol V6 */
#define NP_IPX	2		/* IPX protocol */
#define NP_AT	3		/* Appletalk protocol */
#define NP_MPLS_UC 4		/* MPLS unicast */
#define NP_MPLS_MC 5		/* MPLS multicast */
#define NUM_NP	6		/* Number of NPs. */

#define MPHDRLEN	6	/* multilink protocol header length */
#define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */

#define PPP_PROTO_LEN	2
#define PPP_LCP_HDRLEN	4

/* The filter instructions generated by libpcap are constructed
 * assuming a four-byte PPP header on each packet, where the last
 * 2 bytes are the protocol field defined in the RFC and the first
 * byte of the first 2 bytes indicates the direction.
 * The second byte is currently unused, but we still need to initialize
 * it to prevent crafted BPF programs from reading uninitialized data.
 */
#define PPP_FILTER_OUTBOUND_TAG 0x0100
#define PPP_FILTER_INBOUND_TAG  0x0000
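
/* For example, the transmit path below tags a frame like this before
 * running a BPF filter over it (a sketch of what ppp_send_frame() does):
 *
 *	*(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_OUTBOUND_TAG);
 *	if (ppp->pass_filter && bpf_prog_run(ppp->pass_filter, skb) == 0)
 *		... drop the frame ...
 *	skb_pull(skb, 2);
 */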

/*
 * An instance of /dev/ppp can be associated with either a ppp
 * interface unit or a ppp channel.  In both cases, file->private_data
 * points to one of these.
 */
struct ppp_file {
	enum {
		INTERFACE=1, CHANNEL
	}		kind;
	struct sk_buff_head xq;		/* pppd transmit queue */
	struct sk_buff_head rq;		/* receive queue for pppd */
	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
	refcount_t	refcnt;		/* # refs (incl /dev/ppp attached) */
	int		hdrlen;		/* space to leave for headers */
	int		index;		/* interface unit / channel number */
	int		dead;		/* unit/channel has been shut down */
};

#define PF_TO_X(pf, X)		container_of(pf, X, file)

#define PF_TO_PPP(pf)		PF_TO_X(pf, struct ppp)
#define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)

/*
 * Data structure to hold primary network stats for which
 * we want to use 64 bit storage.  Other network stats
 * are stored in dev->stats of the ppp structure.
 */
struct ppp_link_stats {
	u64 rx_packets;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_bytes;
};

/*
 * Data structure describing one ppp unit.
 * A ppp unit corresponds to a ppp network interface device
 * and represents a multilink bundle.
 * It can have 0 or more ppp channels connected to it.
 */
struct ppp {
	struct ppp_file	file;		/* stuff for read/write/poll 0 */
	struct file	*owner;		/* file that owns this unit 48 */
	struct list_head channels;	/* list of attached channels 4c */
	int		n_channels;	/* how many channels are attached 54 */
	spinlock_t	rlock;		/* lock for receive side 58 */
	spinlock_t	wlock;		/* lock for transmit side 5c */
	int __percpu	*xmit_recursion; /* xmit recursion detect */
	int		mru;		/* max receive unit 60 */
	unsigned int	flags;		/* control bits 64 */
	unsigned int	xstate;		/* transmit state bits 68 */
	unsigned int	rstate;		/* receive state bits 6c */
	int		debug;		/* debug flags 70 */
	struct slcompress *vj;		/* state for VJ header compression */
	enum NPmode	npmode[NUM_NP];	/* what to do with each net proto 78 */
	struct sk_buff	*xmit_pending;	/* a packet ready to go out 88 */
	struct compressor *xcomp;	/* transmit packet compressor 8c */
	void		*xc_state;	/* its internal state 90 */
	struct compressor *rcomp;	/* receive decompressor 94 */
	void		*rc_state;	/* its internal state 98 */
	unsigned long	last_xmit;	/* jiffies when last pkt sent 9c */
	unsigned long	last_recv;	/* jiffies when last pkt rcvd a0 */
	struct net_device *dev;		/* network interface device a4 */
	int		closing;	/* is device closing down? a8 */
#ifdef CONFIG_PPP_MULTILINK
	int		nxchan;		/* next channel to send something on */
	u32		nxseq;		/* next sequence number to send */
	int		mrru;		/* MP: max reconst. receive unit */
	u32		nextseq;	/* MP: seq no of next packet */
	u32		minseq;		/* MP: min of most recent seqnos */
	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	struct bpf_prog *pass_filter;	/* filter for packets to pass */
	struct bpf_prog *active_filter; /* filter for pkts to reset idle */
#endif /* CONFIG_PPP_FILTER */
	struct net	*ppp_net;	/* the net we belong to */
	struct ppp_link_stats stats64;	/* 64 bit network stats */
};

/*
 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
 * SC_MUST_COMP
 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
 * Bits in xstate: SC_COMP_RUN
 */
#define SC_FLAG_BITS	(SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
			 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
			 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)

/*
 * Private data structure for each channel.
 * This includes the data structure used for multilink.
 */
struct channel {
	struct ppp_file	file;		/* stuff for read/write/poll */
	struct list_head list;		/* link in all/new_channels list */
	struct ppp_channel *chan;	/* public channel data structure */
	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
	spinlock_t	downl;		/* protects `chan', file.xq dequeue */
	struct ppp	*ppp;		/* ppp unit we're connected to */
	struct net	*chan_net;	/* the net channel belongs to */
	netns_tracker	ns_tracker;
	struct list_head clist;		/* link in list of channels per unit */
	rwlock_t	upl;		/* protects `ppp' and 'bridge' */
	struct channel __rcu *bridge;	/* "bridged" ppp channel */
#ifdef CONFIG_PPP_MULTILINK
	u8		avail;		/* flag used in multilink stuff */
	u8		had_frag;	/* >= 1 fragments have been sent */
	u32		lastseq;	/* MP: last sequence # received */
	int		speed;		/* speed of the corresponding ppp channel */
#endif /* CONFIG_PPP_MULTILINK */
};

struct ppp_config {
	struct file *file;
	s32 unit;
	bool ifname_is_set;
};

/*
 * SMP locking issues:
 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
 * list and the ppp.n_channels field; you need to take both locks
 * before you modify them.
 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
 * channel.downl.
 */
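
/* For instance, transmit-side code that walks the channel list and then
 * touches one channel's output side nests the locks in exactly that order
 * (a sketch of what ppp_push()/ppp_mp_explode() do below):
 *
 *	ppp_xmit_lock(ppp);			-- takes ppp->wlock
 *	list_for_each_entry(pch, &ppp->channels, clist) {
 *		spin_lock(&pch->downl);		-- innermost lock
 *		...
 *		spin_unlock(&pch->downl);
 *	}
 *	ppp_xmit_unlock(ppp);
 */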

static DEFINE_MUTEX(ppp_mutex);
static atomic_t ppp_unit_count = ATOMIC_INIT(0);
static atomic_t channel_count = ATOMIC_INIT(0);

/* per-net private data for this module */
static unsigned int ppp_net_id __read_mostly;
struct ppp_net {
	/* units to ppp mapping */
	struct idr units_idr;

	/*
	 * all_ppp_mutex protects the units_idr mapping.
	 * It also ensures that finding a ppp unit in the units_idr
	 * map and updating its file.refcnt field is atomic.
	 */
	struct mutex all_ppp_mutex;

	/* channels */
	struct list_head all_channels;
	struct list_head new_channels;
	int last_channel_index;

	/*
	 * all_channels_lock protects all_channels and
	 * last_channel_index, and the atomicity of finding
	 * a channel and updating its file.refcnt field.
	 */
	spinlock_t all_channels_lock;
};

/* Get the PPP protocol number from a skb */
#define PPP_PROTO(skb)	get_unaligned_be16((skb)->data)

/* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN	32

/*
 * Maximum number of multilink fragments queued up.
 * This has to be large enough to cope with the maximum latency of
 * the slowest channel relative to the others.  Strictly it should
 * depend on the number of channels and their characteristics.
 */
#define PPP_MP_MAX_QLEN	128

/* Multilink header bits. */
#define B	0x80		/* this fragment begins a packet */
#define E	0x40		/* this fragment ends a packet */

/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
#define seq_before(a, b)	((s32)((a) - (b)) < 0)
#define seq_after(a, b)		((s32)((a) - (b)) > 0)
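
/* E.g. seq_before(0xfffffffe, 1) is true: the u32 difference 0xfffffffd
 * is negative when viewed as an s32, so the comparison still gives the
 * right answer across a 2^32 sequence-number wrap.
 */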

/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
			struct file *file, unsigned int cmd, unsigned long arg);
static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
			      struct channel *pch);
static void ppp_receive_error(struct ppp *ppp);
static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
					    struct sk_buff *skb);
#ifdef CONFIG_PPP_MULTILINK
static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
				struct channel *pch);
static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
#endif /* CONFIG_PPP_MULTILINK */
static int ppp_set_compress(struct ppp *ppp, struct ppp_option_data *data);
static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
static int ppp_create_interface(struct net *net, struct file *file, int *unit);
static void init_ppp_file(struct ppp_file *pf, int kind);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
static int ppp_connect_channel(struct channel *pch, int unit);
static int ppp_disconnect_channel(struct channel *pch);
static void ppp_destroy_channel(struct channel *pch);
static int unit_get(struct idr *p, void *ptr, int min);
static int unit_set(struct idr *p, void *ptr, int n);
static void unit_put(struct idr *p, int n);
static void *unit_find(struct idr *p, int n);
static void ppp_setup(struct net_device *dev);

static const struct net_device_ops ppp_netdev_ops;

static const struct class ppp_class = {
	.name = "ppp",
};

/* per net-namespace data */
static inline struct ppp_net *ppp_pernet(struct net *net)
{
	return net_generic(net, ppp_net_id);
}

/* Translates a PPP protocol number to a NP index (NP == network protocol) */
static inline int proto_to_npindex(int proto)
{
	switch (proto) {
	case PPP_IP:
		return NP_IP;
	case PPP_IPV6:
		return NP_IPV6;
	case PPP_IPX:
		return NP_IPX;
	case PPP_AT:
		return NP_AT;
	case PPP_MPLS_UC:
		return NP_MPLS_UC;
	case PPP_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -EINVAL;
}

/* Translates an NP index into a PPP protocol number */
static const int npindex_to_proto[NUM_NP] = {
	PPP_IP,
	PPP_IPV6,
	PPP_IPX,
	PPP_AT,
	PPP_MPLS_UC,
	PPP_MPLS_MC,
};

/* Translates an ethertype into an NP index */
static inline int ethertype_to_npindex(int ethertype)
{
	switch (ethertype) {
	case ETH_P_IP:
		return NP_IP;
	case ETH_P_IPV6:
		return NP_IPV6;
	case ETH_P_IPX:
		return NP_IPX;
	case ETH_P_PPPTALK:
	case ETH_P_ATALK:
		return NP_AT;
	case ETH_P_MPLS_UC:
		return NP_MPLS_UC;
	case ETH_P_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -1;
}

/* Translates an NP index into an ethertype */
static const int npindex_to_ethertype[NUM_NP] = {
	ETH_P_IP,
	ETH_P_IPV6,
	ETH_P_IPX,
	ETH_P_PPPTALK,
	ETH_P_MPLS_UC,
	ETH_P_MPLS_MC,
};

/*
 * Locking shorthand.
 */
#define ppp_xmit_lock(ppp)	spin_lock_bh(&(ppp)->wlock)
#define ppp_xmit_unlock(ppp)	spin_unlock_bh(&(ppp)->wlock)
#define ppp_recv_lock(ppp)	spin_lock_bh(&(ppp)->rlock)
#define ppp_recv_unlock(ppp)	spin_unlock_bh(&(ppp)->rlock)
#define ppp_lock(ppp)		do { ppp_xmit_lock(ppp); \
				     ppp_recv_lock(ppp); } while (0)
#define ppp_unlock(ppp)		do { ppp_recv_unlock(ppp); \
				     ppp_xmit_unlock(ppp); } while (0)

/*
 * /dev/ppp device routines.
 * The /dev/ppp device is used by pppd to control the ppp unit.
 * It supports the read, write, ioctl and poll functions.
 * Open instances of /dev/ppp can be in one of three states:
 * unattached, attached to a ppp unit, or attached to a ppp channel.
 */
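/* A typical pppd-style sequence (illustrative only, error handling
 * omitted) takes an fd through those states:
 *
 *	int fd = open("/dev/ppp", O_RDWR);	-- unattached
 *	int unit = -1;
 *	ioctl(fd, PPPIOCNEWUNIT, &unit);	-- attached to a new unit
 *
 * or ioctl(fd, PPPIOCATTACH, &unit) / ioctl(fd, PPPIOCATTCHAN, &chan)
 * to attach to an existing unit or channel; see ppp_unattached_ioctl().
 */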
static int ppp_open(struct inode *inode, struct file *file)
{
	/*
	 * This could (should?) be enforced by the permissions on /dev/ppp.
	 */
	if (!ns_capable(file->f_cred->user_ns, CAP_NET_ADMIN))
		return -EPERM;
	return 0;
}

static int ppp_release(struct inode *unused, struct file *file)
{
	struct ppp_file *pf = file->private_data;
	struct ppp *ppp;

	if (pf) {
		file->private_data = NULL;
		if (pf->kind == INTERFACE) {
			ppp = PF_TO_PPP(pf);
			rtnl_lock();
			if (file == ppp->owner)
				unregister_netdevice(ppp->dev);
			rtnl_unlock();
		}
		if (refcount_dec_and_test(&pf->refcnt)) {
			switch (pf->kind) {
			case INTERFACE:
				ppp_destroy_interface(PF_TO_PPP(pf));
				break;
			case CHANNEL:
				ppp_destroy_channel(PF_TO_CHANNEL(pf));
				break;
			}
		}
	}
	return 0;
}

static ssize_t ppp_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	struct sk_buff *skb = NULL;
	struct iovec iov;
	struct iov_iter to;

	ret = count;

	if (!pf)
		return -ENXIO;
	add_wait_queue(&pf->rwait, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		skb = skb_dequeue(&pf->rq);
		if (skb)
			break;
		ret = 0;
		if (pf->dead)
			break;
		if (pf->kind == INTERFACE) {
			/*
			 * Return 0 (EOF) on an interface that has no
			 * channels connected, unless it is looping
			 * network traffic (demand mode).
			 */
			struct ppp *ppp = PF_TO_PPP(pf);

			ppp_recv_lock(ppp);
			if (ppp->n_channels == 0 &&
			    (ppp->flags & SC_LOOP_TRAFFIC) == 0) {
				ppp_recv_unlock(ppp);
				break;
			}
			ppp_recv_unlock(ppp);
		}
		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&pf->rwait, &wait);

	if (!skb)
		goto out;

	ret = -EOVERFLOW;
	if (skb->len > count)
		goto outf;
	ret = -EFAULT;
	iov.iov_base = buf;
	iov.iov_len = count;
	iov_iter_init(&to, ITER_DEST, &iov, 1, count);
	if (skb_copy_datagram_iter(skb, 0, &to, skb->len))
		goto outf;
	ret = skb->len;

 outf:
	kfree_skb(skb);
 out:
	return ret;
}

static bool ppp_check_packet(struct sk_buff *skb, size_t count)
{
	/* LCP packets must include the LCP header, which is 4 bytes long:
	 * 1-byte code, 1-byte identifier, and 2-byte length.
	 */
	return get_unaligned_be16(skb->data) != PPP_LCP ||
		count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN;
}
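
/* So the shortest valid write carrying PPP_LCP is 6 bytes, e.g. an LCP
 * Echo-Request (illustrative field values):
 *
 *	c0 21   09    00    00 04
 *	proto   code  id    length
 */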

static ssize_t ppp_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	struct sk_buff *skb;
	ssize_t ret;

	if (!pf)
		return -ENXIO;
	/* All PPP packets should start with the 2-byte protocol */
	if (count < PPP_PROTO_LEN)
		return -EINVAL;
	ret = -ENOMEM;
	skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
	if (!skb)
		goto out;
	skb_reserve(skb, pf->hdrlen);
	ret = -EFAULT;
	if (copy_from_user(skb_put(skb, count), buf, count)) {
		kfree_skb(skb);
		goto out;
	}
	ret = -EINVAL;
	if (unlikely(!ppp_check_packet(skb, count))) {
		kfree_skb(skb);
		goto out;
	}

	switch (pf->kind) {
	case INTERFACE:
		ppp_xmit_process(PF_TO_PPP(pf), skb);
		break;
	case CHANNEL:
		skb_queue_tail(&pf->xq, skb);
		ppp_channel_push(PF_TO_CHANNEL(pf));
		break;
	}

	ret = count;

 out:
	return ret;
}

/* No kernel lock - fine */
static __poll_t ppp_poll(struct file *file, poll_table *wait)
{
	struct ppp_file *pf = file->private_data;
	__poll_t mask;

	if (!pf)
		return 0;
	poll_wait(file, &pf->rwait, wait);
	mask = EPOLLOUT | EPOLLWRNORM;
	if (skb_peek(&pf->rq))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (pf->dead)
		mask |= EPOLLHUP;
	else if (pf->kind == INTERFACE) {
		/* see comment in ppp_read */
		struct ppp *ppp = PF_TO_PPP(pf);

		ppp_recv_lock(ppp);
		if (ppp->n_channels == 0 &&
		    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
			mask |= EPOLLIN | EPOLLRDNORM;
		ppp_recv_unlock(ppp);
	}

	return mask;
}

#ifdef CONFIG_PPP_FILTER
static struct bpf_prog *get_filter(struct sock_fprog *uprog)
{
	struct sock_fprog_kern fprog;
	struct bpf_prog *res = NULL;
	int err;

	if (!uprog->len)
		return NULL;

	/* uprog->len is unsigned short, so no overflow here */
	fprog.len = uprog->len;
	fprog.filter = memdup_array_user(uprog->filter,
					 uprog->len, sizeof(struct sock_filter));
	if (IS_ERR(fprog.filter))
		return ERR_CAST(fprog.filter);

	err = bpf_prog_create(&res, &fprog);
	kfree(fprog.filter);

	return err ? ERR_PTR(err) : res;
}

static struct bpf_prog *ppp_get_filter(struct sock_fprog __user *p)
{
	struct sock_fprog uprog;

	if (copy_from_user(&uprog, p, sizeof(struct sock_fprog)))
		return ERR_PTR(-EFAULT);
	return get_filter(&uprog);
}

#ifdef CONFIG_COMPAT
struct sock_fprog32 {
	unsigned short len;
	compat_caddr_t filter;
};

#define PPPIOCSPASS32		_IOW('t', 71, struct sock_fprog32)
#define PPPIOCSACTIVE32		_IOW('t', 70, struct sock_fprog32)

static struct bpf_prog *compat_ppp_get_filter(struct sock_fprog32 __user *p)
{
	struct sock_fprog32 uprog32;
	struct sock_fprog uprog;

	if (copy_from_user(&uprog32, p, sizeof(struct sock_fprog32)))
		return ERR_PTR(-EFAULT);
	uprog.len = uprog32.len;
	uprog.filter = compat_ptr(uprog32.filter);
	return get_filter(&uprog);
}
#endif
#endif

/* Bridge one PPP channel to another.
 * When two channels are bridged, ppp_input on one channel is redirected to
 * the other's ops->start_xmit handler.
 * In order to safely bridge channels we must reject channels which are already
 * part of a bridge instance, or which form part of an existing unit.
 * Once successfully bridged, each channel holds a reference on the other
 * to prevent it being freed while the bridge is extant.
 */
static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
{
	write_lock_bh(&pch->upl);
	if (pch->ppp ||
	    rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl))) {
		write_unlock_bh(&pch->upl);
		return -EALREADY;
	}
	refcount_inc(&pchb->file.refcnt);
	rcu_assign_pointer(pch->bridge, pchb);
	write_unlock_bh(&pch->upl);

	write_lock_bh(&pchb->upl);
	if (pchb->ppp ||
	    rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl))) {
		write_unlock_bh(&pchb->upl);
		goto err_unset;
	}
	refcount_inc(&pch->file.refcnt);
	rcu_assign_pointer(pchb->bridge, pch);
	write_unlock_bh(&pchb->upl);

	return 0;

err_unset:
	write_lock_bh(&pch->upl);
	/* Re-read pch->bridge with upl held in case it was modified concurrently */
	pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
	RCU_INIT_POINTER(pch->bridge, NULL);
	write_unlock_bh(&pch->upl);
	synchronize_rcu();

	if (pchb)
		if (refcount_dec_and_test(&pchb->file.refcnt))
			ppp_destroy_channel(pchb);

	return -EALREADY;
}
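
/* From userspace the bridge is driven with the channel ioctls; given two
 * /dev/ppp fds attached via PPPIOCATTCHAN, a sketch (error handling
 * omitted):
 *
 *	ioctl(sock_b, PPPIOCGCHAN, &idx_b);	-- channel index, obtained
 *						   from e.g. a pppox socket
 *	ioctl(fd_a, PPPIOCBRIDGECHAN, &idx_b);	-- bridge fd_a's channel to it
 *	...
 *	ioctl(fd_a, PPPIOCUNBRIDGECHAN);	-- tear the bridge down again
 */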

static int ppp_unbridge_channels(struct channel *pch)
{
	struct channel *pchb, *pchbb;

	write_lock_bh(&pch->upl);
	pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
	if (!pchb) {
		write_unlock_bh(&pch->upl);
		return -EINVAL;
	}
	RCU_INIT_POINTER(pch->bridge, NULL);
	write_unlock_bh(&pch->upl);

	/* Only modify pchb if pchb->bridge points back to pch.
	 * If not, it implies that there has been a race unbridging (and possibly
	 * even rebridging) pchb.  We should leave pchb alone to avoid either a
	 * refcount underflow, or breaking another established bridge instance.
	 */
	write_lock_bh(&pchb->upl);
	pchbb = rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl));
	if (pchbb == pch)
		RCU_INIT_POINTER(pchb->bridge, NULL);
	write_unlock_bh(&pchb->upl);

	synchronize_rcu();

	if (pchbb == pch)
		if (refcount_dec_and_test(&pch->file.refcnt))
			ppp_destroy_channel(pch);

	if (refcount_dec_and_test(&pchb->file.refcnt))
		ppp_destroy_channel(pchb);

	return 0;
}

static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ppp_file *pf;
	struct ppp *ppp;
	int err = -EFAULT, val, val2, i;
	struct ppp_idle32 idle32;
	struct ppp_idle64 idle64;
	struct npioctl npi;
	int unit, cflags;
	struct slcompress *vj;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

	mutex_lock(&ppp_mutex);

	pf = file->private_data;
	if (!pf) {
		err = ppp_unattached_ioctl(current->nsproxy->net_ns,
					   pf, file, cmd, arg);
		goto out;
	}

	if (cmd == PPPIOCDETACH) {
		/*
		 * PPPIOCDETACH is no longer supported as it was heavily broken,
		 * and is only known to have been used by pppd older than
		 * ppp-2.4.2 (released November 2003).
		 */
		pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n",
			     current->comm, current->pid);
		err = -EINVAL;
		goto out;
	}

	if (pf->kind == CHANNEL) {
		struct channel *pch, *pchb;
		struct ppp_channel *chan;
		struct ppp_net *pn;

		pch = PF_TO_CHANNEL(pf);

		switch (cmd) {
		case PPPIOCCONNECT:
			if (get_user(unit, p))
				break;
			err = ppp_connect_channel(pch, unit);
			break;

		case PPPIOCDISCONN:
			err = ppp_disconnect_channel(pch);
			break;

		case PPPIOCBRIDGECHAN:
			if (get_user(unit, p))
				break;
			err = -ENXIO;
			pn = ppp_pernet(current->nsproxy->net_ns);
			spin_lock_bh(&pn->all_channels_lock);
			pchb = ppp_find_channel(pn, unit);
			/* Hold a reference to prevent pchb being freed while
			 * we establish the bridge.
			 */
			if (pchb)
				refcount_inc(&pchb->file.refcnt);
			spin_unlock_bh(&pn->all_channels_lock);
			if (!pchb)
				break;
			err = ppp_bridge_channels(pch, pchb);
			/* Drop earlier refcount now bridge establishment is complete */
			if (refcount_dec_and_test(&pchb->file.refcnt))
				ppp_destroy_channel(pchb);
			break;

		case PPPIOCUNBRIDGECHAN:
			err = ppp_unbridge_channels(pch);
			break;

		default:
			down_read(&pch->chan_sem);
			chan = pch->chan;
			err = -ENOTTY;
			if (chan && chan->ops->ioctl)
				err = chan->ops->ioctl(chan, cmd, arg);
			up_read(&pch->chan_sem);
		}
		goto out;
	}

	if (pf->kind != INTERFACE) {
		/* can't happen */
		pr_err("PPP: not interface or channel??\n");
		err = -EINVAL;
		goto out;
	}

	ppp = PF_TO_PPP(pf);
	switch (cmd) {
	case PPPIOCSMRU:
		if (get_user(val, p))
			break;
		ppp->mru = val;
		err = 0;
		break;

	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		ppp_lock(ppp);
		cflags = ppp->flags & ~val;
#ifdef CONFIG_PPP_MULTILINK
		if (!(ppp->flags & SC_MULTILINK) && (val & SC_MULTILINK))
			ppp->nextseq = 0;
#endif
		ppp->flags = val & SC_FLAG_BITS;
		ppp_unlock(ppp);
		if (cflags & SC_CCP_OPEN)
			ppp_ccp_closed(ppp);
		err = 0;
		break;

	case PPPIOCGFLAGS:
		val = ppp->flags | ppp->xstate | ppp->rstate;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	case PPPIOCSCOMPRESS:
	{
		struct ppp_option_data data;
		if (copy_from_user(&data, argp, sizeof(data)))
			err = -EFAULT;
		else
			err = ppp_set_compress(ppp, &data);
		break;
	}
	case PPPIOCGUNIT:
		if (put_user(ppp->file.index, p))
			break;
		err = 0;
		break;

	case PPPIOCSDEBUG:
		if (get_user(val, p))
			break;
		ppp->debug = val;
		err = 0;
		break;

	case PPPIOCGDEBUG:
		if (put_user(ppp->debug, p))
			break;
		err = 0;
		break;

	case PPPIOCGIDLE32:
		idle32.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
		idle32.recv_idle = (jiffies - ppp->last_recv) / HZ;
		if (copy_to_user(argp, &idle32, sizeof(idle32)))
			break;
		err = 0;
		break;

	case PPPIOCGIDLE64:
		idle64.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
		idle64.recv_idle = (jiffies - ppp->last_recv) / HZ;
		if (copy_to_user(argp, &idle64, sizeof(idle64)))
			break;
		err = 0;
		break;

	case PPPIOCSMAXCID:
		if (get_user(val, p))
			break;
		val2 = 15;
		if ((val >> 16) != 0) {
			val2 = val >> 16;
			val &= 0xffff;
		}
		vj = slhc_init(val2+1, val+1);
		if (IS_ERR(vj)) {
			err = PTR_ERR(vj);
			break;
		}
		ppp_lock(ppp);
		if (ppp->vj)
			slhc_free(ppp->vj);
		ppp->vj = vj;
		ppp_unlock(ppp);
		err = 0;
		break;

	case PPPIOCGNPMODE:
	case PPPIOCSNPMODE:
		if (copy_from_user(&npi, argp, sizeof(npi)))
			break;
		err = proto_to_npindex(npi.protocol);
		if (err < 0)
			break;
		i = err;
		if (cmd == PPPIOCGNPMODE) {
			err = -EFAULT;
			npi.mode = ppp->npmode[i];
			if (copy_to_user(argp, &npi, sizeof(npi)))
				break;
		} else {
			ppp->npmode[i] = npi.mode;
			/* we may be able to transmit more packets now (??) */
			netif_wake_queue(ppp->dev);
		}
		err = 0;
		break;

#ifdef CONFIG_PPP_FILTER
	case PPPIOCSPASS:
	case PPPIOCSACTIVE:
	{
		struct bpf_prog *filter = ppp_get_filter(argp);
		struct bpf_prog **which;

		if (IS_ERR(filter)) {
			err = PTR_ERR(filter);
			break;
		}
		if (cmd == PPPIOCSPASS)
			which = &ppp->pass_filter;
		else
			which = &ppp->active_filter;
		ppp_lock(ppp);
		if (*which)
			bpf_prog_destroy(*which);
		*which = filter;
		ppp_unlock(ppp);
		err = 0;
		break;
	}
#endif /* CONFIG_PPP_FILTER */

#ifdef CONFIG_PPP_MULTILINK
	case PPPIOCSMRRU:
		if (get_user(val, p))
			break;
		ppp_recv_lock(ppp);
		ppp->mrru = val;
		ppp_recv_unlock(ppp);
		err = 0;
		break;
#endif /* CONFIG_PPP_MULTILINK */

	default:
		err = -ENOTTY;
	}

out:
	mutex_unlock(&ppp_mutex);

	return err;
}

#ifdef CONFIG_COMPAT
struct ppp_option_data32 {
	compat_uptr_t		ptr;
	u32			length;
	compat_int_t		transmit;
};
#define PPPIOCSCOMPRESS32	_IOW('t', 77, struct ppp_option_data32)

static long ppp_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ppp_file *pf;
	int err = -ENOIOCTLCMD;
	void __user *argp = (void __user *)arg;

	mutex_lock(&ppp_mutex);

	pf = file->private_data;
	if (pf && pf->kind == INTERFACE) {
		struct ppp *ppp = PF_TO_PPP(pf);
		switch (cmd) {
#ifdef CONFIG_PPP_FILTER
		case PPPIOCSPASS32:
		case PPPIOCSACTIVE32:
		{
			struct bpf_prog *filter = compat_ppp_get_filter(argp);
			struct bpf_prog **which;

			if (IS_ERR(filter)) {
				err = PTR_ERR(filter);
				break;
			}
			if (cmd == PPPIOCSPASS32)
				which = &ppp->pass_filter;
			else
				which = &ppp->active_filter;
			ppp_lock(ppp);
			if (*which)
				bpf_prog_destroy(*which);
			*which = filter;
			ppp_unlock(ppp);
			err = 0;
			break;
		}
#endif /* CONFIG_PPP_FILTER */
		case PPPIOCSCOMPRESS32:
		{
			struct ppp_option_data32 data32;
			if (copy_from_user(&data32, argp, sizeof(data32))) {
				err = -EFAULT;
			} else {
				struct ppp_option_data data = {
					.ptr = compat_ptr(data32.ptr),
					.length = data32.length,
					.transmit = data32.transmit
				};
				err = ppp_set_compress(ppp, &data);
			}
			break;
		}
		}
	}
	mutex_unlock(&ppp_mutex);

	/* all other commands have compatible arguments */
	if (err == -ENOIOCTLCMD)
		err = ppp_ioctl(file, cmd, (unsigned long)compat_ptr(arg));

	return err;
}
#endif

static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
			struct file *file, unsigned int cmd, unsigned long arg)
{
	int unit, err = -EFAULT;
	struct ppp *ppp;
	struct channel *chan;
	struct ppp_net *pn;
	int __user *p = (int __user *)arg;

	switch (cmd) {
	case PPPIOCNEWUNIT:
		/* Create a new ppp unit */
		if (get_user(unit, p))
			break;
		err = ppp_create_interface(net, file, &unit);
		if (err < 0)
			break;

		err = -EFAULT;
		if (put_user(unit, p))
			break;
		err = 0;
		break;

	case PPPIOCATTACH:
		/* Attach to an existing ppp unit */
		if (get_user(unit, p))
			break;
		err = -ENXIO;
		pn = ppp_pernet(net);
		mutex_lock(&pn->all_ppp_mutex);
		ppp = ppp_find_unit(pn, unit);
		if (ppp) {
			refcount_inc(&ppp->file.refcnt);
			file->private_data = &ppp->file;
			err = 0;
		}
		mutex_unlock(&pn->all_ppp_mutex);
		break;

	case PPPIOCATTCHAN:
		if (get_user(unit, p))
			break;
		err = -ENXIO;
		pn = ppp_pernet(net);
		spin_lock_bh(&pn->all_channels_lock);
		chan = ppp_find_channel(pn, unit);
		if (chan) {
			refcount_inc(&chan->file.refcnt);
			file->private_data = &chan->file;
			err = 0;
		}
		spin_unlock_bh(&pn->all_channels_lock);
		break;

	default:
		err = -ENOTTY;
	}

	return err;
}

static const struct file_operations ppp_device_fops = {
	.owner		= THIS_MODULE,
	.read		= ppp_read,
	.write		= ppp_write,
	.poll		= ppp_poll,
	.unlocked_ioctl	= ppp_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ppp_compat_ioctl,
#endif
	.open		= ppp_open,
	.release	= ppp_release,
	.llseek		= noop_llseek,
};

static __net_init int ppp_init_net(struct net *net)
{
	struct ppp_net *pn = net_generic(net, ppp_net_id);

	idr_init(&pn->units_idr);
	mutex_init(&pn->all_ppp_mutex);

	INIT_LIST_HEAD(&pn->all_channels);
	INIT_LIST_HEAD(&pn->new_channels);

	spin_lock_init(&pn->all_channels_lock);

	return 0;
}

static __net_exit void ppp_exit_net(struct net *net)
{
	struct ppp_net *pn = net_generic(net, ppp_net_id);
	struct net_device *dev;
	struct net_device *aux;
	struct ppp *ppp;
	LIST_HEAD(list);
	int id;

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		if (dev->netdev_ops == &ppp_netdev_ops)
			unregister_netdevice_queue(dev, &list);
	}

	idr_for_each_entry(&pn->units_idr, ppp, id)
		/* Skip devices already unregistered by previous loop */
		if (!net_eq(dev_net(ppp->dev), net))
			unregister_netdevice_queue(ppp->dev, &list);

	unregister_netdevice_many(&list);
	rtnl_unlock();

	mutex_destroy(&pn->all_ppp_mutex);
	idr_destroy(&pn->units_idr);
	WARN_ON_ONCE(!list_empty(&pn->all_channels));
	WARN_ON_ONCE(!list_empty(&pn->new_channels));
}

static struct pernet_operations ppp_net_ops = {
	.init = ppp_init_net,
	.exit = ppp_exit_net,
	.id   = &ppp_net_id,
	.size = sizeof(struct ppp_net),
};

static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
{
	struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
	int ret;

	mutex_lock(&pn->all_ppp_mutex);

	if (unit < 0) {
		ret = unit_get(&pn->units_idr, ppp, 0);
		if (ret < 0)
			goto err;
		if (!ifname_is_set) {
			while (1) {
				snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret);
				if (!netdev_name_in_use(ppp->ppp_net, ppp->dev->name))
					break;
				unit_put(&pn->units_idr, ret);
				ret = unit_get(&pn->units_idr, ppp, ret + 1);
				if (ret < 0)
					goto err;
			}
		}
	} else {
		/* Caller asked for a specific unit number. Fail with -EEXIST
		 * if unavailable. For backward compatibility, return -EEXIST
		 * too if idr allocation fails; this makes pppd retry without
		 * requesting a specific unit number.
		 */
		if (unit_find(&pn->units_idr, unit)) {
			ret = -EEXIST;
			goto err;
		}
		ret = unit_set(&pn->units_idr, ppp, unit);
		if (ret < 0) {
			/* Rewrite error for backward compatibility */
			ret = -EEXIST;
			goto err;
		}
	}
	ppp->file.index = ret;

	if (!ifname_is_set)
		snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);

	mutex_unlock(&pn->all_ppp_mutex);

	ret = register_netdevice(ppp->dev);
	if (ret < 0)
		goto err_unit;

	atomic_inc(&ppp_unit_count);

	return 0;

err_unit:
	mutex_lock(&pn->all_ppp_mutex);
	unit_put(&pn->units_idr, ppp->file.index);
err:
	mutex_unlock(&pn->all_ppp_mutex);

	return ret;
}

static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
			     const struct ppp_config *conf)
{
	struct ppp *ppp = netdev_priv(dev);
	int indx;
	int err;
	int cpu;

	ppp->dev = dev;
	ppp->ppp_net = src_net;
	ppp->mru = PPP_MRU;
	ppp->owner = conf->file;

	init_ppp_file(&ppp->file, INTERFACE);
	ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */

	for (indx = 0; indx < NUM_NP; ++indx)
		ppp->npmode[indx] = NPMODE_PASS;
	INIT_LIST_HEAD(&ppp->channels);
	spin_lock_init(&ppp->rlock);
	spin_lock_init(&ppp->wlock);

	ppp->xmit_recursion = alloc_percpu(int);
	if (!ppp->xmit_recursion) {
		err = -ENOMEM;
		goto err1;
	}
	for_each_possible_cpu(cpu)
		(*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;

#ifdef CONFIG_PPP_MULTILINK
	ppp->minseq = -1;
	skb_queue_head_init(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	ppp->pass_filter = NULL;
	ppp->active_filter = NULL;
#endif /* CONFIG_PPP_FILTER */

	err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
	if (err < 0)
		goto err2;

	conf->file->private_data = &ppp->file;

	return 0;
err2:
	free_percpu(ppp->xmit_recursion);
err1:
	return err;
}

static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
	[IFLA_PPP_DEV_FD]	= { .type = NLA_S32 },
};

static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	if (!data)
		return -EINVAL;

	if (!data[IFLA_PPP_DEV_FD])
		return -EINVAL;
	if (nla_get_s32(data[IFLA_PPP_DEV_FD]) < 0)
		return -EBADF;

	return 0;
}

static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ppp_config conf = {
		.unit = -1,
		.ifname_is_set = true,
	};
	struct file *file;
	int err;

	file = fget(nla_get_s32(data[IFLA_PPP_DEV_FD]));
	if (!file)
		return -EBADF;

	/* rtnl_lock is already held here, but ppp_create_interface() locks
	 * ppp_mutex before holding rtnl_lock. Using mutex_trylock() avoids
	 * possible deadlock due to lock order inversion, at the cost of
	 * pushing the problem back to userspace.
	 */
	if (!mutex_trylock(&ppp_mutex)) {
		err = -EBUSY;
		goto out;
	}

	if (file->f_op != &ppp_device_fops || file->private_data) {
		err = -EBADF;
		goto out_unlock;
	}

	conf.file = file;

	/* Don't use a device name generated by the rtnetlink layer when ifname
	 * isn't specified. Let ppp_dev_configure() set the device name using
	 * the PPP unit identifier as suffix (i.e. ppp<unit_id>). This allows
	 * userspace to infer the device name using the PPPIOCGUNIT ioctl.
	 */
	if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME]))
		conf.ifname_is_set = false;

	err = ppp_dev_configure(src_net, dev, &conf);

out_unlock:
	mutex_unlock(&ppp_mutex);
out:
	fput(file);

	return err;
}
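
/* In other words, a netlink client must open /dev/ppp first and hand the
 * fd over; conceptually (a sketch, not a complete netlink program):
 *
 *	int fd = open("/dev/ppp", O_RDWR);
 *	-- send RTM_NEWLINK with IFLA_INFO_KIND = "ppp" and, inside
 *	   IFLA_INFO_DATA, IFLA_PPP_DEV_FD = fd
 *	ioctl(fd, PPPIOCGUNIT, &unit);	-- recover the unit/ifname
 */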

static void ppp_nl_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}

static size_t ppp_nl_get_size(const struct net_device *dev)
{
	return 0;
}

static int ppp_nl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	return 0;
}

static struct net *ppp_nl_get_link_net(const struct net_device *dev)
{
	struct ppp *ppp = netdev_priv(dev);

	return READ_ONCE(ppp->ppp_net);
}

static struct rtnl_link_ops ppp_link_ops __read_mostly = {
	.kind		= "ppp",
	.maxtype	= IFLA_PPP_MAX,
	.policy		= ppp_nl_policy,
	.priv_size	= sizeof(struct ppp),
	.setup		= ppp_setup,
	.validate	= ppp_nl_validate,
	.newlink	= ppp_nl_newlink,
	.dellink	= ppp_nl_dellink,
	.get_size	= ppp_nl_get_size,
	.fill_info	= ppp_nl_fill_info,
	.get_link_net	= ppp_nl_get_link_net,
};

#define PPP_MAJOR	108

/* Called at boot time if ppp is compiled into the kernel,
   or at module load time (from init_module) if compiled as a module. */
static int __init ppp_init(void)
{
	int err;

	pr_info("PPP generic driver version " PPP_VERSION "\n");

	err = register_pernet_device(&ppp_net_ops);
	if (err) {
		pr_err("failed to register PPP pernet device (%d)\n", err);
		goto out;
	}

	err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
	if (err) {
		pr_err("failed to register PPP device (%d)\n", err);
		goto out_net;
	}

	err = class_register(&ppp_class);
	if (err)
		goto out_chrdev;

	err = rtnl_link_register(&ppp_link_ops);
	if (err) {
		pr_err("failed to register rtnetlink PPP handler\n");
		goto out_class;
	}

	/* not a big deal if we fail here :-) */
	device_create(&ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");

	return 0;

out_class:
	class_unregister(&ppp_class);
out_chrdev:
	unregister_chrdev(PPP_MAJOR, "ppp");
out_net:
	unregister_pernet_device(&ppp_net_ops);
out:
	return err;
}

/*
 * Network interface unit routines.
 */
static netdev_tx_t
ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ppp *ppp = netdev_priv(dev);
	int npi, proto;
	unsigned char *pp;

	npi = ethertype_to_npindex(ntohs(skb->protocol));
	if (npi < 0)
		goto outf;

	/* Drop, accept or reject the packet */
	switch (ppp->npmode[npi]) {
	case NPMODE_PASS:
		break;
	case NPMODE_QUEUE:
		/* it would be nice to have a way to tell the network
		   system to queue this one up for later. */
		goto outf;
	case NPMODE_DROP:
	case NPMODE_ERROR:
		goto outf;
	}

	/* Put the 2-byte PPP protocol number on the front,
	   making sure there is room for the address and control fields. */
	if (skb_cow_head(skb, PPP_HDRLEN))
		goto outf;

	pp = skb_push(skb, 2);
	proto = npindex_to_proto[npi];
	put_unaligned_be16(proto, pp);

	skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
	ppp_xmit_process(ppp, skb);

	return NETDEV_TX_OK;

 outf:
	kfree_skb(skb);
	++dev->stats.tx_dropped;
	return NETDEV_TX_OK;
}

static int
ppp_net_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
		       void __user *addr, int cmd)
{
	struct ppp *ppp = netdev_priv(dev);
	int err = -EFAULT;
	struct ppp_stats stats;
	struct ppp_comp_stats cstats;
	char *vers;

	switch (cmd) {
	case SIOCGPPPSTATS:
		ppp_get_stats(ppp, &stats);
		if (copy_to_user(addr, &stats, sizeof(stats)))
			break;
		err = 0;
		break;

	case SIOCGPPPCSTATS:
		memset(&cstats, 0, sizeof(cstats));
		if (ppp->xc_state)
			ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
		if (ppp->rc_state)
			ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
		if (copy_to_user(addr, &cstats, sizeof(cstats)))
			break;
		err = 0;
		break;

	case SIOCGPPPVER:
		vers = PPP_VERSION;
		if (copy_to_user(addr, vers, strlen(vers) + 1))
			break;
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
1539
1540static void
1541ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
1542{
1543	struct ppp *ppp = netdev_priv(dev);
1544
1545	ppp_recv_lock(ppp);
1546	stats64->rx_packets = ppp->stats64.rx_packets;
1547	stats64->rx_bytes   = ppp->stats64.rx_bytes;
1548	ppp_recv_unlock(ppp);
1549
1550	ppp_xmit_lock(ppp);
1551	stats64->tx_packets = ppp->stats64.tx_packets;
1552	stats64->tx_bytes   = ppp->stats64.tx_bytes;
1553	ppp_xmit_unlock(ppp);
1554
1555	stats64->rx_errors        = dev->stats.rx_errors;
1556	stats64->tx_errors        = dev->stats.tx_errors;
1557	stats64->rx_dropped       = dev->stats.rx_dropped;
1558	stats64->tx_dropped       = dev->stats.tx_dropped;
1559	stats64->rx_length_errors = dev->stats.rx_length_errors;
1560}
1561
1562static int ppp_dev_init(struct net_device *dev)
1563{
1564	struct ppp *ppp;
1565
1566	netdev_lockdep_set_classes(dev);
1567
1568	ppp = netdev_priv(dev);
1569	/* Let the netdevice take a reference on the ppp file. This ensures
1570	 * that ppp_destroy_interface() won't run before the device gets
1571	 * unregistered.
1572	 */
1573	refcount_inc(&ppp->file.refcnt);
1574
1575	return 0;
1576}
1577
1578static void ppp_dev_uninit(struct net_device *dev)
1579{
1580	struct ppp *ppp = netdev_priv(dev);
1581	struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
1582
1583	ppp_lock(ppp);
1584	ppp->closing = 1;
1585	ppp_unlock(ppp);
1586
1587	mutex_lock(&pn->all_ppp_mutex);
1588	unit_put(&pn->units_idr, ppp->file.index);
1589	mutex_unlock(&pn->all_ppp_mutex);
1590
1591	ppp->owner = NULL;
1592
1593	ppp->file.dead = 1;
1594	wake_up_interruptible(&ppp->file.rwait);
1595}
1596
1597static void ppp_dev_priv_destructor(struct net_device *dev)
1598{
1599	struct ppp *ppp;
1600
1601	ppp = netdev_priv(dev);
1602	if (refcount_dec_and_test(&ppp->file.refcnt))
1603		ppp_destroy_interface(ppp);
1604}
1605
1606static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
1607				 struct net_device_path *path)
1608{
1609	struct ppp *ppp = netdev_priv(ctx->dev);
1610	struct ppp_channel *chan;
1611	struct channel *pch;
1612
1613	if (ppp->flags & SC_MULTILINK)
1614		return -EOPNOTSUPP;
1615
1616	if (list_empty(&ppp->channels))
1617		return -ENODEV;
1618
1619	pch = list_first_entry(&ppp->channels, struct channel, clist);
1620	chan = pch->chan;
1621	if (!chan->ops->fill_forward_path)
1622		return -EOPNOTSUPP;
1623
1624	return chan->ops->fill_forward_path(ctx, path, chan);
1625}
1626
1627static const struct net_device_ops ppp_netdev_ops = {
1628	.ndo_init	 = ppp_dev_init,
1629	.ndo_uninit      = ppp_dev_uninit,
1630	.ndo_start_xmit  = ppp_start_xmit,
1631	.ndo_siocdevprivate = ppp_net_siocdevprivate,
1632	.ndo_get_stats64 = ppp_get_stats64,
1633	.ndo_fill_forward_path = ppp_fill_forward_path,
1634};
1635
1636static const struct device_type ppp_type = {
1637	.name = "ppp",
1638};
1639
1640static void ppp_setup(struct net_device *dev)
1641{
1642	dev->netdev_ops = &ppp_netdev_ops;
1643	SET_NETDEV_DEVTYPE(dev, &ppp_type);
1644
1645	dev->lltx = true;
1646
1647	dev->hard_header_len = PPP_HDRLEN;
1648	dev->mtu = PPP_MRU;
1649	dev->addr_len = 0;
1650	dev->tx_queue_len = 3;
1651	dev->type = ARPHRD_PPP;
1652	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1653	dev->priv_destructor = ppp_dev_priv_destructor;
1654	netif_keep_dst(dev);
1655}
1656
1657/*
1658 * Transmit-side routines.
1659 */
1660
1661/* Called to do any work queued up on the transmit side that can now be done */
1662static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
1663{
1664	ppp_xmit_lock(ppp);
1665	if (!ppp->closing) {
1666		ppp_push(ppp);
1667
1668		if (skb)
1669			skb_queue_tail(&ppp->file.xq, skb);
1670		while (!ppp->xmit_pending &&
1671		       (skb = skb_dequeue(&ppp->file.xq)))
1672			ppp_send_frame(ppp, skb);
1673		/* If there's no work left to do, tell the core net
1674		   code that we can accept some more. */
1675		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
1676			netif_wake_queue(ppp->dev);
1677		else
1678			netif_stop_queue(ppp->dev);
1679	} else {
1680		kfree_skb(skb);
1681	}
1682	ppp_xmit_unlock(ppp);
1683}
1684
1685static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
1686{
1687	local_bh_disable();
1688
1689	if (unlikely(*this_cpu_ptr(ppp->xmit_recursion)))
1690		goto err;
1691
1692	(*this_cpu_ptr(ppp->xmit_recursion))++;
1693	__ppp_xmit_process(ppp, skb);
1694	(*this_cpu_ptr(ppp->xmit_recursion))--;
1695
1696	local_bh_enable();
1697
1698	return;
1699
1700err:
1701	local_bh_enable();
1702
1703	kfree_skb(skb);
1704
1705	if (net_ratelimit())
1706		netdev_err(ppp->dev, "recursion detected\n");
1707}
1708
1709static inline struct sk_buff *
1710pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
1711{
1712	struct sk_buff *new_skb;
1713	int len;
1714	int new_skb_size = ppp->dev->mtu +
1715		ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
1716	int compressor_skb_size = ppp->dev->mtu +
1717		ppp->xcomp->comp_extra + PPP_HDRLEN;
1718	new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
1719	if (!new_skb) {
1720		if (net_ratelimit())
1721			netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
1722		return NULL;
1723	}
1724	if (ppp->dev->hard_header_len > PPP_HDRLEN)
1725		skb_reserve(new_skb,
1726			    ppp->dev->hard_header_len - PPP_HDRLEN);
1727
1728	/* compressor still expects A/C bytes in hdr */
1729	len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
1730				   new_skb->data, skb->len + 2,
1731				   compressor_skb_size);
1732	if (len > 0 && (ppp->flags & SC_CCP_UP)) {
1733		consume_skb(skb);
1734		skb = new_skb;
1735		skb_put(skb, len);
1736		skb_pull(skb, 2);	/* pull off A/C bytes */
1737	} else if (len == 0) {
1738		/* didn't compress, or CCP not up yet */
1739		consume_skb(new_skb);
1740		new_skb = skb;
1741	} else {
1742		/*
1743		 * (len < 0)
1744		 * MPPE requires that we do not send unencrypted
1745		 * frames.  The compressor will return -1 if we
1746		 * should drop the frame.  We cannot simply test
1747		 * the compress_proto because MPPE and MPPC share
1748		 * the same number.
1749		 */
1750		if (net_ratelimit())
1751			netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
1752		kfree_skb(skb);
1753		consume_skb(new_skb);
1754		new_skb = NULL;
1755	}
1756	return new_skb;
1757}
1758
1759/*
1760 * Compress and send a frame.
1761 * The caller should have locked the xmit path,
1762 * and xmit_pending should be 0.
1763 */
1764static void
1765ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1766{
1767	int proto = PPP_PROTO(skb);
1768	struct sk_buff *new_skb;
1769	int len;
1770	unsigned char *cp;
1771
1772	skb->dev = ppp->dev;
1773
1774	if (proto < 0x8000) {
1775#ifdef CONFIG_PPP_FILTER
1776		/* check if the packet passes the pass and active filters.
1777		 * See comment for PPP_FILTER_OUTBOUND_TAG above.
1778		 */
1779		*(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_OUTBOUND_TAG);
1780		if (ppp->pass_filter &&
1781		    bpf_prog_run(ppp->pass_filter, skb) == 0) {
1782			if (ppp->debug & 1)
1783				netdev_printk(KERN_DEBUG, ppp->dev,
1784					      "PPP: outbound frame "
1785					      "not passed\n");
1786			kfree_skb(skb);
1787			return;
1788		}
1789		/* if this packet passes the active filter, record the time */
1790		if (!(ppp->active_filter &&
1791		      bpf_prog_run(ppp->active_filter, skb) == 0))
1792			ppp->last_xmit = jiffies;
1793		skb_pull(skb, 2);
1794#else
1795		/* for data packets, record the time */
1796		ppp->last_xmit = jiffies;
1797#endif /* CONFIG_PPP_FILTER */
1798	}
1799
1800	++ppp->stats64.tx_packets;
1801	ppp->stats64.tx_bytes += skb->len - PPP_PROTO_LEN;
1802
1803	switch (proto) {
1804	case PPP_IP:
1805		if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
1806			break;
1807		/* try to do VJ TCP header compression */
1808		new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
1809				    GFP_ATOMIC);
1810		if (!new_skb) {
1811			netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
1812			goto drop;
1813		}
1814		skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
1815		cp = skb->data + 2;
1816		len = slhc_compress(ppp->vj, cp, skb->len - 2,
1817				    new_skb->data + 2, &cp,
1818				    !(ppp->flags & SC_NO_TCP_CCID));
1819		if (cp == skb->data + 2) {
1820			/* didn't compress */
1821			consume_skb(new_skb);
1822		} else {
1823			if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
1824				proto = PPP_VJC_COMP;
1825				cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
1826			} else {
1827				proto = PPP_VJC_UNCOMP;
1828				cp[0] = skb->data[2];
1829			}
1830			consume_skb(skb);
1831			skb = new_skb;
1832			cp = skb_put(skb, len + 2);
1833			cp[0] = 0;
1834			cp[1] = proto;
1835		}
1836		break;
1837
1838	case PPP_CCP:
1839		/* peek at outbound CCP frames */
1840		ppp_ccp_peek(ppp, skb, 0);
1841		break;
1842	}
1843
1844	/* try to do packet compression */
1845	if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
1846	    proto != PPP_LCP && proto != PPP_CCP) {
1847		if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
1848			if (net_ratelimit())
1849				netdev_err(ppp->dev,
1850					   "ppp: compression required but "
1851					   "down - pkt dropped.\n");
1852			goto drop;
1853		}
1854		skb = pad_compress_skb(ppp, skb);
1855		if (!skb)
1856			goto drop;
1857	}
1858
1859	/*
1860	 * If we are waiting for traffic (demand dialling),
1861	 * queue it up for pppd to receive.
1862	 */
1863	if (ppp->flags & SC_LOOP_TRAFFIC) {
1864		if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
1865			goto drop;
1866		skb_queue_tail(&ppp->file.rq, skb);
1867		wake_up_interruptible(&ppp->file.rwait);
1868		return;
1869	}
1870
1871	ppp->xmit_pending = skb;
1872	ppp_push(ppp);
1873	return;
1874
1875 drop:
1876	kfree_skb(skb);
1877	++ppp->dev->stats.tx_errors;
1878}
1879
1880/*
1881 * Try to send the frame in xmit_pending.
1882 * The caller should have the xmit path locked.
1883 */
1884static void
1885ppp_push(struct ppp *ppp)
1886{
1887	struct list_head *list;
1888	struct channel *pch;
1889	struct sk_buff *skb = ppp->xmit_pending;
1890
1891	if (!skb)
1892		return;
1893
1894	list = &ppp->channels;
1895	if (list_empty(list)) {
1896		/* nowhere to send the packet, just drop it */
1897		ppp->xmit_pending = NULL;
1898		kfree_skb(skb);
1899		return;
1900	}
1901
1902	if ((ppp->flags & SC_MULTILINK) == 0) {
1903		/* not doing multilink: send it down the first channel */
1904		list = list->next;
1905		pch = list_entry(list, struct channel, clist);
1906
1907		spin_lock(&pch->downl);
1908		if (pch->chan) {
1909			if (pch->chan->ops->start_xmit(pch->chan, skb))
1910				ppp->xmit_pending = NULL;
1911		} else {
1912			/* channel got unregistered */
1913			kfree_skb(skb);
1914			ppp->xmit_pending = NULL;
1915		}
1916		spin_unlock(&pch->downl);
1917		return;
1918	}
1919
1920#ifdef CONFIG_PPP_MULTILINK
1921	/* Multilink: fragment the packet over as many links
1922	   as can take the packet at the moment. */
1923	if (!ppp_mp_explode(ppp, skb))
1924		return;
1925#endif /* CONFIG_PPP_MULTILINK */
1926
1927	ppp->xmit_pending = NULL;
1928	kfree_skb(skb);
1929}
1930
1931#ifdef CONFIG_PPP_MULTILINK
1932static bool mp_protocol_compress __read_mostly = true;
1933module_param(mp_protocol_compress, bool, 0644);
1934MODULE_PARM_DESC(mp_protocol_compress,
1935		 "compress protocol id in multilink fragments");
1936
1937/*
1938 * Divide a packet to be transmitted into fragments and
1939 * send them out the individual links.
1940 */
1941static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1942{
1943	int len, totlen;
1944	int i, bits, hdrlen, mtu;
1945	int flen;
1946	int navail, nfree, nzero;
1947	int nbigger;
1948	int totspeed;
1949	int totfree;
1950	unsigned char *p, *q;
1951	struct list_head *list;
1952	struct channel *pch;
1953	struct sk_buff *frag;
1954	struct ppp_channel *chan;
1955
1956	totspeed = 0; /* total bitrate of the bundle */
1957	nfree = 0; /* # channels which have no packet already queued */
1958	navail = 0; /* total # of usable channels (not deregistered) */
1959	nzero = 0; /* number of channels with zero speed associated */
1960	totfree = 0; /* total # of channels available and
1961		      * having no queued packets before
1962		      * starting the fragmentation */
1963
1964	hdrlen = (ppp->flags & SC_MP_XSHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
1965	i = 0;
1966	list_for_each_entry(pch, &ppp->channels, clist) {
1967		if (pch->chan) {
1968			pch->avail = 1;
1969			navail++;
1970			pch->speed = pch->chan->speed;
1971		} else {
1972			pch->avail = 0;
1973		}
1974		if (pch->avail) {
1975		if (skb_queue_empty(&pch->file.xq) ||
1976		    !pch->had_frag) {
1977			if (pch->speed == 0)
1978				nzero++;
1979			else
1980				totspeed += pch->speed;
1981
1982			pch->avail = 2;
1983			++nfree;
1984			++totfree;
1985		}
1986			if (!pch->had_frag && i < ppp->nxchan)
1987				ppp->nxchan = i;
1988		}
1989		++i;
1990	}
1991	/*
1992	 * Don't start sending this packet unless at least half of
1993	 * the channels are free.  This gives much better TCP
1994	 * performance if we have a lot of channels.
1995	 */
1996	if (nfree == 0 || nfree < navail / 2)
1997		return 0; /* can't take now, leave it in xmit_pending */
1998
1999	/* Do protocol field compression */
2000	p = skb->data;
2001	len = skb->len;
2002	if (*p == 0 && mp_protocol_compress) {
2003		++p;
2004		--len;
2005	}
2006
2007	totlen = len;
2008	nbigger = len % nfree;
2009
2010	/* skip to the channel after the one we last used
2011	   and start at that one */
2012	list = &ppp->channels;
2013	for (i = 0; i < ppp->nxchan; ++i) {
2014		list = list->next;
2015		if (list == &ppp->channels) {
2016			i = 0;
2017			break;
2018		}
2019	}
2020
2021	/* create a fragment for each channel */
2022	bits = B;
2023	while (len > 0) {
2024		list = list->next;
2025		if (list == &ppp->channels) {
2026			i = 0;
2027			continue;
2028		}
2029		pch = list_entry(list, struct channel, clist);
2030		++i;
2031		if (!pch->avail)
2032			continue;
2033
2034		/*
2035		 * Skip this channel if it has a fragment pending already and
2036		 * we haven't given a fragment to all of the free channels.
2037		 */
2038		if (pch->avail == 1) {
2039			if (nfree > 0)
2040				continue;
2041		} else {
2042			pch->avail = 1;
2043		}
2044
2045		/* check the channel's mtu and whether it is still attached. */
2046		spin_lock(&pch->downl);
2047		if (pch->chan == NULL) {
2048			/* can't use this channel, it's being deregistered */
2049			if (pch->speed == 0)
2050				nzero--;
2051			else
2052				totspeed -= pch->speed;
2053
2054			spin_unlock(&pch->downl);
2055			pch->avail = 0;
2056			totlen = len;
2057			totfree--;
2058			nfree--;
2059			if (--navail == 0)
2060				break;
2061			continue;
2062		}
2063
2064		/*
2065		 * If the channel speed is not set, divide
2066		 * the packet evenly among the free channels;
2067		 * otherwise divide it according to the speed
2068		 * of the channel we are going to transmit on.
2069		 */
2070		flen = len;
2071		if (nfree > 0) {
2072			if (pch->speed == 0) {
2073				flen = len/nfree;
2074				if (nbigger > 0) {
2075					flen++;
2076					nbigger--;
2077				}
2078			} else {
2079				flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
2080					((totspeed*totfree)/pch->speed)) - hdrlen;
2081				if (nbigger > 0) {
2082					flen += ((totfree - nzero)*pch->speed)/totspeed;
2083					nbigger -= ((totfree - nzero)*pch->speed)/
2084							totspeed;
2085				}
2086			}
2087			nfree--;
2088		}
2089
2090		/*
2091		 * Check if we are on the last channel or
2092		 * we exceeded the length of the data to
2093		 * fragment.
2094		 */
2095		if ((nfree <= 0) || (flen > len))
2096			flen = len;
2097		/*
2098		 * It is not worth transmitting on slow channels:
2099		 * in that case the resulting flen from the
2100		 * above formula will be less than or equal to zero.
2101		 * Skip the channel in this case.
2102		 */
2103		if (flen <= 0) {
2104			pch->avail = 2;
2105			spin_unlock(&pch->downl);
2106			continue;
2107		}
2108
2109		/*
2110		 * hdrlen includes the 2-byte PPP protocol field, but the
2111		 * MTU counts only the payload excluding the protocol field.
2112		 * (RFC1661 Section 2)
2113		 */
2114		mtu = pch->chan->mtu - (hdrlen - 2);
2115		if (mtu < 4)
2116			mtu = 4;
2117		if (flen > mtu)
2118			flen = mtu;
2119		if (flen == len)
2120			bits |= E;
2121		frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
2122		if (!frag)
2123			goto noskb;
2124		q = skb_put(frag, flen + hdrlen);
2125
2126		/* make the MP header */
2127		put_unaligned_be16(PPP_MP, q);
2128		if (ppp->flags & SC_MP_XSHORTSEQ) {
2129			q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
2130			q[3] = ppp->nxseq;
2131		} else {
2132			q[2] = bits;
2133			q[3] = ppp->nxseq >> 16;
2134			q[4] = ppp->nxseq >> 8;
2135			q[5] = ppp->nxseq;
2136		}
2137
2138		memcpy(q + hdrlen, p, flen);
2139
2140		/* try to send it down the channel */
2141		chan = pch->chan;
2142		if (!skb_queue_empty(&pch->file.xq) ||
2143		    !chan->ops->start_xmit(chan, frag))
2144			skb_queue_tail(&pch->file.xq, frag);
2145		pch->had_frag = 1;
2146		p += flen;
2147		len -= flen;
2148		++ppp->nxseq;
2149		bits = 0;
2150		spin_unlock(&pch->downl);
2151	}
2152	ppp->nxchan = i;
2153
2154	return 1;
2155
2156 noskb:
2157	spin_unlock(&pch->downl);
2158	if (ppp->debug & 1)
2159		netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
2160	++ppp->dev->stats.tx_errors;
2161	++ppp->nxseq;
2162	return 1;	/* abandon the frame */
2163}
2164#endif /* CONFIG_PPP_MULTILINK */
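
/*
 * A worked example of the speed-proportional split in ppp_mp_explode()
 * above: take two free channels (totfree = 2, nzero = 0) with speeds
 * 100 and 300 (totspeed = 400), a 1500-byte payload (totlen = 1500)
 * and long sequence numbers (hdrlen = 6).  For the speed-100 channel,
 * integer arithmetic gives
 *
 *	flen = (2 * (1500 + 6 * 2)) / ((400 * 2) / 100) - 6
 *	     = 3024 / 8 - 6
 *	     = 372
 *
 * roughly the channel's 100/400 share of the payload, with the
 * per-fragment header cost amortized in; the faster channel takes the
 * rest, subject to the MTU and last-channel clamps applied after the
 * formula.
 */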
2165
2166/* Try to send data out on a channel */
2167static void __ppp_channel_push(struct channel *pch)
2168{
2169	struct sk_buff *skb;
2170	struct ppp *ppp;
2171
2172	spin_lock(&pch->downl);
2173	if (pch->chan) {
2174		while (!skb_queue_empty(&pch->file.xq)) {
2175			skb = skb_dequeue(&pch->file.xq);
2176			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
2177				/* put the packet back and try again later */
2178				skb_queue_head(&pch->file.xq, skb);
2179				break;
2180			}
2181		}
2182	} else {
2183		/* channel got deregistered */
2184		skb_queue_purge(&pch->file.xq);
2185	}
2186	spin_unlock(&pch->downl);
2187	/* see if there is anything from the attached unit to be sent */
2188	if (skb_queue_empty(&pch->file.xq)) {
2189		ppp = pch->ppp;
2190		if (ppp)
2191			__ppp_xmit_process(ppp, NULL);
2192	}
2193}
2194
2195static void ppp_channel_push(struct channel *pch)
2196{
2197	read_lock_bh(&pch->upl);
2198	if (pch->ppp) {
2199		(*this_cpu_ptr(pch->ppp->xmit_recursion))++;
2200		__ppp_channel_push(pch);
2201		(*this_cpu_ptr(pch->ppp->xmit_recursion))--;
2202	} else {
2203		__ppp_channel_push(pch);
2204	}
2205	read_unlock_bh(&pch->upl);
2206}
2207
2208/*
2209 * Receive-side routines.
2210 */
2211
2212struct ppp_mp_skb_parm {
2213	u32		sequence;
2214	u8		BEbits;
2215};
2216#define PPP_MP_CB(skb)	((struct ppp_mp_skb_parm *)((skb)->cb))
2217
2218static inline void
2219ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
2220{
2221	ppp_recv_lock(ppp);
2222	if (!ppp->closing)
2223		ppp_receive_frame(ppp, skb, pch);
2224	else
2225		kfree_skb(skb);
2226	ppp_recv_unlock(ppp);
2227}
2228
2229/**
2230 * __ppp_decompress_proto - Decompress protocol field, slim version.
2231 * @skb: Socket buffer where protocol field should be decompressed. It must have
2232 *	 at least 1 byte of head room and 1 byte of linear data. First byte of
2233 *	 data must be a protocol field byte.
2234 *
2235 * Decompress protocol field in PPP header if it's compressed, e.g. when
2236 * Protocol-Field-Compression (PFC) was negotiated. No checks w.r.t. skb data
2237 * length are done in this function.
2238 */
2239static void __ppp_decompress_proto(struct sk_buff *skb)
2240{
2241	if (skb->data[0] & 0x01)
2242		*(u8 *)skb_push(skb, 1) = 0x00;
2243}
2244
2245/**
2246 * ppp_decompress_proto - Check skb data room and decompress protocol field.
2247 * @skb: Socket buffer where protocol field should be decompressed. First byte
2248 *	 of data must be a protocol field byte.
2249 *
2250 * Decompress protocol field in PPP header if it's compressed, e.g. when
2251 * Protocol-Field-Compression (PFC) was negotiated. This function also makes
2252 * sure that skb data room is sufficient for Protocol field, before and after
2253 * decompression.
2254 *
2255 * Return: true - decompressed successfully, false - not enough room in skb.
2256 */
2257static bool ppp_decompress_proto(struct sk_buff *skb)
2258{
2259	/* At least one byte should be present (if protocol is compressed) */
2260	if (!pskb_may_pull(skb, 1))
2261		return false;
2262
2263	__ppp_decompress_proto(skb);
2264
2265	/* Protocol field should occupy 2 bytes when not compressed */
2266	return pskb_may_pull(skb, 2);
2267}
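
/*
 * A byte-level example of Protocol-Field-Compression: when PFC has
 * been negotiated, the peer may drop the leading 0x00 of protocol
 * numbers whose low byte is odd, so IP (0x0021) can arrive as the
 * single byte 0x21 while LCP (0xc021) is always sent in full.
 * __ppp_decompress_proto() restores the canonical two-byte form:
 *
 *	on the wire:	21 45 00 ...	(compressed proto, IP header)
 *	after fixup:	00 21 45 00 ...	(PPP_IP, then the IP header)
 *
 * The (data[0] & 0x01) test works because RFC 1661 assigns protocol
 * numbers with an odd low byte and an even high byte.
 */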
2268
2269/* Attempt to handle a frame via a bridged channel, if one exists.
2270 * If the channel is bridged, the frame is consumed by the bridge.
2271 * If not, the caller must handle the frame by normal recv mechanisms.
2272 * Returns true if the frame is consumed, false otherwise.
2273 */
2274static bool ppp_channel_bridge_input(struct channel *pch, struct sk_buff *skb)
2275{
2276	struct channel *pchb;
2277
2278	rcu_read_lock();
2279	pchb = rcu_dereference(pch->bridge);
2280	if (!pchb)
2281		goto out_rcu;
2282
2283	spin_lock_bh(&pchb->downl);
2284	if (!pchb->chan) {
2285		/* channel got unregistered */
2286		kfree_skb(skb);
2287		goto outl;
2288	}
2289
2290	skb_scrub_packet(skb, !net_eq(pch->chan_net, pchb->chan_net));
2291	if (!pchb->chan->ops->start_xmit(pchb->chan, skb))
2292		kfree_skb(skb);
2293
2294outl:
2295	spin_unlock_bh(&pchb->downl);
2296out_rcu:
2297	rcu_read_unlock();
2298
2299	/* If pchb is set then we've consumed the packet */
2300	return !!pchb;
2301}
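
/*
 * Channel bridges are configured from userspace.  A minimal sketch,
 * assuming the PPPIOCATTCHAN and PPPIOCBRIDGECHAN ioctls from
 * <linux/ppp-ioctl.h>; chidx_a and chidx_b are placeholder channel
 * indices obtained elsewhere (e.g. via PPPIOCGCHAN):
 */
#if 0	/* illustrative sketch, not part of this file */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ppp-ioctl.h>

static int bridge_channels(int chidx_a, int chidx_b)
{
	int fd = open("/dev/ppp", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, PPPIOCATTCHAN, &chidx_a) < 0 ||	/* attach to A */
	    ioctl(fd, PPPIOCBRIDGECHAN, &chidx_b) < 0) {	/* bridge A <-> B */
		close(fd);
		return -1;
	}
	return fd;	/* PPPIOCUNBRIDGECHAN on this fd undoes the bridge */
}
#endif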
2302
2303void
2304ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
2305{
2306	struct channel *pch = chan->ppp;
2307	int proto;
2308
2309	if (!pch) {
2310		kfree_skb(skb);
2311		return;
2312	}
2313
2314	/* If the channel is bridged, transmit via the bridge */
2315	if (ppp_channel_bridge_input(pch, skb))
2316		return;
2317
2318	read_lock_bh(&pch->upl);
2319	if (!ppp_decompress_proto(skb)) {
2320		kfree_skb(skb);
2321		if (pch->ppp) {
2322			++pch->ppp->dev->stats.rx_length_errors;
2323			ppp_receive_error(pch->ppp);
2324		}
2325		goto done;
2326	}
2327
2328	proto = PPP_PROTO(skb);
2329	if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
2330		/* put it on the channel queue */
2331		skb_queue_tail(&pch->file.rq, skb);
2332		/* drop old frames if queue too long */
2333		while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
2334		       (skb = skb_dequeue(&pch->file.rq)))
2335			kfree_skb(skb);
2336		wake_up_interruptible(&pch->file.rwait);
2337	} else {
2338		ppp_do_recv(pch->ppp, skb, pch);
2339	}
2340
2341done:
2342	read_unlock_bh(&pch->upl);
2343}
2344
2345/* Put a 0-length skb in the receive queue as an error indication */
2346void
2347ppp_input_error(struct ppp_channel *chan, int code)
2348{
2349	struct channel *pch = chan->ppp;
2350	struct sk_buff *skb;
2351
2352	if (!pch)
2353		return;
2354
2355	read_lock_bh(&pch->upl);
2356	if (pch->ppp) {
2357		skb = alloc_skb(0, GFP_ATOMIC);
2358		if (skb) {
2359			skb->len = 0;		/* probably unnecessary */
2360			skb->cb[0] = code;
2361			ppp_do_recv(pch->ppp, skb, pch);
2362		}
2363	}
2364	read_unlock_bh(&pch->upl);
2365}
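
/*
 * ppp_input_error() is how a channel driver reports a lost or
 * corrupted frame, so that rx_errors is bumped and stale VJ state is
 * tossed.  A hedged sketch of the usual pattern in a serial-style
 * channel driver (foo_chan_rx_frame and its callers are hypothetical):
 */
#if 0	/* illustrative sketch, not part of this file */
static void foo_chan_rx_frame(struct ppp_channel *chan,
			      const u8 *buf, int len, u16 fcs)
{
	struct sk_buff *skb;

	if (fcs != PPP_GOODFCS) {
		/* bad checksum: drop the frame, signal the error */
		ppp_input_error(chan, 0);
		return;
	}

	skb = dev_alloc_skb(len);
	if (!skb) {
		ppp_input_error(chan, 0);
		return;
	}
	skb_put_data(skb, buf, len);
	ppp_input(chan, skb);
}
#endif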
2366
2367/*
2368 * We come in here to process a received frame.
2369 * The receive side of the ppp unit is locked.
2370 */
2371static void
2372ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
2373{
2374	/* note: a 0-length skb is used as an error indication */
2375	if (skb->len > 0) {
2376		skb_checksum_complete_unset(skb);
2377#ifdef CONFIG_PPP_MULTILINK
2378		/* XXX do channel-level decompression here */
2379		if (PPP_PROTO(skb) == PPP_MP)
2380			ppp_receive_mp_frame(ppp, skb, pch);
2381		else
2382#endif /* CONFIG_PPP_MULTILINK */
2383			ppp_receive_nonmp_frame(ppp, skb);
2384	} else {
2385		kfree_skb(skb);
2386		ppp_receive_error(ppp);
2387	}
2388}
2389
2390static void
2391ppp_receive_error(struct ppp *ppp)
2392{
2393	++ppp->dev->stats.rx_errors;
2394	if (ppp->vj)
2395		slhc_toss(ppp->vj);
2396}
2397
2398static void
2399ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
2400{
2401	struct sk_buff *ns;
2402	int proto, len, npi;
2403
2404	/*
2405	 * Decompress the frame, if compressed.
2406	 * Note that some decompressors need to see uncompressed frames
2407	 * that come in as well as compressed frames.
2408	 */
2409	if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) &&
2410	    (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
2411		skb = ppp_decompress_frame(ppp, skb);
2412
2413	if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
2414		goto err;
2415
2416	/* At this point the "Protocol" field MUST be decompressed, either in
2417	 * ppp_input(), ppp_decompress_frame() or in ppp_receive_mp_frame().
2418	 */
2419	proto = PPP_PROTO(skb);
2420	switch (proto) {
2421	case PPP_VJC_COMP:
2422		/* decompress VJ compressed packets */
2423		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
2424			goto err;
2425
2426		if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
2427			/* copy to a new sk_buff with more tailroom */
2428			ns = dev_alloc_skb(skb->len + 128);
2429			if (!ns) {
2430				netdev_err(ppp->dev, "PPP: no memory "
2431					   "(VJ decomp)\n");
2432				goto err;
2433			}
2434			skb_reserve(ns, 2);
2435			skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
2436			consume_skb(skb);
2437			skb = ns;
2438		}
2439		else
2440			skb->ip_summed = CHECKSUM_NONE;
2441
2442		len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
2443		if (len <= 0) {
2444			netdev_printk(KERN_DEBUG, ppp->dev,
2445				      "PPP: VJ decompression error\n");
2446			goto err;
2447		}
2448		len += 2;
2449		if (len > skb->len)
2450			skb_put(skb, len - skb->len);
2451		else if (len < skb->len)
2452			skb_trim(skb, len);
2453		proto = PPP_IP;
2454		break;
2455
2456	case PPP_VJC_UNCOMP:
2457		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
2458			goto err;
2459
2460		/* Until we fix the decompressor, we need to make sure
2461		 * the data portion is linear.
2462		 */
2463		if (!pskb_may_pull(skb, skb->len))
2464			goto err;
2465
2466		if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
2467			netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
2468			goto err;
2469		}
2470		proto = PPP_IP;
2471		break;
2472
2473	case PPP_CCP:
2474		ppp_ccp_peek(ppp, skb, 1);
2475		break;
2476	}
2477
2478	++ppp->stats64.rx_packets;
2479	ppp->stats64.rx_bytes += skb->len - 2;
2480
2481	npi = proto_to_npindex(proto);
2482	if (npi < 0) {
2483		/* control or unknown frame - pass it to pppd */
2484		skb_queue_tail(&ppp->file.rq, skb);
2485		/* limit queue length by dropping old frames */
2486		while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
2487		       (skb = skb_dequeue(&ppp->file.rq)))
2488			kfree_skb(skb);
2489		/* wake up any process polling or blocking on read */
2490		wake_up_interruptible(&ppp->file.rwait);
2491
2492	} else {
2493		/* network protocol frame - give it to the kernel */
2494
2495#ifdef CONFIG_PPP_FILTER
2496		if (ppp->pass_filter || ppp->active_filter) {
2497			if (skb_unclone(skb, GFP_ATOMIC))
2498				goto err;
2499			/* Check if the packet passes the pass and active filters.
2500			 * See comment for PPP_FILTER_INBOUND_TAG above.
2501			 */
2502			*(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_INBOUND_TAG);
2503			if (ppp->pass_filter &&
2504			    bpf_prog_run(ppp->pass_filter, skb) == 0) {
2505				if (ppp->debug & 1)
2506					netdev_printk(KERN_DEBUG, ppp->dev,
2507						      "PPP: inbound frame "
2508						      "not passed\n");
2509				kfree_skb(skb);
2510				return;
2511			}
2512			if (!(ppp->active_filter &&
2513			      bpf_prog_run(ppp->active_filter, skb) == 0))
2514				ppp->last_recv = jiffies;
2515			__skb_pull(skb, 2);
2516		} else
2517#endif /* CONFIG_PPP_FILTER */
2518			ppp->last_recv = jiffies;
2519
2520		if ((ppp->dev->flags & IFF_UP) == 0 ||
2521		    ppp->npmode[npi] != NPMODE_PASS) {
2522			kfree_skb(skb);
2523		} else {
2524			/* chop off protocol */
2525			skb_pull_rcsum(skb, 2);
2526			skb->dev = ppp->dev;
2527			skb->protocol = htons(npindex_to_ethertype[npi]);
2528			skb_reset_mac_header(skb);
2529			skb_scrub_packet(skb, !net_eq(ppp->ppp_net,
2530						      dev_net(ppp->dev)));
2531			netif_rx(skb);
2532		}
2533	}
2534	return;
2535
2536 err:
2537	kfree_skb(skb);
2538	ppp_receive_error(ppp);
2539}
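
/*
 * The layout seen by the pass/active BPF filters above, after the
 * two-byte direction tag is pushed in front of the PPP protocol
 * field:
 *
 *	byte 0:		direction (0x01 outbound, 0x00 inbound)
 *	byte 1:		0x00 (always initialized, otherwise unused)
 *	bytes 2-3:	PPP protocol (e.g. 0x0021 for IP)
 *	bytes 4-:	network-layer packet
 *
 * This matches the four-byte pseudo-header that pppd's libpcap filter
 * expressions are compiled against; the tag is pushed with skb_push()
 * before bpf_prog_run() and removed with __skb_pull() afterwards.
 */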
2540
2541static struct sk_buff *
2542ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
2543{
2544	int proto = PPP_PROTO(skb);
2545	struct sk_buff *ns;
2546	int len;
2547
2548	/* Until we fix all the decompressors, we need to make sure
2549	 * the data portion is linear.
2550	 */
2551	if (!pskb_may_pull(skb, skb->len))
2552		goto err;
2553
2554	if (proto == PPP_COMP) {
2555		int obuff_size;
2556
2557		switch(ppp->rcomp->compress_proto) {
2558		case CI_MPPE:
2559			obuff_size = ppp->mru + PPP_HDRLEN + 1;
2560			break;
2561		default:
2562			obuff_size = ppp->mru + PPP_HDRLEN;
2563			break;
2564		}
2565
2566		ns = dev_alloc_skb(obuff_size);
2567		if (!ns) {
2568			netdev_err(ppp->dev, "ppp_decompress_frame: "
2569				   "no memory\n");
2570			goto err;
2571		}
2572		/* the decompressor still expects the A/C bytes in the hdr */
2573		len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
2574				skb->len + 2, ns->data, obuff_size);
2575		if (len < 0) {
2576			/* Pass the compressed frame to pppd as an
2577			   error indication. */
2578			if (len == DECOMP_FATALERROR)
2579				ppp->rstate |= SC_DC_FERROR;
2580			kfree_skb(ns);
2581			goto err;
2582		}
2583
2584		consume_skb(skb);
2585		skb = ns;
2586		skb_put(skb, len);
2587		skb_pull(skb, 2);	/* pull off the A/C bytes */
2588
2589		/* Don't call __ppp_decompress_proto() here, but instead rely on
2590		 * corresponding algo (mppe/bsd/deflate) to decompress it.
2591		 */
2592	} else {
2593		/* Uncompressed frame - pass to decompressor so it
2594		   can update its dictionary if necessary. */
2595		if (ppp->rcomp->incomp)
2596			ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
2597					   skb->len + 2);
2598	}
2599
2600	return skb;
2601
2602 err:
2603	ppp->rstate |= SC_DC_ERROR;
2604	ppp_receive_error(ppp);
2605	return skb;
2606}
2607
2608#ifdef CONFIG_PPP_MULTILINK
2609/*
2610 * Receive a multilink frame.
2611 * We put it on the reconstruction queue and then pull off
2612 * as many completed frames as we can.
2613 */
2614static void
2615ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
2616{
2617	u32 mask, seq;
2618	struct channel *ch;
2619	int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
2620
2621	if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
2622		goto err;		/* no good, throw it away */
2623
2624	/* Decode sequence number and begin/end bits */
2625	if (ppp->flags & SC_MP_SHORTSEQ) {
2626		seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
2627		mask = 0xfff;
2628	} else {
2629		seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5];
2630		mask = 0xffffff;
2631	}
2632	PPP_MP_CB(skb)->BEbits = skb->data[2];
2633	skb_pull(skb, mphdrlen);	/* pull off PPP and MP headers */
2634
2635	/*
2636	 * Do protocol ID decompression on the first fragment of each packet.
2637	 * We have to do that here, because ppp_receive_nonmp_frame() expects
2638	 * decompressed protocol field.
2639	 */
2640	if (PPP_MP_CB(skb)->BEbits & B)
2641		__ppp_decompress_proto(skb);
2642
2643	/*
2644	 * Expand sequence number to 32 bits, making it as close
2645	 * as possible to ppp->minseq.
2646	 */
2647	seq |= ppp->minseq & ~mask;
2648	if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
2649		seq += mask + 1;
2650	else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
2651		seq -= mask + 1;	/* should never happen */
2652	PPP_MP_CB(skb)->sequence = seq;
2653	pch->lastseq = seq;
2654
2655	/*
2656	 * If this packet comes before the next one we were expecting,
2657	 * drop it.
2658	 */
2659	if (seq_before(seq, ppp->nextseq)) {
2660		kfree_skb(skb);
2661		++ppp->dev->stats.rx_dropped;
2662		ppp_receive_error(ppp);
2663		return;
2664	}
2665
2666	/*
2667	 * Reevaluate minseq, the minimum over all channels of the
2668	 * last sequence number received on each channel.  Because of
2669	 * the increasing sequence number rule, we know that any fragment
2670	 * before `minseq' which hasn't arrived is never going to arrive.
2671	 * The list of channels can't change because we have the receive
2672	 * side of the ppp unit locked.
2673	 */
2674	list_for_each_entry(ch, &ppp->channels, clist) {
2675		if (seq_before(ch->lastseq, seq))
2676			seq = ch->lastseq;
2677	}
2678	if (seq_before(ppp->minseq, seq))
2679		ppp->minseq = seq;
2680
2681	/* Put the fragment on the reconstruction queue */
2682	ppp_mp_insert(ppp, skb);
2683
2684	/* If the queue is getting long, don't wait any longer for packets
2685	   before the start of the queue. */
2686	if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
2687		struct sk_buff *mskb = skb_peek(&ppp->mrq);
2688		if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence))
2689			ppp->minseq = PPP_MP_CB(mskb)->sequence;
2690	}
2691
2692	/* Pull completed packets off the queue and receive them. */
2693	while ((skb = ppp_mp_reconstruct(ppp))) {
2694		if (pskb_may_pull(skb, 2))
2695			ppp_receive_nonmp_frame(ppp, skb);
2696		else {
2697			++ppp->dev->stats.rx_length_errors;
2698			kfree_skb(skb);
2699			ppp_receive_error(ppp);
2700		}
2701	}
2702
2703	return;
2704
2705 err:
2706	kfree_skb(skb);
2707	ppp_receive_error(ppp);
2708}
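
/*
 * A worked example of the sequence-number expansion above: with short
 * sequence numbers only 12 bits travel on the wire (mask = 0xfff).
 * Suppose ppp->minseq = 0x10ffe and a fragment arrives carrying 0x002:
 *
 *	seq = 0x002 | (0x10ffe & ~0xfff) = 0x10002
 *	minseq - seq = 0xffc > mask >> 1, so wrap forward:
 *	seq += 0x1000  =>  seq = 0x11002
 *
 * i.e. the 12-bit value is placed in whichever 4096-wide window lies
 * closest to minseq, so fragments arriving just after a wrap are
 * sequenced correctly.
 */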
2709
2710/*
2711 * Insert a fragment on the MP reconstruction queue.
2712 * The queue is ordered by increasing sequence number.
2713 */
2714static void
2715ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
2716{
2717	struct sk_buff *p;
2718	struct sk_buff_head *list = &ppp->mrq;
2719	u32 seq = PPP_MP_CB(skb)->sequence;
2720
2721	/* N.B. we don't need to lock the list lock because we have the
2722	   ppp unit receive-side lock. */
2723	skb_queue_walk(list, p) {
2724		if (seq_before(seq, PPP_MP_CB(p)->sequence))
2725			break;
2726	}
2727	__skb_queue_before(list, p, skb);
2728}
2729
2730/*
2731 * Reconstruct a packet from the MP fragment queue.
2732 * We go through increasing sequence numbers until we find a
2733 * complete packet, or we get to the sequence number for a fragment
2734 * which hasn't arrived but might still do so.
2735 */
2736static struct sk_buff *
2737ppp_mp_reconstruct(struct ppp *ppp)
2738{
2739	u32 seq = ppp->nextseq;
2740	u32 minseq = ppp->minseq;
2741	struct sk_buff_head *list = &ppp->mrq;
2742	struct sk_buff *p, *tmp;
2743	struct sk_buff *head, *tail;
2744	struct sk_buff *skb = NULL;
2745	int lost = 0, len = 0;
2746
2747	if (ppp->mrru == 0)	/* do nothing until mrru is set */
2748		return NULL;
2749	head = __skb_peek(list);
2750	tail = NULL;
2751	skb_queue_walk_safe(list, p, tmp) {
2752	again:
2753		if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
2754			/* this can't happen; ignore the skb anyway */
2755			netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
2756				   "seq %u < %u\n",
2757				   PPP_MP_CB(p)->sequence, seq);
2758			__skb_unlink(p, list);
2759			kfree_skb(p);
2760			continue;
2761		}
2762		if (PPP_MP_CB(p)->sequence != seq) {
2763			u32 oldseq;
2764			/* Fragment `seq' is missing.  If it is after
2765			   minseq, it might arrive later, so stop here. */
2766			if (seq_after(seq, minseq))
2767				break;
2768			/* Fragment `seq' is lost, keep going. */
2769			lost = 1;
2770			oldseq = seq;
2771			seq = seq_before(minseq, PPP_MP_CB(p)->sequence) ?
2772				minseq + 1 : PPP_MP_CB(p)->sequence;
2773
2774			if (ppp->debug & 1)
2775				netdev_printk(KERN_DEBUG, ppp->dev,
2776					      "lost frag %u..%u\n",
2777					      oldseq, seq-1);
2778
2779			goto again;
2780		}
2781
2782		/*
2783		 * At this point we know that all the fragments from
2784		 * ppp->nextseq to seq are either present or lost.
2785		 * Also, there are no complete packets in the queue
2786		 * that have no missing fragments and end before this
2787		 * fragment.
2788		 */
2789
2790		/* B bit set indicates this fragment starts a packet */
2791		if (PPP_MP_CB(p)->BEbits & B) {
2792			head = p;
2793			lost = 0;
2794			len = 0;
2795		}
2796
2797		len += p->len;
2798
2799		/* Got a complete packet yet? */
2800		if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) &&
2801		    (PPP_MP_CB(head)->BEbits & B)) {
2802			if (len > ppp->mrru + 2) {
2803				++ppp->dev->stats.rx_length_errors;
2804				netdev_printk(KERN_DEBUG, ppp->dev,
2805					      "PPP: reconstructed packet"
2806					      " is too long (%d)\n", len);
2807			} else {
2808				tail = p;
2809				break;
2810			}
2811			ppp->nextseq = seq + 1;
2812		}
2813
2814		/*
2815		 * If this is the ending fragment of a packet,
2816		 * and we haven't found a complete valid packet yet,
2817		 * we can discard up to and including this fragment.
2818		 */
2819		if (PPP_MP_CB(p)->BEbits & E) {
2820			struct sk_buff *tmp2;
2821
2822			skb_queue_reverse_walk_from_safe(list, p, tmp2) {
2823				if (ppp->debug & 1)
2824					netdev_printk(KERN_DEBUG, ppp->dev,
2825						      "discarding frag %u\n",
2826						      PPP_MP_CB(p)->sequence);
2827				__skb_unlink(p, list);
2828				kfree_skb(p);
2829			}
2830			head = skb_peek(list);
2831			if (!head)
2832				break;
2833		}
2834		++seq;
2835	}
2836
2837	/* If we have a complete packet, copy it all into one skb. */
2838	if (tail != NULL) {
2839		/* If we have discarded any fragments,
2840		   signal a receive error. */
2841		if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
2842			skb_queue_walk_safe(list, p, tmp) {
2843				if (p == head)
2844					break;
2845				if (ppp->debug & 1)
2846					netdev_printk(KERN_DEBUG, ppp->dev,
2847						      "discarding frag %u\n",
2848						      PPP_MP_CB(p)->sequence);
2849				__skb_unlink(p, list);
2850				kfree_skb(p);
2851			}
2852
2853			if (ppp->debug & 1)
2854				netdev_printk(KERN_DEBUG, ppp->dev,
2855					      "  missed pkts %u..%u\n",
2856					      ppp->nextseq,
2857					      PPP_MP_CB(head)->sequence-1);
2858			++ppp->dev->stats.rx_dropped;
2859			ppp_receive_error(ppp);
2860		}
2861
2862		skb = head;
2863		if (head != tail) {
2864			struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
2865			p = skb_queue_next(list, head);
2866			__skb_unlink(skb, list);
2867			skb_queue_walk_from_safe(list, p, tmp) {
2868				__skb_unlink(p, list);
2869				*fragpp = p;
2870				p->next = NULL;
2871				fragpp = &p->next;
2872
2873				skb->len += p->len;
2874				skb->data_len += p->len;
2875				skb->truesize += p->truesize;
2876
2877				if (p == tail)
2878					break;
2879			}
2880		} else {
2881			__skb_unlink(skb, list);
2882		}
2883
2884		ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
2885	}
2886
2887	return skb;
2888}
2889#endif /* CONFIG_PPP_MULTILINK */
2890
2891/*
2892 * Channel interface.
2893 */
2894
2895/* Create a new, unattached ppp channel. */
2896int ppp_register_channel(struct ppp_channel *chan)
2897{
2898	return ppp_register_net_channel(current->nsproxy->net_ns, chan);
2899}
2900
2901/* Create a new, unattached ppp channel for specified net. */
2902int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
2903{
2904	struct channel *pch;
2905	struct ppp_net *pn;
2906
2907	pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
2908	if (!pch)
2909		return -ENOMEM;
2910
2911	pn = ppp_pernet(net);
2912
2913	pch->ppp = NULL;
2914	pch->chan = chan;
2915	pch->chan_net = get_net_track(net, &pch->ns_tracker, GFP_KERNEL);
2916	chan->ppp = pch;
2917	init_ppp_file(&pch->file, CHANNEL);
2918	pch->file.hdrlen = chan->hdrlen;
2919#ifdef CONFIG_PPP_MULTILINK
2920	pch->lastseq = -1;
2921#endif /* CONFIG_PPP_MULTILINK */
2922	init_rwsem(&pch->chan_sem);
2923	spin_lock_init(&pch->downl);
2924	rwlock_init(&pch->upl);
2925
2926	spin_lock_bh(&pn->all_channels_lock);
2927	pch->file.index = ++pn->last_channel_index;
2928	list_add(&pch->list, &pn->new_channels);
2929	atomic_inc(&channel_count);
2930	spin_unlock_bh(&pn->all_channels_lock);
2931
2932	return 0;
2933}
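
/*
 * A channel driver fills in a struct ppp_channel and hands it to the
 * generic layer.  A hedged sketch of the setup (the foo_* names are
 * hypothetical; the fields and ops come from
 * include/linux/ppp_channel.h):
 */
#if 0	/* illustrative sketch, not part of this file */
static const struct ppp_channel_ops foo_chan_ops = {
	.start_xmit	= foo_chan_start_xmit,
	.ioctl		= foo_chan_ioctl,
};

static int foo_chan_setup(struct foo_chan *fc)
{
	fc->chan.private = fc;			/* driver state */
	fc->chan.ops	 = &foo_chan_ops;
	fc->chan.mtu	 = 1500;		/* largest payload we carry */
	fc->chan.hdrlen	 = 2;			/* headroom we need per frame */
	fc->chan.speed	 = 0;			/* unknown; see ppp_mp_explode() */

	return ppp_register_channel(&fc->chan);
}
#endif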
2934
2935/*
2936 * Return the index of a channel.
2937 */
2938int ppp_channel_index(struct ppp_channel *chan)
2939{
2940	struct channel *pch = chan->ppp;
2941
2942	if (pch)
2943		return pch->file.index;
2944	return -1;
2945}
2946
2947/*
2948 * Return the PPP unit number to which a channel is connected.
2949 */
2950int ppp_unit_number(struct ppp_channel *chan)
2951{
2952	struct channel *pch = chan->ppp;
2953	int unit = -1;
2954
2955	if (pch) {
2956		read_lock_bh(&pch->upl);
2957		if (pch->ppp)
2958			unit = pch->ppp->file.index;
2959		read_unlock_bh(&pch->upl);
2960	}
2961	return unit;
2962}
2963
2964/*
2965 * Return the PPP device interface name of a channel.
2966 */
2967char *ppp_dev_name(struct ppp_channel *chan)
2968{
2969	struct channel *pch = chan->ppp;
2970	char *name = NULL;
2971
2972	if (pch) {
2973		read_lock_bh(&pch->upl);
2974		if (pch->ppp && pch->ppp->dev)
2975			name = pch->ppp->dev->name;
2976		read_unlock_bh(&pch->upl);
2977	}
2978	return name;
2979}
2980
2981
2982/*
2983 * Disconnect a channel from the generic layer.
2984 * This must be called in process context.
2985 */
2986void
2987ppp_unregister_channel(struct ppp_channel *chan)
2988{
2989	struct channel *pch = chan->ppp;
2990	struct ppp_net *pn;
2991
2992	if (!pch)
2993		return;		/* should never happen */
2994
2995	chan->ppp = NULL;
2996
2997	/*
2998	 * This ensures that we have returned from any calls into
2999	 * the channel's start_xmit or ioctl routine before we proceed.
3000	 */
3001	down_write(&pch->chan_sem);
3002	spin_lock_bh(&pch->downl);
3003	pch->chan = NULL;
3004	spin_unlock_bh(&pch->downl);
3005	up_write(&pch->chan_sem);
3006	ppp_disconnect_channel(pch);
3007
3008	pn = ppp_pernet(pch->chan_net);
3009	spin_lock_bh(&pn->all_channels_lock);
3010	list_del(&pch->list);
3011	spin_unlock_bh(&pn->all_channels_lock);
3012
3013	ppp_unbridge_channels(pch);
3014
3015	pch->file.dead = 1;
3016	wake_up_interruptible(&pch->file.rwait);
3017
3018	if (refcount_dec_and_test(&pch->file.refcnt))
3019		ppp_destroy_channel(pch);
3020}
3021
3022/*
3023 * Callback from a channel when it can accept more to transmit.
3024 * This should be called at BH/softirq level, not interrupt level.
3025 */
3026void
3027ppp_output_wakeup(struct ppp_channel *chan)
3028{
3029	struct channel *pch = chan->ppp;
3030
3031	if (!pch)
3032		return;
3033	ppp_channel_push(pch);
3034}
3035
3036/*
3037 * Compression control.
3038 */
3039
3040/* Process the PPPIOCSCOMPRESS ioctl. */
3041static int
3042ppp_set_compress(struct ppp *ppp, struct ppp_option_data *data)
3043{
3044	int err = -EFAULT;
3045	struct compressor *cp, *ocomp;
3046	void *state, *ostate;
3047	unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
3048
3049	if (data->length > CCP_MAX_OPTION_LENGTH)
3050		goto out;
3051	if (copy_from_user(ccp_option, data->ptr, data->length))
3052		goto out;
3053
3054	err = -EINVAL;
3055	if (data->length < 2 || ccp_option[1] < 2 || ccp_option[1] > data->length)
3056		goto out;
3057
3058	cp = try_then_request_module(
3059		find_compressor(ccp_option[0]),
3060		"ppp-compress-%d", ccp_option[0]);
3061	if (!cp)
3062		goto out;
3063
3064	err = -ENOBUFS;
3065	if (data->transmit) {
3066		state = cp->comp_alloc(ccp_option, data->length);
3067		if (state) {
3068			ppp_xmit_lock(ppp);
3069			ppp->xstate &= ~SC_COMP_RUN;
3070			ocomp = ppp->xcomp;
3071			ostate = ppp->xc_state;
3072			ppp->xcomp = cp;
3073			ppp->xc_state = state;
3074			ppp_xmit_unlock(ppp);
3075			if (ostate) {
3076				ocomp->comp_free(ostate);
3077				module_put(ocomp->owner);
3078			}
3079			err = 0;
3080		} else
3081			module_put(cp->owner);
3082
3083	} else {
3084		state = cp->decomp_alloc(ccp_option, data->length);
3085		if (state) {
3086			ppp_recv_lock(ppp);
3087			ppp->rstate &= ~SC_DECOMP_RUN;
3088			ocomp = ppp->rcomp;
3089			ostate = ppp->rc_state;
3090			ppp->rcomp = cp;
3091			ppp->rc_state = state;
3092			ppp_recv_unlock(ppp);
3093			if (ostate) {
3094				ocomp->decomp_free(ostate);
3095				module_put(ocomp->owner);
3096			}
3097			err = 0;
3098		} else
3099			module_put(cp->owner);
3100	}
3101
3102 out:
3103	return err;
3104}
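
/*
 * PPPIOCSCOMPRESS is issued by pppd once a CCP option has been
 * negotiated.  A minimal userspace sketch, assuming a /dev/ppp fd
 * attached to the unit and a Deflate option (CI_DEFLATE = 26, option
 * layout per RFC 1979):
 */
#if 0	/* illustrative sketch, not part of this file */
#include <sys/ioctl.h>
#include <linux/ppp-ioctl.h>

static int set_deflate(int unit_fd, int transmit)
{
	/* type 26, length 4, (window 15 << 4) | method 8, no chunking */
	unsigned char opt[4] = { 26, 4, (15 << 4) | 8, 0 };
	struct ppp_option_data data = {
		.ptr		= opt,
		.length		= sizeof(opt),
		.transmit	= transmit,	/* 1: xmit side, 0: recv side */
	};

	return ioctl(unit_fd, PPPIOCSCOMPRESS, &data);
}
#endif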
3105
3106/*
3107 * Look at a CCP packet and update our state accordingly.
3108 * We assume the caller has the xmit or recv path locked.
3109 */
3110static void
3111ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
3112{
3113	unsigned char *dp;
3114	int len;
3115
3116	if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
3117		return;	/* no header */
3118	dp = skb->data + 2;
3119
3120	switch (CCP_CODE(dp)) {
3121	case CCP_CONFREQ:
3122
3123		/* A ConfReq starts negotiation of compression
3124		 * in one direction of transmission,
3125		 * and hence brings it down...but which way?
3126		 *
3127		 * Remember:
3128		 * A ConfReq indicates what the sender would like to receive
3129		 */
3130		if (inbound)
3131			/* The peer is proposing what we should send */
3132			ppp->xstate &= ~SC_COMP_RUN;
3133		else
3134			/* We are proposing what the peer should send */
3135			ppp->rstate &= ~SC_DECOMP_RUN;
3136
3137		break;
3138
3139	case CCP_TERMREQ:
3140	case CCP_TERMACK:
3141		/*
3142		 * CCP is going down, both directions of transmission
3143		 */
3144		ppp->rstate &= ~SC_DECOMP_RUN;
3145		ppp->xstate &= ~SC_COMP_RUN;
3146		break;
3147
3148	case CCP_CONFACK:
3149		if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
3150			break;
3151		len = CCP_LENGTH(dp);
3152		if (!pskb_may_pull(skb, len + 2))
3153			return;		/* too short */
3154		dp += CCP_HDRLEN;
3155		len -= CCP_HDRLEN;
3156		if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
3157			break;
3158		if (inbound) {
3159			/* we will start receiving compressed packets */
3160			if (!ppp->rc_state)
3161				break;
3162			if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
3163					ppp->file.index, 0, ppp->mru, ppp->debug)) {
3164				ppp->rstate |= SC_DECOMP_RUN;
3165				ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
3166			}
3167		} else {
3168			/* we will soon start sending compressed packets */
3169			if (!ppp->xc_state)
3170				break;
3171			if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
3172					ppp->file.index, 0, ppp->debug))
3173				ppp->xstate |= SC_COMP_RUN;
3174		}
3175		break;
3176
3177	case CCP_RESETACK:
3178		/* reset the [de]compressor */
3179		if ((ppp->flags & SC_CCP_UP) == 0)
3180			break;
3181		if (inbound) {
3182			if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
3183				ppp->rcomp->decomp_reset(ppp->rc_state);
3184				ppp->rstate &= ~SC_DC_ERROR;
3185			}
3186		} else {
3187			if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
3188				ppp->xcomp->comp_reset(ppp->xc_state);
3189		}
3190		break;
3191	}
3192}
3193
3194/* Free up compression resources. */
3195static void
3196ppp_ccp_closed(struct ppp *ppp)
3197{
3198	void *xstate, *rstate;
3199	struct compressor *xcomp, *rcomp;
3200
3201	ppp_lock(ppp);
3202	ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
3203	ppp->xstate = 0;
3204	xcomp = ppp->xcomp;
3205	xstate = ppp->xc_state;
3206	ppp->xc_state = NULL;
3207	ppp->rstate = 0;
3208	rcomp = ppp->rcomp;
3209	rstate = ppp->rc_state;
3210	ppp->rc_state = NULL;
3211	ppp_unlock(ppp);
3212
3213	if (xstate) {
3214		xcomp->comp_free(xstate);
3215		module_put(xcomp->owner);
3216	}
3217	if (rstate) {
3218		rcomp->decomp_free(rstate);
3219		module_put(rcomp->owner);
3220	}
3221}
3222
3223/* List of compressors. */
3224static LIST_HEAD(compressor_list);
3225static DEFINE_SPINLOCK(compressor_list_lock);
3226
3227struct compressor_entry {
3228	struct list_head list;
3229	struct compressor *comp;
3230};
3231
3232static struct compressor_entry *
3233find_comp_entry(int proto)
3234{
3235	struct compressor_entry *ce;
3236
3237	list_for_each_entry(ce, &compressor_list, list) {
3238		if (ce->comp->compress_proto == proto)
3239			return ce;
3240	}
3241	return NULL;
3242}
3243
3244/* Register a compressor */
3245int
3246ppp_register_compressor(struct compressor *cp)
3247{
3248	struct compressor_entry *ce;
3249	int ret;
3250	spin_lock(&compressor_list_lock);
3251	ret = -EEXIST;
3252	if (find_comp_entry(cp->compress_proto))
3253		goto out;
3254	ret = -ENOMEM;
3255	ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
3256	if (!ce)
3257		goto out;
3258	ret = 0;
3259	ce->comp = cp;
3260	list_add(&ce->list, &compressor_list);
3261 out:
3262	spin_unlock(&compressor_list_lock);
3263	return ret;
3264}
3265
3266/* Unregister a compressor */
3267void
3268ppp_unregister_compressor(struct compressor *cp)
3269{
3270	struct compressor_entry *ce;
3271
3272	spin_lock(&compressor_list_lock);
3273	ce = find_comp_entry(cp->compress_proto);
3274	if (ce && ce->comp == cp) {
3275		list_del(&ce->list);
3276		kfree(ce);
3277	}
3278	spin_unlock(&compressor_list_lock);
3279}
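
/*
 * Compressor modules (ppp_deflate, bsd_comp, ppp_mppe) register
 * themselves at module init; try_then_request_module() in
 * ppp_set_compress() can then auto-load them through the
 * "ppp-compress-<proto>" alias.  A hedged sketch of the boilerplate
 * (my_comp is hypothetical; a real ops table carries the comp/decomp
 * callbacks elided here):
 */
#if 0	/* illustrative sketch, not part of this file */
static struct compressor my_comp = {
	.compress_proto	= CI_DEFLATE,
	/* .comp_alloc, .compress, .decomp_alloc, .decompress, ... */
	.owner		= THIS_MODULE,
};

static int __init my_comp_init(void)
{
	return ppp_register_compressor(&my_comp);
}

static void __exit my_comp_exit(void)
{
	ppp_unregister_compressor(&my_comp);
}

module_init(my_comp_init);
module_exit(my_comp_exit);
MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE));
#endif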
3280
3281/* Find a compressor. */
3282static struct compressor *
3283find_compressor(int type)
3284{
3285	struct compressor_entry *ce;
3286	struct compressor *cp = NULL;
3287
3288	spin_lock(&compressor_list_lock);
3289	ce = find_comp_entry(type);
3290	if (ce) {
3291		cp = ce->comp;
3292		if (!try_module_get(cp->owner))
3293			cp = NULL;
3294	}
3295	spin_unlock(&compressor_list_lock);
3296	return cp;
3297}
3298
3299/*
3300 * Miscellaneous stuff.
3301 */
3302
3303static void
3304ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
3305{
3306	struct slcompress *vj = ppp->vj;
3307
3308	memset(st, 0, sizeof(*st));
3309	st->p.ppp_ipackets = ppp->stats64.rx_packets;
3310	st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
3311	st->p.ppp_ibytes = ppp->stats64.rx_bytes;
3312	st->p.ppp_opackets = ppp->stats64.tx_packets;
3313	st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
3314	st->p.ppp_obytes = ppp->stats64.tx_bytes;
3315	if (!vj)
3316		return;
3317	st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
3318	st->vj.vjs_compressed = vj->sls_o_compressed;
3319	st->vj.vjs_searches = vj->sls_o_searches;
3320	st->vj.vjs_misses = vj->sls_o_misses;
3321	st->vj.vjs_errorin = vj->sls_i_error;
3322	st->vj.vjs_tossed = vj->sls_i_tossed;
3323	st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
3324	st->vj.vjs_compressedin = vj->sls_i_compressed;
3325}
3326
3327/*
3328 * Stuff for handling the lists of ppp units and channels
3329 * and for initialization.
3330 */
3331
3332/*
3333 * Create a new ppp interface unit.  Fails if it can't allocate memory
3334 * or if there is already a unit with the requested number.
3335 * unit == -1 means allocate a new number.
3336 */
3337static int ppp_create_interface(struct net *net, struct file *file, int *unit)
3338{
3339	struct ppp_config conf = {
3340		.file = file,
3341		.unit = *unit,
3342		.ifname_is_set = false,
3343	};
3344	struct net_device *dev;
3345	struct ppp *ppp;
3346	int err;
3347
3348	dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_ENUM, ppp_setup);
3349	if (!dev) {
3350		err = -ENOMEM;
3351		goto err;
3352	}
3353	dev_net_set(dev, net);
3354	dev->rtnl_link_ops = &ppp_link_ops;
3355
3356	rtnl_lock();
3357
3358	err = ppp_dev_configure(net, dev, &conf);
3359	if (err < 0)
3360		goto err_dev;
3361	ppp = netdev_priv(dev);
3362	*unit = ppp->file.index;
3363
3364	rtnl_unlock();
3365
3366	return 0;
3367
3368err_dev:
3369	rtnl_unlock();
3370	free_netdev(dev);
3371err:
3372	return err;
3373}
3374
3375/*
3376 * Initialize a ppp_file structure.
3377 */
3378static void
3379init_ppp_file(struct ppp_file *pf, int kind)
3380{
3381	pf->kind = kind;
3382	skb_queue_head_init(&pf->xq);
3383	skb_queue_head_init(&pf->rq);
3384	refcount_set(&pf->refcnt, 1);
3385	init_waitqueue_head(&pf->rwait);
3386}
3387
3388/*
3389 * Free the memory used by a ppp unit.  This is only called once
3390 * there are no channels connected to the unit and no file structs
3391 * that reference the unit.
3392 */
3393static void ppp_destroy_interface(struct ppp *ppp)
3394{
3395	atomic_dec(&ppp_unit_count);
3396
3397	if (!ppp->file.dead || ppp->n_channels) {
3398		/* "can't happen" */
3399		netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
3400			   "but dead=%d n_channels=%d !\n",
3401			   ppp, ppp->file.dead, ppp->n_channels);
3402		return;
3403	}
3404
3405	ppp_ccp_closed(ppp);
3406	if (ppp->vj) {
3407		slhc_free(ppp->vj);
3408		ppp->vj = NULL;
3409	}
3410	skb_queue_purge(&ppp->file.xq);
3411	skb_queue_purge(&ppp->file.rq);
3412#ifdef CONFIG_PPP_MULTILINK
3413	skb_queue_purge(&ppp->mrq);
3414#endif /* CONFIG_PPP_MULTILINK */
3415#ifdef CONFIG_PPP_FILTER
3416	if (ppp->pass_filter) {
3417		bpf_prog_destroy(ppp->pass_filter);
3418		ppp->pass_filter = NULL;
3419	}
3420
3421	if (ppp->active_filter) {
3422		bpf_prog_destroy(ppp->active_filter);
3423		ppp->active_filter = NULL;
3424	}
3425#endif /* CONFIG_PPP_FILTER */
3426
3427	kfree_skb(ppp->xmit_pending);
3428	free_percpu(ppp->xmit_recursion);
3429
3430	free_netdev(ppp->dev);
3431}
3432
3433/*
3434 * Locate an existing ppp unit.
3435 * The caller should have locked the all_ppp_mutex.
3436 */
3437static struct ppp *
3438ppp_find_unit(struct ppp_net *pn, int unit)
3439{
3440	return unit_find(&pn->units_idr, unit);
3441}
3442
3443/*
3444 * Locate an existing ppp channel.
3445 * The caller should have locked the all_channels_lock.
3446 * First we look in the new_channels list, then in the
3447 * all_channels list.  If found in the new_channels list,
3448 * we move it to the all_channels list.  This is for speed
3449 * when we have a lot of channels in use.
3450 */
3451static struct channel *
3452ppp_find_channel(struct ppp_net *pn, int unit)
3453{
3454	struct channel *pch;
3455
3456	list_for_each_entry(pch, &pn->new_channels, list) {
3457		if (pch->file.index == unit) {
3458			list_move(&pch->list, &pn->all_channels);
3459			return pch;
3460		}
3461	}
3462
3463	list_for_each_entry(pch, &pn->all_channels, list) {
3464		if (pch->file.index == unit)
3465			return pch;
3466	}
3467
3468	return NULL;
3469}
3470
3471/*
3472 * Connect a PPP channel to a PPP interface unit.
3473 */
3474static int
3475ppp_connect_channel(struct channel *pch, int unit)
3476{
3477	struct ppp *ppp;
3478	struct ppp_net *pn;
3479	int ret = -ENXIO;
3480	int hdrlen;
3481
3482	pn = ppp_pernet(pch->chan_net);
3483
3484	mutex_lock(&pn->all_ppp_mutex);
3485	ppp = ppp_find_unit(pn, unit);
3486	if (!ppp)
3487		goto out;
3488	write_lock_bh(&pch->upl);
3489	ret = -EINVAL;
3490	if (pch->ppp ||
3491	    rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl)))
3492		goto outl;
3493
3494	ppp_lock(ppp);
3495	spin_lock_bh(&pch->downl);
3496	if (!pch->chan) {
3497		/* Don't connect unregistered channels */
3498		spin_unlock_bh(&pch->downl);
3499		ppp_unlock(ppp);
3500		ret = -ENOTCONN;
3501		goto outl;
3502	}
3503	spin_unlock_bh(&pch->downl);
3504	if (pch->file.hdrlen > ppp->file.hdrlen)
3505		ppp->file.hdrlen = pch->file.hdrlen;
3506	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
3507	if (hdrlen > ppp->dev->hard_header_len)
3508		ppp->dev->hard_header_len = hdrlen;
3509	list_add_tail(&pch->clist, &ppp->channels);
3510	++ppp->n_channels;
3511	pch->ppp = ppp;
3512	refcount_inc(&ppp->file.refcnt);
3513	ppp_unlock(ppp);
3514	ret = 0;
3515
3516 outl:
3517	write_unlock_bh(&pch->upl);
3518 out:
3519	mutex_unlock(&pn->all_ppp_mutex);
3520	return ret;
3521}
3522
3523/*
3524 * Disconnect a channel from its ppp unit.
3525 */
3526static int
3527ppp_disconnect_channel(struct channel *pch)
3528{
3529	struct ppp *ppp;
3530	int err = -EINVAL;
3531
3532	write_lock_bh(&pch->upl);
3533	ppp = pch->ppp;
3534	pch->ppp = NULL;
3535	write_unlock_bh(&pch->upl);
3536	if (ppp) {
3537		/* remove it from the ppp unit's list */
3538		ppp_lock(ppp);
3539		list_del(&pch->clist);
3540		if (--ppp->n_channels == 0)
3541			wake_up_interruptible(&ppp->file.rwait);
3542		ppp_unlock(ppp);
3543		if (refcount_dec_and_test(&ppp->file.refcnt))
3544			ppp_destroy_interface(ppp);
3545		err = 0;
3546	}
3547	return err;
3548}
3549
3550/*
3551 * Free up the resources used by a ppp channel.
3552 */
3553static void ppp_destroy_channel(struct channel *pch)
3554{
3555	put_net_track(pch->chan_net, &pch->ns_tracker);
3556	pch->chan_net = NULL;
3557
3558	atomic_dec(&channel_count);
3559
3560	if (!pch->file.dead) {
3561		/* "can't happen" */
3562		pr_err("ppp: destroying undead channel %p !\n", pch);
3563		return;
3564	}
3565	skb_queue_purge(&pch->file.xq);
3566	skb_queue_purge(&pch->file.rq);
3567	kfree(pch);
3568}
3569
3570static void __exit ppp_cleanup(void)
3571{
3572	/* should never happen */
3573	if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
3574		pr_err("PPP: removing module but units remain!\n");
3575	rtnl_link_unregister(&ppp_link_ops);
3576	unregister_chrdev(PPP_MAJOR, "ppp");
3577	device_destroy(&ppp_class, MKDEV(PPP_MAJOR, 0));
3578	class_unregister(&ppp_class);
3579	unregister_pernet_device(&ppp_net_ops);
3580}
3581
3582/*
3583 * Units handling. Caller must protect concurrent access
3584 * by holding all_ppp_mutex
3585 */
3586
3587/* associate pointer with specified number */
3588static int unit_set(struct idr *p, void *ptr, int n)
3589{
3590	int unit;
3591
3592	unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL);
3593	if (unit == -ENOSPC)
3594		unit = -EINVAL;
3595	return unit;
3596}
3597
3598/* get new free unit number and associate pointer with it */
3599static int unit_get(struct idr *p, void *ptr, int min)
3600{
3601	return idr_alloc(p, ptr, min, 0, GFP_KERNEL);
3602}
3603
3604/* put unit number back to a pool */
3605static void unit_put(struct idr *p, int n)
3606{
3607	idr_remove(p, n);
3608}
3609
3610/* get pointer associated with the number */
3611static void *unit_find(struct idr *p, int n)
3612{
3613	return idr_find(p, n);
3614}
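
/*
 * Note the asymmetry: unit_set() calls idr_alloc() with the window
 * [n, n + 1), i.e. it asks for exactly id n and fails with -EINVAL
 * (mapped from -ENOSPC) when that unit number is taken, while
 * unit_get() passes end = 0 and therefore takes the lowest free id
 * at or above min.
 */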
3615
3616/* Module/initialization stuff */
3617
3618module_init(ppp_init);
3619module_exit(ppp_cleanup);
3620
3621EXPORT_SYMBOL(ppp_register_net_channel);
3622EXPORT_SYMBOL(ppp_register_channel);
3623EXPORT_SYMBOL(ppp_unregister_channel);
3624EXPORT_SYMBOL(ppp_channel_index);
3625EXPORT_SYMBOL(ppp_unit_number);
3626EXPORT_SYMBOL(ppp_dev_name);
3627EXPORT_SYMBOL(ppp_input);
3628EXPORT_SYMBOL(ppp_input_error);
3629EXPORT_SYMBOL(ppp_output_wakeup);
3630EXPORT_SYMBOL(ppp_register_compressor);
3631EXPORT_SYMBOL(ppp_unregister_compressor);
3632MODULE_DESCRIPTION("Generic PPP layer driver");
3633MODULE_LICENSE("GPL");
3634MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
3635MODULE_ALIAS_RTNL_LINK("ppp");
3636MODULE_ALIAS("devname:ppp");
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Generic PPP layer for Linux.
   4 *
   5 * Copyright 1999-2002 Paul Mackerras.
   6 *
   7 * The generic PPP layer handles the PPP network interfaces, the
   8 * /dev/ppp device, packet and VJ compression, and multilink.
   9 * It talks to PPP `channels' via the interface defined in
  10 * include/linux/ppp_channel.h.  Channels provide the basic means for
  11 * sending and receiving PPP frames on some kind of communications
  12 * channel.
  13 *
  14 * Part of the code in this driver was inspired by the old async-only
  15 * PPP driver, written by Michael Callahan and Al Longyear, and
  16 * subsequently hacked by Paul Mackerras.
  17 *
  18 * ==FILEVERSION 20041108==
  19 */
  20
  21#include <linux/module.h>
  22#include <linux/kernel.h>
  23#include <linux/sched/signal.h>
  24#include <linux/kmod.h>
  25#include <linux/init.h>
  26#include <linux/list.h>
  27#include <linux/idr.h>
  28#include <linux/netdevice.h>
  29#include <linux/poll.h>
  30#include <linux/ppp_defs.h>
  31#include <linux/filter.h>
  32#include <linux/ppp-ioctl.h>
  33#include <linux/ppp_channel.h>
  34#include <linux/ppp-comp.h>
  35#include <linux/skbuff.h>
  36#include <linux/rtnetlink.h>
  37#include <linux/if_arp.h>
  38#include <linux/ip.h>
  39#include <linux/tcp.h>
  40#include <linux/spinlock.h>
  41#include <linux/rwsem.h>
  42#include <linux/stddef.h>
  43#include <linux/device.h>
  44#include <linux/mutex.h>
  45#include <linux/slab.h>
  46#include <linux/file.h>
  47#include <asm/unaligned.h>
  48#include <net/slhc_vj.h>
  49#include <linux/atomic.h>
  50#include <linux/refcount.h>
  51
  52#include <linux/nsproxy.h>
  53#include <net/net_namespace.h>
  54#include <net/netns/generic.h>
  55
  56#define PPP_VERSION	"2.4.2"
  57
  58/*
  59 * Network protocols we support.
  60 */
  61#define NP_IP	0		/* Internet Protocol V4 */
  62#define NP_IPV6	1		/* Internet Protocol V6 */
  63#define NP_IPX	2		/* IPX protocol */
  64#define NP_AT	3		/* Appletalk protocol */
  65#define NP_MPLS_UC 4		/* MPLS unicast */
  66#define NP_MPLS_MC 5		/* MPLS multicast */
  67#define NUM_NP	6		/* Number of NPs. */
  68
  69#define MPHDRLEN	6	/* multilink protocol header length */
  70#define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
  71
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  72/*
  73 * An instance of /dev/ppp can be associated with either a ppp
  74 * interface unit or a ppp channel.  In both cases, file->private_data
  75 * points to one of these.
  76 */
  77struct ppp_file {
  78	enum {
  79		INTERFACE=1, CHANNEL
  80	}		kind;
  81	struct sk_buff_head xq;		/* pppd transmit queue */
  82	struct sk_buff_head rq;		/* receive queue for pppd */
  83	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
  84	refcount_t	refcnt;		/* # refs (incl /dev/ppp attached) */
  85	int		hdrlen;		/* space to leave for headers */
  86	int		index;		/* interface unit / channel number */
  87	int		dead;		/* unit/channel has been shut down */
  88};
  89
  90#define PF_TO_X(pf, X)		container_of(pf, X, file)
  91
  92#define PF_TO_PPP(pf)		PF_TO_X(pf, struct ppp)
  93#define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)
  94
  95/*
  96 * Data structure to hold primary network stats for which
  97 * we want to use 64 bit storage.  Other network stats
  98 * are stored in dev->stats of the ppp strucute.
  99 */
 100struct ppp_link_stats {
 101	u64 rx_packets;
 102	u64 tx_packets;
 103	u64 rx_bytes;
 104	u64 tx_bytes;
 105};
 106
 107/*
 108 * Data structure describing one ppp unit.
 109 * A ppp unit corresponds to a ppp network interface device
 110 * and represents a multilink bundle.
 111 * It can have 0 or more ppp channels connected to it.
 112 */
 113struct ppp {
 114	struct ppp_file	file;		/* stuff for read/write/poll 0 */
 115	struct file	*owner;		/* file that owns this unit 48 */
 116	struct list_head channels;	/* list of attached channels 4c */
 117	int		n_channels;	/* how many channels are attached 54 */
 118	spinlock_t	rlock;		/* lock for receive side 58 */
 119	spinlock_t	wlock;		/* lock for transmit side 5c */
 120	int __percpu	*xmit_recursion; /* xmit recursion detect */
 121	int		mru;		/* max receive unit 60 */
 122	unsigned int	flags;		/* control bits 64 */
 123	unsigned int	xstate;		/* transmit state bits 68 */
 124	unsigned int	rstate;		/* receive state bits 6c */
 125	int		debug;		/* debug flags 70 */
 126	struct slcompress *vj;		/* state for VJ header compression */
 127	enum NPmode	npmode[NUM_NP];	/* what to do with each net proto 78 */
 128	struct sk_buff	*xmit_pending;	/* a packet ready to go out 88 */
 129	struct compressor *xcomp;	/* transmit packet compressor 8c */
 130	void		*xc_state;	/* its internal state 90 */
 131	struct compressor *rcomp;	/* receive decompressor 94 */
 132	void		*rc_state;	/* its internal state 98 */
 133	unsigned long	last_xmit;	/* jiffies when last pkt sent 9c */
 134	unsigned long	last_recv;	/* jiffies when last pkt rcvd a0 */
 135	struct net_device *dev;		/* network interface device a4 */
 136	int		closing;	/* is device closing down? a8 */
 137#ifdef CONFIG_PPP_MULTILINK
 138	int		nxchan;		/* next channel to send something on */
 139	u32		nxseq;		/* next sequence number to send */
 140	int		mrru;		/* MP: max reconst. receive unit */
 141	u32		nextseq;	/* MP: seq no of next packet */
 142	u32		minseq;		/* MP: min of most recent seqnos */
 143	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
 144#endif /* CONFIG_PPP_MULTILINK */
 145#ifdef CONFIG_PPP_FILTER
 146	struct bpf_prog *pass_filter;	/* filter for packets to pass */
 147	struct bpf_prog *active_filter; /* filter for pkts to reset idle */
 148#endif /* CONFIG_PPP_FILTER */
 149	struct net	*ppp_net;	/* the net we belong to */
 150	struct ppp_link_stats stats64;	/* 64 bit network stats */
 151};
 152
 153/*
 154 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
 155 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
 156 * SC_MUST_COMP
 157 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
 158 * Bits in xstate: SC_COMP_RUN
 159 */
 160#define SC_FLAG_BITS	(SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
 161			 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
 162			 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)
 163
 164/*
 165 * Private data structure for each channel.
 166 * This includes the data structure used for multilink.
 167 */
 168struct channel {
 169	struct ppp_file	file;		/* stuff for read/write/poll */
 170	struct list_head list;		/* link in all/new_channels list */
 171	struct ppp_channel *chan;	/* public channel data structure */
 172	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
 173	spinlock_t	downl;		/* protects `chan', file.xq dequeue */
 174	struct ppp	*ppp;		/* ppp unit we're connected to */
 175	struct net	*chan_net;	/* the net channel belongs to */
 
 176	struct list_head clist;		/* link in list of channels per unit */
 177	rwlock_t	upl;		/* protects `ppp' */
 
 178#ifdef CONFIG_PPP_MULTILINK
 179	u8		avail;		/* flag used in multilink stuff */
 180	u8		had_frag;	/* >= 1 fragments have been sent */
 181	u32		lastseq;	/* MP: last sequence # received */
 182	int		speed;		/* speed of the corresponding ppp channel*/
 183#endif /* CONFIG_PPP_MULTILINK */
 184};
 185
 186struct ppp_config {
 187	struct file *file;
 188	s32 unit;
 189	bool ifname_is_set;
 190};
 191
 192/*
 193 * SMP locking issues:
 194 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
 195 * list and the ppp.n_channels field, you need to take both locks
 196 * before you modify them.
 197 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
 198 * channel.downl.
 199 */
 200
 201static DEFINE_MUTEX(ppp_mutex);
 202static atomic_t ppp_unit_count = ATOMIC_INIT(0);
 203static atomic_t channel_count = ATOMIC_INIT(0);
 204
 205/* per-net private data for this module */
 206static unsigned int ppp_net_id __read_mostly;
 207struct ppp_net {
 208	/* units to ppp mapping */
 209	struct idr units_idr;
 210
 211	/*
 212	 * all_ppp_mutex protects the units_idr mapping.
 213	 * It also ensures that finding a ppp unit in the units_idr
 214	 * map and updating its file.refcnt field is atomic.
 215	 */
 216	struct mutex all_ppp_mutex;
 217
 218	/* channels */
 219	struct list_head all_channels;
 220	struct list_head new_channels;
 221	int last_channel_index;
 222
 223	/*
 224	 * all_channels_lock protects all_channels and
 225	 * last_channel_index, and the atomicity of find
 226	 * a channel and updating its file.refcnt field.
 227	 */
 228	spinlock_t all_channels_lock;
 229};
 230
 231/* Get the PPP protocol number from a skb */
 232#define PPP_PROTO(skb)	get_unaligned_be16((skb)->data)
 233
 234/* We limit the length of ppp->file.rq to this (arbitrary) value */
 235#define PPP_MAX_RQLEN	32
 236
 237/*
 238 * Maximum number of multilink fragments queued up.
 239 * This has to be large enough to cope with the maximum latency of
 240 * the slowest channel relative to the others.  Strictly it should
 241 * depend on the number of channels and their characteristics.
 242 */
 243#define PPP_MP_MAX_QLEN	128
 244
 245/* Multilink header bits. */
 246#define B	0x80		/* this fragment begins a packet */
 247#define E	0x40		/* this fragment ends a packet */
 248
 249/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
 250#define seq_before(a, b)	((s32)((a) - (b)) < 0)
 251#define seq_after(a, b)		((s32)((a) - (b)) > 0)
 252
 253/* Prototypes. */
 254static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
 255			struct file *file, unsigned int cmd, unsigned long arg);
 256static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
 257static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
 258static void ppp_push(struct ppp *ppp);
 259static void ppp_channel_push(struct channel *pch);
 260static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
 261			      struct channel *pch);
 262static void ppp_receive_error(struct ppp *ppp);
 263static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
 264static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
 265					    struct sk_buff *skb);
 266#ifdef CONFIG_PPP_MULTILINK
 267static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
 268				struct channel *pch);
 269static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
 270static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
 271static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
 272#endif /* CONFIG_PPP_MULTILINK */
 273static int ppp_set_compress(struct ppp *ppp, struct ppp_option_data *data);
 274static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
 275static void ppp_ccp_closed(struct ppp *ppp);
 276static struct compressor *find_compressor(int type);
 277static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
 278static int ppp_create_interface(struct net *net, struct file *file, int *unit);
 279static void init_ppp_file(struct ppp_file *pf, int kind);
 280static void ppp_destroy_interface(struct ppp *ppp);
 281static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
 282static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
 283static int ppp_connect_channel(struct channel *pch, int unit);
 284static int ppp_disconnect_channel(struct channel *pch);
 285static void ppp_destroy_channel(struct channel *pch);
 286static int unit_get(struct idr *p, void *ptr);
 287static int unit_set(struct idr *p, void *ptr, int n);
 288static void unit_put(struct idr *p, int n);
 289static void *unit_find(struct idr *p, int n);
 290static void ppp_setup(struct net_device *dev);
 291
 292static const struct net_device_ops ppp_netdev_ops;
 293
 294static struct class *ppp_class;
 295
 296/* per net-namespace data */
 297static inline struct ppp_net *ppp_pernet(struct net *net)
 298{
 299	return net_generic(net, ppp_net_id);
 300}
 301
 302/* Translates a PPP protocol number to a NP index (NP == network protocol) */
 303static inline int proto_to_npindex(int proto)
 304{
 305	switch (proto) {
 306	case PPP_IP:
 307		return NP_IP;
 308	case PPP_IPV6:
 309		return NP_IPV6;
 310	case PPP_IPX:
 311		return NP_IPX;
 312	case PPP_AT:
 313		return NP_AT;
 314	case PPP_MPLS_UC:
 315		return NP_MPLS_UC;
 316	case PPP_MPLS_MC:
 317		return NP_MPLS_MC;
 318	}
 319	return -EINVAL;
 320}
 321
 322/* Translates an NP index into a PPP protocol number */
 323static const int npindex_to_proto[NUM_NP] = {
 324	PPP_IP,
 325	PPP_IPV6,
 326	PPP_IPX,
 327	PPP_AT,
 328	PPP_MPLS_UC,
 329	PPP_MPLS_MC,
 330};
 331
 332/* Translates an ethertype into an NP index */
 333static inline int ethertype_to_npindex(int ethertype)
 334{
 335	switch (ethertype) {
 336	case ETH_P_IP:
 337		return NP_IP;
 338	case ETH_P_IPV6:
 339		return NP_IPV6;
 340	case ETH_P_IPX:
 341		return NP_IPX;
 342	case ETH_P_PPPTALK:
 343	case ETH_P_ATALK:
 344		return NP_AT;
 345	case ETH_P_MPLS_UC:
 346		return NP_MPLS_UC;
 347	case ETH_P_MPLS_MC:
 348		return NP_MPLS_MC;
 349	}
 350	return -1;
 351}
 352
 353/* Translates an NP index into an ethertype */
 354static const int npindex_to_ethertype[NUM_NP] = {
 355	ETH_P_IP,
 356	ETH_P_IPV6,
 357	ETH_P_IPX,
 358	ETH_P_PPPTALK,
 359	ETH_P_MPLS_UC,
 360	ETH_P_MPLS_MC,
 361};
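
/* Illustrative round trip: an outgoing IPv4 skb has skb->protocol ==
 * htons(ETH_P_IP); ethertype_to_npindex(ETH_P_IP) yields NP_IP, and
 * npindex_to_proto[NP_IP] is PPP_IP (0x21), the number put on the wire.
 */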
 362
 363/*
 364 * Locking shorthand.
 365 */
 366#define ppp_xmit_lock(ppp)	spin_lock_bh(&(ppp)->wlock)
 367#define ppp_xmit_unlock(ppp)	spin_unlock_bh(&(ppp)->wlock)
 368#define ppp_recv_lock(ppp)	spin_lock_bh(&(ppp)->rlock)
 369#define ppp_recv_unlock(ppp)	spin_unlock_bh(&(ppp)->rlock)
 370#define ppp_lock(ppp)		do { ppp_xmit_lock(ppp); \
 371				     ppp_recv_lock(ppp); } while (0)
 372#define ppp_unlock(ppp)		do { ppp_recv_unlock(ppp); \
 373				     ppp_xmit_unlock(ppp); } while (0)
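
/* Usage sketch (illustrative): code touching both transmit and receive
 * state takes both locks, always in the xmit -> recv order encoded in
 * ppp_lock(), e.g.:
 *
 *	ppp_lock(ppp);
 *	ppp->flags = val & SC_FLAG_BITS;
 *	ppp_unlock(ppp);
 */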
 374
 375/*
 376 * /dev/ppp device routines.
 377 * The /dev/ppp device is used by pppd to control the ppp unit.
 378 * It supports the read, write, ioctl and poll functions.
 379 * Open instances of /dev/ppp can be in one of three states:
 380 * unattached, attached to a ppp unit, or attached to a ppp channel.
 381 */
 382static int ppp_open(struct inode *inode, struct file *file)
 383{
 384	/*
 385	 * This could (should?) be enforced by the permissions on /dev/ppp.
 386	 */
 387	if (!ns_capable(file->f_cred->user_ns, CAP_NET_ADMIN))
 388		return -EPERM;
 389	return 0;
 390}
 391
 392static int ppp_release(struct inode *unused, struct file *file)
 393{
 394	struct ppp_file *pf = file->private_data;
 395	struct ppp *ppp;
 396
 397	if (pf) {
 398		file->private_data = NULL;
 399		if (pf->kind == INTERFACE) {
 400			ppp = PF_TO_PPP(pf);
 401			rtnl_lock();
 402			if (file == ppp->owner)
 403				unregister_netdevice(ppp->dev);
 404			rtnl_unlock();
 405		}
 406		if (refcount_dec_and_test(&pf->refcnt)) {
 407			switch (pf->kind) {
 408			case INTERFACE:
 409				ppp_destroy_interface(PF_TO_PPP(pf));
 410				break;
 411			case CHANNEL:
 412				ppp_destroy_channel(PF_TO_CHANNEL(pf));
 413				break;
 414			}
 415		}
 416	}
 417	return 0;
 418}
 419
 420static ssize_t ppp_read(struct file *file, char __user *buf,
 421			size_t count, loff_t *ppos)
 422{
 423	struct ppp_file *pf = file->private_data;
 424	DECLARE_WAITQUEUE(wait, current);
 425	ssize_t ret;
 426	struct sk_buff *skb = NULL;
 427	struct iovec iov;
 428	struct iov_iter to;
 429
 430	ret = count;
 431
 432	if (!pf)
 433		return -ENXIO;
 434	add_wait_queue(&pf->rwait, &wait);
 435	for (;;) {
 436		set_current_state(TASK_INTERRUPTIBLE);
 437		skb = skb_dequeue(&pf->rq);
 438		if (skb)
 439			break;
 440		ret = 0;
 441		if (pf->dead)
 442			break;
 443		if (pf->kind == INTERFACE) {
 444			/*
 445			 * Return 0 (EOF) on an interface that has no
 446			 * channels connected, unless it is looping
 447			 * network traffic (demand mode).
 448			 */
 449			struct ppp *ppp = PF_TO_PPP(pf);
 450
 451			ppp_recv_lock(ppp);
 452			if (ppp->n_channels == 0 &&
 453			    (ppp->flags & SC_LOOP_TRAFFIC) == 0) {
 454				ppp_recv_unlock(ppp);
 455				break;
 456			}
 457			ppp_recv_unlock(ppp);
 458		}
 459		ret = -EAGAIN;
 460		if (file->f_flags & O_NONBLOCK)
 461			break;
 462		ret = -ERESTARTSYS;
 463		if (signal_pending(current))
 464			break;
 465		schedule();
 466	}
 467	set_current_state(TASK_RUNNING);
 468	remove_wait_queue(&pf->rwait, &wait);
 469
 470	if (!skb)
 471		goto out;
 472
 473	ret = -EOVERFLOW;
 474	if (skb->len > count)
 475		goto outf;
 476	ret = -EFAULT;
 477	iov.iov_base = buf;
 478	iov.iov_len = count;
 479	iov_iter_init(&to, ITER_DEST, &iov, 1, count);
 480	if (skb_copy_datagram_iter(skb, 0, &to, skb->len))
 481		goto outf;
 482	ret = skb->len;
 483
 484 outf:
 485	kfree_skb(skb);
 486 out:
 487	return ret;
 488}
 489
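/* Userspace sketch (illustrative, not part of this file; 'fd' is a
 * hypothetical descriptor attached to a unit or channel): each read()
 * returns one queued frame, starting with the 2-byte protocol field,
 * e.g. 0xc0 0x21 for LCP:
 *
 *	unsigned char buf[2048];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 */
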
 490static ssize_t ppp_write(struct file *file, const char __user *buf,
 491			 size_t count, loff_t *ppos)
 492{
 493	struct ppp_file *pf = file->private_data;
 494	struct sk_buff *skb;
 495	ssize_t ret;
 496
 497	if (!pf)
 498		return -ENXIO;

	/* All PPP packets should start with the 2-byte protocol */
	if (count < PPP_PROTO_LEN)
		return -EINVAL;

 499	ret = -ENOMEM;
 500	skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
 501	if (!skb)
 502		goto out;
 503	skb_reserve(skb, pf->hdrlen);
 504	ret = -EFAULT;
 505	if (copy_from_user(skb_put(skb, count), buf, count)) {
 506		kfree_skb(skb);
 507		goto out;
 508	}
 509
 510	switch (pf->kind) {
 511	case INTERFACE:
 512		ppp_xmit_process(PF_TO_PPP(pf), skb);
 513		break;
 514	case CHANNEL:
 515		skb_queue_tail(&pf->xq, skb);
 516		ppp_channel_push(PF_TO_CHANNEL(pf));
 517		break;
 518	}
 519
 520	ret = count;
 521
 522 out:
 523	return ret;
 524}
 525
 526/* No kernel lock - fine */
 527static __poll_t ppp_poll(struct file *file, poll_table *wait)
 528{
 529	struct ppp_file *pf = file->private_data;
 530	__poll_t mask;
 531
 532	if (!pf)
 533		return 0;
 534	poll_wait(file, &pf->rwait, wait);
 535	mask = EPOLLOUT | EPOLLWRNORM;
 536	if (skb_peek(&pf->rq))
 537		mask |= EPOLLIN | EPOLLRDNORM;
 538	if (pf->dead)
 539		mask |= EPOLLHUP;
 540	else if (pf->kind == INTERFACE) {
 541		/* see comment in ppp_read */
 542		struct ppp *ppp = PF_TO_PPP(pf);
 543
 544		ppp_recv_lock(ppp);
 545		if (ppp->n_channels == 0 &&
 546		    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
 547			mask |= EPOLLIN | EPOLLRDNORM;
 548		ppp_recv_unlock(ppp);
 549	}
 550
 551	return mask;
 552}
 553
 554#ifdef CONFIG_PPP_FILTER
 555static struct bpf_prog *get_filter(struct sock_fprog *uprog)
 556{
 557	struct sock_fprog_kern fprog;
 558	struct bpf_prog *res = NULL;
 559	int err;
 560
 561	if (!uprog->len)
 562		return NULL;
 563
 564	/* uprog->len is unsigned short, so no overflow here */
 565	fprog.len = uprog->len;
 566	fprog.filter = memdup_user(uprog->filter,
 567				   uprog->len * sizeof(struct sock_filter));
 568	if (IS_ERR(fprog.filter))
 569		return ERR_CAST(fprog.filter);
 570
 571	err = bpf_prog_create(&res, &fprog);
 572	kfree(fprog.filter);
 573
 574	return err ? ERR_PTR(err) : res;
 575}
 576
 577static struct bpf_prog *ppp_get_filter(struct sock_fprog __user *p)
 578{
 579	struct sock_fprog uprog;
 580
 581	if (copy_from_user(&uprog, p, sizeof(struct sock_fprog)))
 582		return ERR_PTR(-EFAULT);
 583	return get_filter(&uprog);
 584}
 585
 586#ifdef CONFIG_COMPAT
 587struct sock_fprog32 {
 588	unsigned short len;
 589	compat_caddr_t filter;
 590};
 591
 592#define PPPIOCSPASS32		_IOW('t', 71, struct sock_fprog32)
 593#define PPPIOCSACTIVE32		_IOW('t', 70, struct sock_fprog32)
 594
 595static struct bpf_prog *compat_ppp_get_filter(struct sock_fprog32 __user *p)
 596{
 597	struct sock_fprog32 uprog32;
 598	struct sock_fprog uprog;
 599
 600	if (copy_from_user(&uprog32, p, sizeof(struct sock_fprog32)))
 601		return ERR_PTR(-EFAULT);
 602	uprog.len = uprog32.len;
 603	uprog.filter = compat_ptr(uprog32.filter);
 604	return get_filter(&uprog);
 605}
 606#endif
 607#endif
 608
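/* Userspace sketch (illustrative, not part of this file; 'fd' is a
 * hypothetical descriptor attached to a ppp unit): installing a trivial
 * pass filter, as pppd's pass-filter option does; the single
 * instruction is BPF_RET|BPF_K, accepting every packet:
 *
 *	struct sock_filter insns[] = { { 0x06, 0, 0, 0xffffffff } };
 *	struct sock_fprog prog = { .len = 1, .filter = insns };
 *	ioctl(fd, PPPIOCSPASS, &prog);
 */
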
 609static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 610{
 611	struct ppp_file *pf;
 612	struct ppp *ppp;
 613	int err = -EFAULT, val, val2, i;
 614	struct ppp_idle32 idle32;
 615	struct ppp_idle64 idle64;
 616	struct npioctl npi;
 617	int unit, cflags;
 618	struct slcompress *vj;
 619	void __user *argp = (void __user *)arg;
 620	int __user *p = argp;
 621
 622	mutex_lock(&ppp_mutex);
 623
 624	pf = file->private_data;
 625	if (!pf) {
 626		err = ppp_unattached_ioctl(current->nsproxy->net_ns,
 627					   pf, file, cmd, arg);
 628		goto out;
 629	}
 630
 631	if (cmd == PPPIOCDETACH) {
 632		/*
 633		 * PPPIOCDETACH is no longer supported as it was heavily broken,
 634		 * and is only known to have been used by pppd older than
 635		 * ppp-2.4.2 (released November 2003).
 636		 */
 637		pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n",
 638			     current->comm, current->pid);
 639		err = -EINVAL;
 640		goto out;
 641	}
 642
 643	if (pf->kind == CHANNEL) {
 644		struct channel *pch;
 645		struct ppp_channel *chan;
 646
 647		pch = PF_TO_CHANNEL(pf);
 648
 649		switch (cmd) {
 650		case PPPIOCCONNECT:
 651			if (get_user(unit, p))
 652				break;
 653			err = ppp_connect_channel(pch, unit);
 654			break;
 655
 656		case PPPIOCDISCONN:
 657			err = ppp_disconnect_channel(pch);
 658			break;
 659
 660		default:
 661			down_read(&pch->chan_sem);
 662			chan = pch->chan;
 663			err = -ENOTTY;
 664			if (chan && chan->ops->ioctl)
 665				err = chan->ops->ioctl(chan, cmd, arg);
 666			up_read(&pch->chan_sem);
 667		}
 668		goto out;
 669	}
 670
 671	if (pf->kind != INTERFACE) {
 672		/* can't happen */
 673		pr_err("PPP: not interface or channel??\n");
 674		err = -EINVAL;
 675		goto out;
 676	}
 677
 678	ppp = PF_TO_PPP(pf);
 679	switch (cmd) {
 680	case PPPIOCSMRU:
 681		if (get_user(val, p))
 682			break;
 683		ppp->mru = val;
 684		err = 0;
 685		break;
 686
 687	case PPPIOCSFLAGS:
 688		if (get_user(val, p))
 689			break;
 690		ppp_lock(ppp);
 691		cflags = ppp->flags & ~val;
 692#ifdef CONFIG_PPP_MULTILINK
 693		if (!(ppp->flags & SC_MULTILINK) && (val & SC_MULTILINK))
 694			ppp->nextseq = 0;
 695#endif
 696		ppp->flags = val & SC_FLAG_BITS;
 697		ppp_unlock(ppp);
 698		if (cflags & SC_CCP_OPEN)
 699			ppp_ccp_closed(ppp);
 700		err = 0;
 701		break;
 702
 703	case PPPIOCGFLAGS:
 704		val = ppp->flags | ppp->xstate | ppp->rstate;
 705		if (put_user(val, p))
 706			break;
 707		err = 0;
 708		break;
 709
 710	case PPPIOCSCOMPRESS:
 711	{
 712		struct ppp_option_data data;
 713		if (copy_from_user(&data, argp, sizeof(data)))
 714			err = -EFAULT;
 715		else
 716			err = ppp_set_compress(ppp, &data);
 717		break;
 718	}
 719	case PPPIOCGUNIT:
 720		if (put_user(ppp->file.index, p))
 721			break;
 722		err = 0;
 723		break;
 724
 725	case PPPIOCSDEBUG:
 726		if (get_user(val, p))
 727			break;
 728		ppp->debug = val;
 729		err = 0;
 730		break;
 731
 732	case PPPIOCGDEBUG:
 733		if (put_user(ppp->debug, p))
 734			break;
 735		err = 0;
 736		break;
 737
 738	case PPPIOCGIDLE32:
 739		idle32.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
 740		idle32.recv_idle = (jiffies - ppp->last_recv) / HZ;
 741		if (copy_to_user(argp, &idle32, sizeof(idle32)))
 742			break;
 743		err = 0;
 744		break;
 745
 746	case PPPIOCGIDLE64:
 747		idle64.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
 748		idle64.recv_idle = (jiffies - ppp->last_recv) / HZ;
 749		if (copy_to_user(argp, &idle64, sizeof(idle64)))
 750			break;
 751		err = 0;
 752		break;
 753
 754	case PPPIOCSMAXCID:
 755		if (get_user(val, p))
 756			break;
 757		val2 = 15;
 758		if ((val >> 16) != 0) {
 759			val2 = val >> 16;
 760			val &= 0xffff;
 761		}
 762		vj = slhc_init(val2+1, val+1);
 763		if (IS_ERR(vj)) {
 764			err = PTR_ERR(vj);
 765			break;
 766		}
 767		ppp_lock(ppp);
 768		if (ppp->vj)
 769			slhc_free(ppp->vj);
 770		ppp->vj = vj;
 771		ppp_unlock(ppp);
 772		err = 0;
 773		break;
 774
 775	case PPPIOCGNPMODE:
 776	case PPPIOCSNPMODE:
 777		if (copy_from_user(&npi, argp, sizeof(npi)))
 778			break;
 779		err = proto_to_npindex(npi.protocol);
 780		if (err < 0)
 781			break;
 782		i = err;
 783		if (cmd == PPPIOCGNPMODE) {
 784			err = -EFAULT;
 785			npi.mode = ppp->npmode[i];
 786			if (copy_to_user(argp, &npi, sizeof(npi)))
 787				break;
 788		} else {
 789			ppp->npmode[i] = npi.mode;
 790			/* we may be able to transmit more packets now (??) */
 791			netif_wake_queue(ppp->dev);
 792		}
 793		err = 0;
 794		break;
 795
 796#ifdef CONFIG_PPP_FILTER
 797	case PPPIOCSPASS:
 798	case PPPIOCSACTIVE:
 799	{
 800		struct bpf_prog *filter = ppp_get_filter(argp);
 801		struct bpf_prog **which;
 802
 803		if (IS_ERR(filter)) {
 804			err = PTR_ERR(filter);
 805			break;
 806		}
 807		if (cmd == PPPIOCSPASS)
 808			which = &ppp->pass_filter;
 809		else
 810			which = &ppp->active_filter;
 811		ppp_lock(ppp);
 812		if (*which)
 813			bpf_prog_destroy(*which);
 814		*which = filter;
 815		ppp_unlock(ppp);
 816		err = 0;
 817		break;
 818	}
 819#endif /* CONFIG_PPP_FILTER */
 820
 821#ifdef CONFIG_PPP_MULTILINK
 822	case PPPIOCSMRRU:
 823		if (get_user(val, p))
 824			break;
 825		ppp_recv_lock(ppp);
 826		ppp->mrru = val;
 827		ppp_recv_unlock(ppp);
 828		err = 0;
 829		break;
 830#endif /* CONFIG_PPP_MULTILINK */
 831
 832	default:
 833		err = -ENOTTY;
 834	}
 835
 836out:
 837	mutex_unlock(&ppp_mutex);
 838
 839	return err;
 840}
 841
 842#ifdef CONFIG_COMPAT
 843struct ppp_option_data32 {
 844	compat_uptr_t		ptr;
 845	u32			length;
 846	compat_int_t		transmit;
 847};
 848#define PPPIOCSCOMPRESS32	_IOW('t', 77, struct ppp_option_data32)
 849
 850static long ppp_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 851{
 852	struct ppp_file *pf;
 853	int err = -ENOIOCTLCMD;
 854	void __user *argp = (void __user *)arg;
 855
 856	mutex_lock(&ppp_mutex);
 857
 858	pf = file->private_data;
 859	if (pf && pf->kind == INTERFACE) {
 860		struct ppp *ppp = PF_TO_PPP(pf);
 861		switch (cmd) {
 862#ifdef CONFIG_PPP_FILTER
 863		case PPPIOCSPASS32:
 864		case PPPIOCSACTIVE32:
 865		{
 866			struct bpf_prog *filter = compat_ppp_get_filter(argp);
 867			struct bpf_prog **which;
 868
 869			if (IS_ERR(filter)) {
 870				err = PTR_ERR(filter);
 871				break;
 872			}
 873			if (cmd == PPPIOCSPASS32)
 874				which = &ppp->pass_filter;
 875			else
 876				which = &ppp->active_filter;
 877			ppp_lock(ppp);
 878			if (*which)
 879				bpf_prog_destroy(*which);
 880			*which = filter;
 881			ppp_unlock(ppp);
 882			err = 0;
 883			break;
 884		}
 885#endif /* CONFIG_PPP_FILTER */
 886		case PPPIOCSCOMPRESS32:
 887		{
 888			struct ppp_option_data32 data32;
 889			if (copy_from_user(&data32, argp, sizeof(data32))) {
 890				err = -EFAULT;
 891			} else {
 892				struct ppp_option_data data = {
 893					.ptr = compat_ptr(data32.ptr),
 894					.length = data32.length,
 895					.transmit = data32.transmit
 896				};
 897				err = ppp_set_compress(ppp, &data);
 898			}
 899			break;
 900		}
 901		}
 902	}
 903	mutex_unlock(&ppp_mutex);
 904
 905	/* all other commands have compatible arguments */
 906	if (err == -ENOIOCTLCMD)
 907		err = ppp_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
 908
 909	return err;
 910}
 911#endif
 912
 913static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
 914			struct file *file, unsigned int cmd, unsigned long arg)
 915{
 916	int unit, err = -EFAULT;
 917	struct ppp *ppp;
 918	struct channel *chan;
 919	struct ppp_net *pn;
 920	int __user *p = (int __user *)arg;
 921
 922	switch (cmd) {
 923	case PPPIOCNEWUNIT:
 924		/* Create a new ppp unit */
 925		if (get_user(unit, p))
 926			break;
 927		err = ppp_create_interface(net, file, &unit);
 928		if (err < 0)
 929			break;
 930
 931		err = -EFAULT;
 932		if (put_user(unit, p))
 933			break;
 934		err = 0;
 935		break;
 936
 937	case PPPIOCATTACH:
 938		/* Attach to an existing ppp unit */
 939		if (get_user(unit, p))
 940			break;
 941		err = -ENXIO;
 942		pn = ppp_pernet(net);
 943		mutex_lock(&pn->all_ppp_mutex);
 944		ppp = ppp_find_unit(pn, unit);
 945		if (ppp) {
 946			refcount_inc(&ppp->file.refcnt);
 947			file->private_data = &ppp->file;
 948			err = 0;
 949		}
 950		mutex_unlock(&pn->all_ppp_mutex);
 951		break;
 952
 953	case PPPIOCATTCHAN:
 954		if (get_user(unit, p))
 955			break;
 956		err = -ENXIO;
 957		pn = ppp_pernet(net);
 958		spin_lock_bh(&pn->all_channels_lock);
 959		chan = ppp_find_channel(pn, unit);
 960		if (chan) {
 961			refcount_inc(&chan->file.refcnt);
 962			file->private_data = &chan->file;
 963			err = 0;
 964		}
 965		spin_unlock_bh(&pn->all_channels_lock);
 966		break;
 967
 968	default:
 969		err = -ENOTTY;
 970	}
 971
 972	return err;
 973}
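
/* Userspace sketch (illustrative, not part of this file): creating a
 * new unit through the unattached-state ioctls handled above, the way
 * pppd does it:
 *
 *	int fd = open("/dev/ppp", O_RDWR);
 *	int unit = -1;	(-1 lets the kernel pick a free unit)
 *	ioctl(fd, PPPIOCNEWUNIT, &unit);
 *	(on success the interface is named ppp<unit> by default)
 */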
 974
 975static const struct file_operations ppp_device_fops = {
 976	.owner		= THIS_MODULE,
 977	.read		= ppp_read,
 978	.write		= ppp_write,
 979	.poll		= ppp_poll,
 980	.unlocked_ioctl	= ppp_ioctl,
 981#ifdef CONFIG_COMPAT
 982	.compat_ioctl	= ppp_compat_ioctl,
 983#endif
 984	.open		= ppp_open,
 985	.release	= ppp_release,
 986	.llseek		= noop_llseek,
 987};
 988
 989static __net_init int ppp_init_net(struct net *net)
 990{
 991	struct ppp_net *pn = net_generic(net, ppp_net_id);
 992
 993	idr_init(&pn->units_idr);
 994	mutex_init(&pn->all_ppp_mutex);
 995
 996	INIT_LIST_HEAD(&pn->all_channels);
 997	INIT_LIST_HEAD(&pn->new_channels);
 998
 999	spin_lock_init(&pn->all_channels_lock);
1000
1001	return 0;
1002}
1003
1004static __net_exit void ppp_exit_net(struct net *net)
1005{
1006	struct ppp_net *pn = net_generic(net, ppp_net_id);
1007	struct net_device *dev;
1008	struct net_device *aux;
1009	struct ppp *ppp;
1010	LIST_HEAD(list);
1011	int id;
1012
1013	rtnl_lock();
1014	for_each_netdev_safe(net, dev, aux) {
1015		if (dev->netdev_ops == &ppp_netdev_ops)
1016			unregister_netdevice_queue(dev, &list);
1017	}
1018
1019	idr_for_each_entry(&pn->units_idr, ppp, id)
1020		/* Skip devices already unregistered by previous loop */
1021		if (!net_eq(dev_net(ppp->dev), net))
1022			unregister_netdevice_queue(ppp->dev, &list);
1023
1024	unregister_netdevice_many(&list);
1025	rtnl_unlock();
1026
1027	mutex_destroy(&pn->all_ppp_mutex);
1028	idr_destroy(&pn->units_idr);
1029	WARN_ON_ONCE(!list_empty(&pn->all_channels));
1030	WARN_ON_ONCE(!list_empty(&pn->new_channels));
1031}
1032
1033static struct pernet_operations ppp_net_ops = {
1034	.init = ppp_init_net,
1035	.exit = ppp_exit_net,
1036	.id   = &ppp_net_id,
1037	.size = sizeof(struct ppp_net),
1038};
1039
1040static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
1041{
1042	struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
1043	int ret;
1044
1045	mutex_lock(&pn->all_ppp_mutex);
1046
1047	if (unit < 0) {
1048		ret = unit_get(&pn->units_idr, ppp);
1049		if (ret < 0)
1050			goto err;
1051	} else {
1052		/* Caller asked for a specific unit number. Fail with -EEXIST
1053		 * if unavailable. For backward compatibility, return -EEXIST
1054		 * too if idr allocation fails; this makes pppd retry without
1055		 * requesting a specific unit number.
1056		 */
1057		if (unit_find(&pn->units_idr, unit)) {
1058			ret = -EEXIST;
1059			goto err;
1060		}
1061		ret = unit_set(&pn->units_idr, ppp, unit);
1062		if (ret < 0) {
1063			/* Rewrite error for backward compatibility */
1064			ret = -EEXIST;
1065			goto err;
1066		}
1067	}
1068	ppp->file.index = ret;
1069
1070	if (!ifname_is_set)
1071		snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
1072
1073	mutex_unlock(&pn->all_ppp_mutex);
1074
1075	ret = register_netdevice(ppp->dev);
1076	if (ret < 0)
1077		goto err_unit;
1078
1079	atomic_inc(&ppp_unit_count);
1080
1081	return 0;
1082
1083err_unit:
1084	mutex_lock(&pn->all_ppp_mutex);
1085	unit_put(&pn->units_idr, ppp->file.index);
1086err:
1087	mutex_unlock(&pn->all_ppp_mutex);
1088
1089	return ret;
1090}
1091
1092static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
1093			     const struct ppp_config *conf)
1094{
1095	struct ppp *ppp = netdev_priv(dev);
1096	int indx;
1097	int err;
1098	int cpu;
1099
1100	ppp->dev = dev;
1101	ppp->ppp_net = src_net;
1102	ppp->mru = PPP_MRU;
1103	ppp->owner = conf->file;
1104
1105	init_ppp_file(&ppp->file, INTERFACE);
1106	ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
1107
1108	for (indx = 0; indx < NUM_NP; ++indx)
1109		ppp->npmode[indx] = NPMODE_PASS;
1110	INIT_LIST_HEAD(&ppp->channels);
1111	spin_lock_init(&ppp->rlock);
1112	spin_lock_init(&ppp->wlock);
1113
1114	ppp->xmit_recursion = alloc_percpu(int);
1115	if (!ppp->xmit_recursion) {
1116		err = -ENOMEM;
1117		goto err1;
1118	}
1119	for_each_possible_cpu(cpu)
1120		(*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;
1121
1122#ifdef CONFIG_PPP_MULTILINK
1123	ppp->minseq = -1;
1124	skb_queue_head_init(&ppp->mrq);
1125#endif /* CONFIG_PPP_MULTILINK */
1126#ifdef CONFIG_PPP_FILTER
1127	ppp->pass_filter = NULL;
1128	ppp->active_filter = NULL;
1129#endif /* CONFIG_PPP_FILTER */
1130
1131	err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
1132	if (err < 0)
1133		goto err2;
1134
1135	conf->file->private_data = &ppp->file;
1136
1137	return 0;
1138err2:
1139	free_percpu(ppp->xmit_recursion);
1140err1:
1141	return err;
1142}
1143
1144static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
1145	[IFLA_PPP_DEV_FD]	= { .type = NLA_S32 },
1146};
1147
1148static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[],
1149			   struct netlink_ext_ack *extack)
1150{
1151	if (!data)
1152		return -EINVAL;
1153
1154	if (!data[IFLA_PPP_DEV_FD])
1155		return -EINVAL;
1156	if (nla_get_s32(data[IFLA_PPP_DEV_FD]) < 0)
1157		return -EBADF;
1158
1159	return 0;
1160}
1161
1162static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
1163			  struct nlattr *tb[], struct nlattr *data[],
1164			  struct netlink_ext_ack *extack)
1165{
1166	struct ppp_config conf = {
1167		.unit = -1,
1168		.ifname_is_set = true,
1169	};
1170	struct file *file;
1171	int err;
1172
1173	file = fget(nla_get_s32(data[IFLA_PPP_DEV_FD]));
1174	if (!file)
1175		return -EBADF;
1176
1177	/* rtnl_lock is already held here, but ppp_create_interface() locks
1178	 * ppp_mutex before holding rtnl_lock. Using mutex_trylock() avoids
1179	 * possible deadlock due to lock order inversion, at the cost of
1180	 * pushing the problem back to userspace.
1181	 */
1182	if (!mutex_trylock(&ppp_mutex)) {
1183		err = -EBUSY;
1184		goto out;
1185	}
1186
1187	if (file->f_op != &ppp_device_fops || file->private_data) {
1188		err = -EBADF;
1189		goto out_unlock;
1190	}
1191
1192	conf.file = file;
1193
1194	/* Don't use device name generated by the rtnetlink layer when ifname
1195	 * isn't specified. Let ppp_dev_configure() set the device name using
1196	 * the PPP unit identifier as suffix (i.e. ppp<unit_id>). This allows
1197	 * userspace to infer the device name using the PPPIOCGUNIT ioctl.
1198	 */
1199	if (!tb[IFLA_IFNAME])
1200		conf.ifname_is_set = false;
1201
1202	err = ppp_dev_configure(src_net, dev, &conf);
1203
1204out_unlock:
1205	mutex_unlock(&ppp_mutex);
1206out:
1207	fput(file);
1208
1209	return err;
1210}
1211
1212static void ppp_nl_dellink(struct net_device *dev, struct list_head *head)
1213{
1214	unregister_netdevice_queue(dev, head);
1215}
1216
1217static size_t ppp_nl_get_size(const struct net_device *dev)
1218{
1219	return 0;
1220}
1221
1222static int ppp_nl_fill_info(struct sk_buff *skb, const struct net_device *dev)
1223{
1224	return 0;
1225}
1226
1227static struct net *ppp_nl_get_link_net(const struct net_device *dev)
1228{
1229	struct ppp *ppp = netdev_priv(dev);
1230
1231	return ppp->ppp_net;
1232}
1233
1234static struct rtnl_link_ops ppp_link_ops __read_mostly = {
1235	.kind		= "ppp",
1236	.maxtype	= IFLA_PPP_MAX,
1237	.policy		= ppp_nl_policy,
1238	.priv_size	= sizeof(struct ppp),
1239	.setup		= ppp_setup,
1240	.validate	= ppp_nl_validate,
1241	.newlink	= ppp_nl_newlink,
1242	.dellink	= ppp_nl_dellink,
1243	.get_size	= ppp_nl_get_size,
1244	.fill_info	= ppp_nl_fill_info,
1245	.get_link_net	= ppp_nl_get_link_net,
1246};
1247
1248#define PPP_MAJOR	108
1249
1250/* Called at boot time if ppp is compiled into the kernel,
1251   or at module load time (from init_module) if compiled as a module. */
1252static int __init ppp_init(void)
1253{
1254	int err;
1255
1256	pr_info("PPP generic driver version " PPP_VERSION "\n");
1257
1258	err = register_pernet_device(&ppp_net_ops);
1259	if (err) {
1260		pr_err("failed to register PPP pernet device (%d)\n", err);
1261		goto out;
1262	}
1263
1264	err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
1265	if (err) {
1266		pr_err("failed to register PPP device (%d)\n", err);
1267		goto out_net;
1268	}
1269
1270	ppp_class = class_create("ppp");
1271	if (IS_ERR(ppp_class)) {
1272		err = PTR_ERR(ppp_class);
1273		goto out_chrdev;
1274	}
1275
1276	err = rtnl_link_register(&ppp_link_ops);
1277	if (err) {
1278		pr_err("failed to register rtnetlink PPP handler\n");
1279		goto out_class;
1280	}
1281
1282	/* not a big deal if we fail here :-) */
1283	device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
1284
1285	return 0;
1286
1287out_class:
1288	class_destroy(ppp_class);
1289out_chrdev:
1290	unregister_chrdev(PPP_MAJOR, "ppp");
1291out_net:
1292	unregister_pernet_device(&ppp_net_ops);
1293out:
1294	return err;
1295}
1296
1297/*
1298 * Network interface unit routines.
1299 */
1300static netdev_tx_t
1301ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
1302{
1303	struct ppp *ppp = netdev_priv(dev);
1304	int npi, proto;
1305	unsigned char *pp;
1306
1307	npi = ethertype_to_npindex(ntohs(skb->protocol));
1308	if (npi < 0)
1309		goto outf;
1310
1311	/* Drop, accept or reject the packet */
1312	switch (ppp->npmode[npi]) {
1313	case NPMODE_PASS:
1314		break;
1315	case NPMODE_QUEUE:
1316		/* it would be nice to have a way to tell the network
1317		   system to queue this one up for later. */
1318		goto outf;
1319	case NPMODE_DROP:
1320	case NPMODE_ERROR:
1321		goto outf;
1322	}
1323
1324	/* Put the 2-byte PPP protocol number on the front,
1325	   making sure there is room for the address and control fields. */
1326	if (skb_cow_head(skb, PPP_HDRLEN))
1327		goto outf;
1328
1329	pp = skb_push(skb, 2);
1330	proto = npindex_to_proto[npi];
1331	put_unaligned_be16(proto, pp);
1332
1333	skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
1334	ppp_xmit_process(ppp, skb);
1335
1336	return NETDEV_TX_OK;
1337
1338 outf:
1339	kfree_skb(skb);
1340	++dev->stats.tx_dropped;
1341	return NETDEV_TX_OK;
1342}
1343
1344static int
1345ppp_net_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			void __user *addr, int cmd)
1346{
1347	struct ppp *ppp = netdev_priv(dev);
1348	int err = -EFAULT;
1350	struct ppp_stats stats;
1351	struct ppp_comp_stats cstats;
1352	char *vers;
1353
1354	switch (cmd) {
1355	case SIOCGPPPSTATS:
1356		ppp_get_stats(ppp, &stats);
1357		if (copy_to_user(addr, &stats, sizeof(stats)))
1358			break;
1359		err = 0;
1360		break;
1361
1362	case SIOCGPPPCSTATS:
1363		memset(&cstats, 0, sizeof(cstats));
1364		if (ppp->xc_state)
1365			ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
1366		if (ppp->rc_state)
1367			ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
1368		if (copy_to_user(addr, &cstats, sizeof(cstats)))
1369			break;
1370		err = 0;
1371		break;
1372
1373	case SIOCGPPPVER:
1374		vers = PPP_VERSION;
1375		if (copy_to_user(addr, vers, strlen(vers) + 1))
1376			break;
1377		err = 0;
1378		break;
1379
1380	default:
1381		err = -EINVAL;
1382	}
1383
1384	return err;
1385}
1386
1387static void
1388ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
1389{
1390	struct ppp *ppp = netdev_priv(dev);
1391
1392	ppp_recv_lock(ppp);
1393	stats64->rx_packets = ppp->stats64.rx_packets;
1394	stats64->rx_bytes   = ppp->stats64.rx_bytes;
1395	ppp_recv_unlock(ppp);
1396
1397	ppp_xmit_lock(ppp);
1398	stats64->tx_packets = ppp->stats64.tx_packets;
1399	stats64->tx_bytes   = ppp->stats64.tx_bytes;
1400	ppp_xmit_unlock(ppp);
1401
1402	stats64->rx_errors        = dev->stats.rx_errors;
1403	stats64->tx_errors        = dev->stats.tx_errors;
1404	stats64->rx_dropped       = dev->stats.rx_dropped;
1405	stats64->tx_dropped       = dev->stats.tx_dropped;
1406	stats64->rx_length_errors = dev->stats.rx_length_errors;
1407}
1408
1409static int ppp_dev_init(struct net_device *dev)
1410{
1411	struct ppp *ppp;
1412
1413	netdev_lockdep_set_classes(dev);
1414
1415	ppp = netdev_priv(dev);
1416	/* Let the netdevice take a reference on the ppp file. This ensures
1417	 * that ppp_destroy_interface() won't run before the device gets
1418	 * unregistered.
1419	 */
1420	refcount_inc(&ppp->file.refcnt);
1421
1422	return 0;
1423}
1424
1425static void ppp_dev_uninit(struct net_device *dev)
1426{
1427	struct ppp *ppp = netdev_priv(dev);
1428	struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
1429
1430	ppp_lock(ppp);
1431	ppp->closing = 1;
1432	ppp_unlock(ppp);
1433
1434	mutex_lock(&pn->all_ppp_mutex);
1435	unit_put(&pn->units_idr, ppp->file.index);
1436	mutex_unlock(&pn->all_ppp_mutex);
1437
1438	ppp->owner = NULL;
1439
1440	ppp->file.dead = 1;
1441	wake_up_interruptible(&ppp->file.rwait);
1442}
1443
1444static void ppp_dev_priv_destructor(struct net_device *dev)
1445{
1446	struct ppp *ppp;
1447
1448	ppp = netdev_priv(dev);
1449	if (refcount_dec_and_test(&ppp->file.refcnt))
1450		ppp_destroy_interface(ppp);
1451}
1452
1453static const struct net_device_ops ppp_netdev_ops = {
1454	.ndo_init	 = ppp_dev_init,
1455	.ndo_uninit      = ppp_dev_uninit,
1456	.ndo_start_xmit  = ppp_start_xmit,
1457	.ndo_siocdevprivate = ppp_net_siocdevprivate,
1458	.ndo_get_stats64 = ppp_get_stats64,
1459};
1460
1461static struct device_type ppp_type = {
1462	.name = "ppp",
1463};
1464
1465static void ppp_setup(struct net_device *dev)
1466{
1467	dev->netdev_ops = &ppp_netdev_ops;
1468	SET_NETDEV_DEVTYPE(dev, &ppp_type);
1469
1470	dev->lltx = true;
1471
1472	dev->hard_header_len = PPP_HDRLEN;
1473	dev->mtu = PPP_MRU;
1474	dev->addr_len = 0;
1475	dev->tx_queue_len = 3;
1476	dev->type = ARPHRD_PPP;
1477	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1478	dev->priv_destructor = ppp_dev_priv_destructor;
1479	netif_keep_dst(dev);
1480}
1481
1482/*
1483 * Transmit-side routines.
1484 */
1485
1486/* Called to do any work queued up on the transmit side that can now be done */
1487static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
1488{
1489	ppp_xmit_lock(ppp);
1490	if (!ppp->closing) {
1491		ppp_push(ppp);
1492
1493		if (skb)
1494			skb_queue_tail(&ppp->file.xq, skb);
1495		while (!ppp->xmit_pending &&
1496		       (skb = skb_dequeue(&ppp->file.xq)))
1497			ppp_send_frame(ppp, skb);
1498		/* If there's no work left to do, tell the core net
1499		   code that we can accept some more. */
1500		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
1501			netif_wake_queue(ppp->dev);
1502		else
1503			netif_stop_queue(ppp->dev);
1504	} else {
1505		kfree_skb(skb);
1506	}
1507	ppp_xmit_unlock(ppp);
1508}
1509
1510static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
1511{
1512	local_bh_disable();
1513
1514	if (unlikely(*this_cpu_ptr(ppp->xmit_recursion)))
1515		goto err;
1516
1517	(*this_cpu_ptr(ppp->xmit_recursion))++;
1518	__ppp_xmit_process(ppp, skb);
1519	(*this_cpu_ptr(ppp->xmit_recursion))--;
1520
1521	local_bh_enable();
1522
1523	return;
1524
1525err:
1526	local_bh_enable();
1527
1528	kfree_skb(skb);
1529
1530	if (net_ratelimit())
1531		netdev_err(ppp->dev, "recursion detected\n");
1532}
1533
1534static inline struct sk_buff *
1535pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
1536{
1537	struct sk_buff *new_skb;
1538	int len;
1539	int new_skb_size = ppp->dev->mtu +
1540		ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
1541	int compressor_skb_size = ppp->dev->mtu +
1542		ppp->xcomp->comp_extra + PPP_HDRLEN;
1543	new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
1544	if (!new_skb) {
1545		if (net_ratelimit())
1546			netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
1547		return NULL;
1548	}
1549	if (ppp->dev->hard_header_len > PPP_HDRLEN)
1550		skb_reserve(new_skb,
1551			    ppp->dev->hard_header_len - PPP_HDRLEN);
1552
1553	/* compressor still expects A/C bytes in hdr */
1554	len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
1555				   new_skb->data, skb->len + 2,
1556				   compressor_skb_size);
1557	if (len > 0 && (ppp->flags & SC_CCP_UP)) {
1558		consume_skb(skb);
1559		skb = new_skb;
1560		skb_put(skb, len);
1561		skb_pull(skb, 2);	/* pull off A/C bytes */
1562	} else if (len == 0) {
1563		/* didn't compress, or CCP not up yet */
1564		consume_skb(new_skb);
1565		new_skb = skb;
1566	} else {
1567		/*
1568		 * (len < 0)
1569		 * MPPE requires that we do not send unencrypted
1570		 * frames.  The compressor will return -1 if we
1571		 * should drop the frame.  We cannot simply test
1572		 * the compress_proto because MPPE and MPPC share
1573		 * the same number.
1574		 */
1575		if (net_ratelimit())
1576			netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
1577		kfree_skb(skb);
1578		consume_skb(new_skb);
1579		new_skb = NULL;
1580	}
1581	return new_skb;
1582}
1583
1584/*
1585 * Compress and send a frame.
1586 * The caller should have locked the xmit path,
1587 * and xmit_pending should be 0.
1588 */
1589static void
1590ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1591{
1592	int proto = PPP_PROTO(skb);
1593	struct sk_buff *new_skb;
1594	int len;
1595	unsigned char *cp;
1596
1597	if (proto < 0x8000) {
1598#ifdef CONFIG_PPP_FILTER
1599		/* check if we should pass this packet */
1600		/* the filter instructions are constructed assuming
1601		   a four-byte PPP header on each packet */
1602		*(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_OUTBOUND_TAG);
1603		if (ppp->pass_filter &&
1604		    bpf_prog_run(ppp->pass_filter, skb) == 0) {
1605			if (ppp->debug & 1)
1606				netdev_printk(KERN_DEBUG, ppp->dev,
1607					      "PPP: outbound frame "
1608					      "not passed\n");
1609			kfree_skb(skb);
1610			return;
1611		}
1612		/* if this packet passes the active filter, record the time */
1613		if (!(ppp->active_filter &&
1614		      bpf_prog_run(ppp->active_filter, skb) == 0))
1615			ppp->last_xmit = jiffies;
1616		skb_pull(skb, 2);
1617#else
1618		/* for data packets, record the time */
1619		ppp->last_xmit = jiffies;
1620#endif /* CONFIG_PPP_FILTER */
1621	}
1622
1623	++ppp->stats64.tx_packets;
1624	ppp->stats64.tx_bytes += skb->len - 2;
1625
1626	switch (proto) {
1627	case PPP_IP:
1628		if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
1629			break;
1630		/* try to do VJ TCP header compression */
1631		new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
1632				    GFP_ATOMIC);
1633		if (!new_skb) {
1634			netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
1635			goto drop;
1636		}
1637		skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
1638		cp = skb->data + 2;
1639		len = slhc_compress(ppp->vj, cp, skb->len - 2,
1640				    new_skb->data + 2, &cp,
1641				    !(ppp->flags & SC_NO_TCP_CCID));
1642		if (cp == skb->data + 2) {
1643			/* didn't compress */
1644			consume_skb(new_skb);
1645		} else {
1646			if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
1647				proto = PPP_VJC_COMP;
1648				cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
1649			} else {
1650				proto = PPP_VJC_UNCOMP;
1651				cp[0] = skb->data[2];
1652			}
1653			consume_skb(skb);
1654			skb = new_skb;
1655			cp = skb_put(skb, len + 2);
1656			cp[0] = 0;
1657			cp[1] = proto;
1658		}
1659		break;
1660
1661	case PPP_CCP:
1662		/* peek at outbound CCP frames */
1663		ppp_ccp_peek(ppp, skb, 0);
1664		break;
1665	}
1666
1667	/* try to do packet compression */
1668	if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
1669	    proto != PPP_LCP && proto != PPP_CCP) {
1670		if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
1671			if (net_ratelimit())
1672				netdev_err(ppp->dev,
1673					   "ppp: compression required but "
1674					   "down - pkt dropped.\n");
1675			goto drop;
1676		}
1677		skb = pad_compress_skb(ppp, skb);
1678		if (!skb)
1679			goto drop;
1680	}
1681
1682	/*
1683	 * If we are waiting for traffic (demand dialling),
1684	 * queue it up for pppd to receive.
1685	 */
1686	if (ppp->flags & SC_LOOP_TRAFFIC) {
1687		if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
1688			goto drop;
1689		skb_queue_tail(&ppp->file.rq, skb);
1690		wake_up_interruptible(&ppp->file.rwait);
1691		return;
1692	}
1693
1694	ppp->xmit_pending = skb;
1695	ppp_push(ppp);
1696	return;
1697
1698 drop:
1699	kfree_skb(skb);
1700	++ppp->dev->stats.tx_errors;
1701}
1702
1703/*
1704 * Try to send the frame in xmit_pending.
1705 * The caller should have the xmit path locked.
1706 */
1707static void
1708ppp_push(struct ppp *ppp)
1709{
1710	struct list_head *list;
1711	struct channel *pch;
1712	struct sk_buff *skb = ppp->xmit_pending;
1713
1714	if (!skb)
1715		return;
1716
1717	list = &ppp->channels;
1718	if (list_empty(list)) {
1719		/* nowhere to send the packet, just drop it */
1720		ppp->xmit_pending = NULL;
1721		kfree_skb(skb);
1722		return;
1723	}
1724
1725	if ((ppp->flags & SC_MULTILINK) == 0) {
1726		/* not doing multilink: send it down the first channel */
1727		list = list->next;
1728		pch = list_entry(list, struct channel, clist);
1729
1730		spin_lock(&pch->downl);
1731		if (pch->chan) {
1732			if (pch->chan->ops->start_xmit(pch->chan, skb))
1733				ppp->xmit_pending = NULL;
1734		} else {
1735			/* channel got unregistered */
1736			kfree_skb(skb);
1737			ppp->xmit_pending = NULL;
1738		}
1739		spin_unlock(&pch->downl);
1740		return;
1741	}
1742
1743#ifdef CONFIG_PPP_MULTILINK
1744	/* Multilink: fragment the packet over as many links
1745	   as can take the packet at the moment. */
1746	if (!ppp_mp_explode(ppp, skb))
1747		return;
1748#endif /* CONFIG_PPP_MULTILINK */
1749
1750	ppp->xmit_pending = NULL;
1751	kfree_skb(skb);
1752}
1753
1754#ifdef CONFIG_PPP_MULTILINK
1755static bool mp_protocol_compress __read_mostly = true;
1756module_param(mp_protocol_compress, bool, 0644);
1757MODULE_PARM_DESC(mp_protocol_compress,
1758		 "compress protocol id in multilink fragments");
1759
1760/*
1761 * Divide a packet to be transmitted into fragments and
1762 * send them out the individual links.
1763 */
1764static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1765{
1766	int len, totlen;
1767	int i, bits, hdrlen, mtu;
1768	int flen;
1769	int navail, nfree, nzero;
1770	int nbigger;
1771	int totspeed;
1772	int totfree;
1773	unsigned char *p, *q;
1774	struct list_head *list;
1775	struct channel *pch;
1776	struct sk_buff *frag;
1777	struct ppp_channel *chan;
1778
1779	totspeed = 0; /* total bitrate of the bundle */
1780	nfree = 0; /* # channels which have no packet already queued */
1781	navail = 0; /* total # of usable channels (not deregistered) */
1782	nzero = 0; /* number of channels with zero speed associated */
1783	totfree = 0; /* total # of channels available and having no
1784		      * queued packets before starting the fragmentation
1785		      */
1786
1787	hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
1788	i = 0;
1789	list_for_each_entry(pch, &ppp->channels, clist) {
1790		if (pch->chan) {
1791			pch->avail = 1;
1792			navail++;
1793			pch->speed = pch->chan->speed;
1794		} else {
1795			pch->avail = 0;
1796		}
1797		if (pch->avail) {
1798			if (skb_queue_empty(&pch->file.xq) ||
1799				!pch->had_frag) {
1800					if (pch->speed == 0)
1801						nzero++;
1802					else
1803						totspeed += pch->speed;
1804
1805					pch->avail = 2;
1806					++nfree;
1807					++totfree;
1808				}
1809			if (!pch->had_frag && i < ppp->nxchan)
1810				ppp->nxchan = i;
1811		}
1812		++i;
1813	}
1814	/*
1815	 * Don't start sending this packet unless at least half of
1816	 * the channels are free.  This gives much better TCP
1817	 * performance if we have a lot of channels.
1818	 */
1819	if (nfree == 0 || nfree < navail / 2)
1820		return 0; /* can't take now, leave it in xmit_pending */
1821
1822	/* Do protocol field compression */
1823	p = skb->data;
1824	len = skb->len;
1825	if (*p == 0 && mp_protocol_compress) {
1826		++p;
1827		--len;
1828	}
1829
1830	totlen = len;
1831	nbigger = len % nfree;
1832
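	/* Illustrative arithmetic for the even split below: with
	 * len == 1000 and nfree == 3, each fragment gets 1000/3 == 333
	 * bytes and nbigger == 1000 % 3 == 1, so exactly one fragment
	 * is extended to 334 bytes and the fragments sum back to 1000.
	 */
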
1833	/* skip to the channel after the one we last used
1834	   and start at that one */
1835	list = &ppp->channels;
1836	for (i = 0; i < ppp->nxchan; ++i) {
1837		list = list->next;
1838		if (list == &ppp->channels) {
1839			i = 0;
1840			break;
1841		}
1842	}
1843
1844	/* create a fragment for each channel */
1845	bits = B;
1846	while (len > 0) {
1847		list = list->next;
1848		if (list == &ppp->channels) {
1849			i = 0;
1850			continue;
1851		}
1852		pch = list_entry(list, struct channel, clist);
1853		++i;
1854		if (!pch->avail)
1855			continue;
1856
1857		/*
1858		 * Skip this channel if it has a fragment pending already and
1859		 * we haven't given a fragment to all of the free channels.
1860		 */
1861		if (pch->avail == 1) {
1862			if (nfree > 0)
1863				continue;
1864		} else {
1865			pch->avail = 1;
1866		}
1867
1868		/* check the channel's mtu and whether it is still attached. */
1869		spin_lock(&pch->downl);
1870		if (pch->chan == NULL) {
1871			/* can't use this channel, it's being deregistered */
1872			if (pch->speed == 0)
1873				nzero--;
1874			else
1875				totspeed -= pch->speed;
1876
1877			spin_unlock(&pch->downl);
1878			pch->avail = 0;
1879			totlen = len;
1880			totfree--;
1881			nfree--;
1882			if (--navail == 0)
1883				break;
1884			continue;
1885		}
1886
1887		/*
1888		 * If the channel speed is not set, divide the packet
1889		 * evenly among the free channels; otherwise divide it
1890		 * according to the speed of the channel we are going
1891		 * to transmit on.
1892		 */
1893		flen = len;
1894		if (nfree > 0) {
1895			if (pch->speed == 0) {
1896				flen = len/nfree;
1897				if (nbigger > 0) {
1898					flen++;
1899					nbigger--;
1900				}
1901			} else {
1902				flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
1903					((totspeed*totfree)/pch->speed)) - hdrlen;
1904				if (nbigger > 0) {
1905					flen += ((totfree - nzero)*pch->speed)/totspeed;
1906					nbigger -= ((totfree - nzero)*pch->speed)/
1907							totspeed;
1908				}
1909			}
1910			nfree--;
1911		}
1912
1913		/*
1914		 * Check if we are on the last channel or we
1915		 * exceeded the length of the data to
1916		 * fragment.
1917		 */
1918		if ((nfree <= 0) || (flen > len))
1919			flen = len;
1920		/*
1921		 * It is not worth transmitting on slow channels:
1922		 * in that case the resulting flen, according to the
1923		 * above formula, will be zero or less.
1924		 * Skip the channel in this case.
1925		 */
1926		if (flen <= 0) {
1927			pch->avail = 2;
1928			spin_unlock(&pch->downl);
1929			continue;
1930		}
1931
1932		/*
1933		 * hdrlen includes the 2-byte PPP protocol field, but the
1934		 * MTU counts only the payload excluding the protocol field.
1935		 * (RFC1661 Section 2)
1936		 */
1937		mtu = pch->chan->mtu - (hdrlen - 2);
1938		if (mtu < 4)
1939			mtu = 4;
1940		if (flen > mtu)
1941			flen = mtu;
1942		if (flen == len)
1943			bits |= E;
1944		frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
1945		if (!frag)
1946			goto noskb;
1947		q = skb_put(frag, flen + hdrlen);
1948
1949		/* make the MP header */
1950		put_unaligned_be16(PPP_MP, q);
1951		if (ppp->flags & SC_MP_XSHORTSEQ) {
1952			q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
1953			q[3] = ppp->nxseq;
1954		} else {
1955			q[2] = bits;
1956			q[3] = ppp->nxseq >> 16;
1957			q[4] = ppp->nxseq >> 8;
1958			q[5] = ppp->nxseq;
1959		}
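
		/* Illustrative layout: with long sequence numbers and
		 * nxseq == 0x000102, the header built above for the first
		 * fragment of a multi-fragment packet is
		 * 00 3d 80 00 01 02 (PPP_MP, the B bit, then the
		 * 24-bit sequence number).
		 */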
1960
1961		memcpy(q + hdrlen, p, flen);
1962
1963		/* try to send it down the channel */
1964		chan = pch->chan;
1965		if (!skb_queue_empty(&pch->file.xq) ||
1966			!chan->ops->start_xmit(chan, frag))
1967			skb_queue_tail(&pch->file.xq, frag);
1968		pch->had_frag = 1;
1969		p += flen;
1970		len -= flen;
1971		++ppp->nxseq;
1972		bits = 0;
1973		spin_unlock(&pch->downl);
1974	}
1975	ppp->nxchan = i;
1976
1977	return 1;
1978
1979 noskb:
1980	spin_unlock(&pch->downl);
1981	if (ppp->debug & 1)
1982		netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
1983	++ppp->dev->stats.tx_errors;
1984	++ppp->nxseq;
1985	return 1;	/* abandon the frame */
1986}
1987#endif /* CONFIG_PPP_MULTILINK */
1988
1989/* Try to send data out on a channel */
1990static void __ppp_channel_push(struct channel *pch)
1991{
1992	struct sk_buff *skb;
1993	struct ppp *ppp;
1994
1995	spin_lock(&pch->downl);
1996	if (pch->chan) {
1997		while (!skb_queue_empty(&pch->file.xq)) {
1998			skb = skb_dequeue(&pch->file.xq);
1999			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
2000				/* put the packet back and try again later */
2001				skb_queue_head(&pch->file.xq, skb);
2002				break;
2003			}
2004		}
2005	} else {
2006		/* channel got deregistered */
2007		skb_queue_purge(&pch->file.xq);
2008	}
2009	spin_unlock(&pch->downl);
2010	/* see if there is anything from the attached unit to be sent */
2011	if (skb_queue_empty(&pch->file.xq)) {
2012		ppp = pch->ppp;
2013		if (ppp)
2014			__ppp_xmit_process(ppp, NULL);
2015	}
2016}
2017
2018static void ppp_channel_push(struct channel *pch)
2019{
2020	read_lock_bh(&pch->upl);
2021	if (pch->ppp) {
2022		(*this_cpu_ptr(pch->ppp->xmit_recursion))++;
2023		__ppp_channel_push(pch);
2024		(*this_cpu_ptr(pch->ppp->xmit_recursion))--;
2025	} else {
2026		__ppp_channel_push(pch);
2027	}
2028	read_unlock_bh(&pch->upl);
2029}
2030
2031/*
2032 * Receive-side routines.
2033 */
2034
2035struct ppp_mp_skb_parm {
2036	u32		sequence;
2037	u8		BEbits;
2038};
2039#define PPP_MP_CB(skb)	((struct ppp_mp_skb_parm *)((skb)->cb))
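
/* Illustrative use: the reconstruction code below stores per-fragment
 * metadata in the skb control buffer, e.g. PPP_MP_CB(skb)->sequence = seq,
 * and tests PPP_MP_CB(skb)->BEbits & B to find a packet's first fragment.
 */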
2040
2041static inline void
2042ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
2043{
2044	ppp_recv_lock(ppp);
2045	if (!ppp->closing)
2046		ppp_receive_frame(ppp, skb, pch);
2047	else
2048		kfree_skb(skb);
2049	ppp_recv_unlock(ppp);
2050}
2051
2052/**
2053 * __ppp_decompress_proto - Decompress protocol field, slim version.
2054 * @skb: Socket buffer where protocol field should be decompressed. It must have
2055 *	 at least 1 byte of head room and 1 byte of linear data. First byte of
2056 *	 data must be a protocol field byte.
2057 *
2058 * Decompress protocol field in PPP header if it's compressed, e.g. when
2059 * Protocol-Field-Compression (PFC) was negotiated. No checks w.r.t. skb data
2060 * length are done in this function.
2061 */
2062static void __ppp_decompress_proto(struct sk_buff *skb)
2063{
2064	if (skb->data[0] & 0x01)
2065		*(u8 *)skb_push(skb, 1) = 0x00;
2066}
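
/* PFC example (illustrative): with Protocol-Field-Compression negotiated,
 * an IPv4 frame can arrive with the single protocol byte 0x21. Since
 * 0x21 has the low bit set, __ppp_decompress_proto() pushes a 0x00 byte,
 * restoring the uncompressed 0x00 0x21 form that PPP_PROTO() expects.
 */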
2067
2068/**
2069 * ppp_decompress_proto - Check skb data room and decompress protocol field.
2070 * @skb: Socket buffer where protocol field should be decompressed. First byte
2071 *	 of data must be a protocol field byte.
2072 *
2073 * Decompress protocol field in PPP header if it's compressed, e.g. when
2074 * Protocol-Field-Compression (PFC) was negotiated. This function also makes
2075 * sure that skb data room is sufficient for Protocol field, before and after
2076 * decompression.
2077 *
2078 * Return: true - decompressed successfully, false - not enough room in skb.
2079 */
2080static bool ppp_decompress_proto(struct sk_buff *skb)
2081{
2082	/* At least one byte should be present (if protocol is compressed) */
2083	if (!pskb_may_pull(skb, 1))
2084		return false;
2085
2086	__ppp_decompress_proto(skb);
2087
2088	/* Protocol field should occupy 2 bytes when not compressed */
2089	return pskb_may_pull(skb, 2);
2090}
2091
2092void
2093ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
2094{
2095	struct channel *pch = chan->ppp;
2096	int proto;
2097
2098	if (!pch) {
2099		kfree_skb(skb);
2100		return;
2101	}
2102
2103	read_lock_bh(&pch->upl);
2104	if (!ppp_decompress_proto(skb)) {
2105		kfree_skb(skb);
2106		if (pch->ppp) {
2107			++pch->ppp->dev->stats.rx_length_errors;
2108			ppp_receive_error(pch->ppp);
2109		}
2110		goto done;
2111	}
2112
2113	proto = PPP_PROTO(skb);
2114	if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
2115		/* put it on the channel queue */
2116		skb_queue_tail(&pch->file.rq, skb);
2117		/* drop old frames if queue too long */
2118		while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
2119		       (skb = skb_dequeue(&pch->file.rq)))
2120			kfree_skb(skb);
2121		wake_up_interruptible(&pch->file.rwait);
2122	} else {
2123		ppp_do_recv(pch->ppp, skb, pch);
2124	}
2125
2126done:
2127	read_unlock_bh(&pch->upl);
2128}
2129
2130/* Put a 0-length skb in the receive queue as an error indication */
2131void
2132ppp_input_error(struct ppp_channel *chan, int code)
2133{
2134	struct channel *pch = chan->ppp;
2135	struct sk_buff *skb;
2136
2137	if (!pch)
2138		return;
2139
2140	read_lock_bh(&pch->upl);
2141	if (pch->ppp) {
2142		skb = alloc_skb(0, GFP_ATOMIC);
2143		if (skb) {
2144			skb->len = 0;		/* probably unnecessary */
2145			skb->cb[0] = code;
2146			ppp_do_recv(pch->ppp, skb, pch);
2147		}
2148	}
2149	read_unlock_bh(&pch->upl);
2150}
2151
2152/*
2153 * We come in here to process a received frame.
2154 * The receive side of the ppp unit is locked.
2155 */
2156static void
2157ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
2158{
2159	/* note: a 0-length skb is used as an error indication */
2160	if (skb->len > 0) {
2161		skb_checksum_complete_unset(skb);
2162#ifdef CONFIG_PPP_MULTILINK
2163		/* XXX do channel-level decompression here */
2164		if (PPP_PROTO(skb) == PPP_MP)
2165			ppp_receive_mp_frame(ppp, skb, pch);
2166		else
2167#endif /* CONFIG_PPP_MULTILINK */
2168			ppp_receive_nonmp_frame(ppp, skb);
2169	} else {
2170		kfree_skb(skb);
2171		ppp_receive_error(ppp);
2172	}
2173}
2174
2175static void
2176ppp_receive_error(struct ppp *ppp)
2177{
2178	++ppp->dev->stats.rx_errors;
2179	if (ppp->vj)
2180		slhc_toss(ppp->vj);
2181}
2182
2183static void
2184ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
2185{
2186	struct sk_buff *ns;
2187	int proto, len, npi;
2188
2189	/*
2190	 * Decompress the frame, if compressed.
2191	 * Note that some decompressors need to see uncompressed frames
2192	 * that come in as well as compressed frames.
2193	 */
2194	if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) &&
2195	    (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
2196		skb = ppp_decompress_frame(ppp, skb);
2197
2198	if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
2199		goto err;
2200
2201	/* At this point the "Protocol" field MUST be decompressed, either in
2202	 * ppp_input(), ppp_decompress_frame() or in ppp_receive_mp_frame().
2203	 */
2204	proto = PPP_PROTO(skb);
2205	switch (proto) {
2206	case PPP_VJC_COMP:
2207		/* decompress VJ compressed packets */
2208		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
2209			goto err;
2210
2211		if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
2212			/* copy to a new sk_buff with more tailroom */
2213			ns = dev_alloc_skb(skb->len + 128);
2214			if (!ns) {
2215				netdev_err(ppp->dev, "PPP: no memory "
2216					   "(VJ decomp)\n");
2217				goto err;
2218			}
2219			skb_reserve(ns, 2);
2220			skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
2221			consume_skb(skb);
2222			skb = ns;
2223		}
2224		else
2225			skb->ip_summed = CHECKSUM_NONE;
2226
2227		len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
2228		if (len <= 0) {
2229			netdev_printk(KERN_DEBUG, ppp->dev,
2230				      "PPP: VJ decompression error\n");
2231			goto err;
2232		}
2233		len += 2;
2234		if (len > skb->len)
2235			skb_put(skb, len - skb->len);
2236		else if (len < skb->len)
2237			skb_trim(skb, len);
2238		proto = PPP_IP;
2239		break;
2240
2241	case PPP_VJC_UNCOMP:
2242		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
2243			goto err;
2244
2245		/* Until we fix the decompressor, we need to make sure
2246		 * the data portion is linear.
2247		 */
2248		if (!pskb_may_pull(skb, skb->len))
2249			goto err;
2250
2251		if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
2252			netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
2253			goto err;
2254		}
2255		proto = PPP_IP;
2256		break;
2257
2258	case PPP_CCP:
2259		ppp_ccp_peek(ppp, skb, 1);
2260		break;
2261	}
2262
2263	++ppp->stats64.rx_packets;
2264	ppp->stats64.rx_bytes += skb->len - 2;
2265
2266	npi = proto_to_npindex(proto);
2267	if (npi < 0) {
2268		/* control or unknown frame - pass it to pppd */
2269		skb_queue_tail(&ppp->file.rq, skb);
2270		/* limit queue length by dropping old frames */
2271		while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
2272		       (skb = skb_dequeue(&ppp->file.rq)))
2273			kfree_skb(skb);
2274		/* wake up any process polling or blocking on read */
2275		wake_up_interruptible(&ppp->file.rwait);
2276
2277	} else {
2278		/* network protocol frame - give it to the kernel */
2279
2280#ifdef CONFIG_PPP_FILTER
2281		/* check if the packet passes the pass and active filters */
2282		/* the filter instructions are constructed assuming
2283		   a four-byte PPP header on each packet */
2284		if (ppp->pass_filter || ppp->active_filter) {
2285			if (skb_unclone(skb, GFP_ATOMIC))
2286				goto err;
2287
2288			*(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_INBOUND_TAG);
2289			if (ppp->pass_filter &&
2290			    bpf_prog_run(ppp->pass_filter, skb) == 0) {
2291				if (ppp->debug & 1)
2292					netdev_printk(KERN_DEBUG, ppp->dev,
2293						      "PPP: inbound frame "
2294						      "not passed\n");
2295				kfree_skb(skb);
2296				return;
2297			}
2298			if (!(ppp->active_filter &&
2299			      bpf_prog_run(ppp->active_filter, skb) == 0))
2300				ppp->last_recv = jiffies;
2301			__skb_pull(skb, 2);
2302		} else
2303#endif /* CONFIG_PPP_FILTER */
2304			ppp->last_recv = jiffies;
2305
2306		if ((ppp->dev->flags & IFF_UP) == 0 ||
2307		    ppp->npmode[npi] != NPMODE_PASS) {
2308			kfree_skb(skb);
2309		} else {
2310			/* chop off protocol */
2311			skb_pull_rcsum(skb, 2);
2312			skb->dev = ppp->dev;
2313			skb->protocol = htons(npindex_to_ethertype[npi]);
2314			skb_reset_mac_header(skb);
2315			skb_scrub_packet(skb, !net_eq(ppp->ppp_net,
2316						      dev_net(ppp->dev)));
2317			netif_rx(skb);
2318		}
2319	}
2320	return;
2321
2322 err:
2323	kfree_skb(skb);
2324	ppp_receive_error(ppp);
2325}
2326
2327static struct sk_buff *
2328ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
2329{
2330	int proto = PPP_PROTO(skb);
2331	struct sk_buff *ns;
2332	int len;
2333
2334	/* Until we fix all the decompressors, we need to make sure
2335	 * the data portion is linear.
2336	 */
2337	if (!pskb_may_pull(skb, skb->len))
2338		goto err;
2339
2340	if (proto == PPP_COMP) {
2341		int obuff_size;
2342
2343		switch(ppp->rcomp->compress_proto) {
2344		case CI_MPPE:
2345			obuff_size = ppp->mru + PPP_HDRLEN + 1;
2346			break;
2347		default:
2348			obuff_size = ppp->mru + PPP_HDRLEN;
2349			break;
2350		}
2351
2352		ns = dev_alloc_skb(obuff_size);
2353		if (!ns) {
2354			netdev_err(ppp->dev, "ppp_decompress_frame: "
2355				   "no memory\n");
2356			goto err;
2357		}
2358		/* the decompressor still expects the A/C bytes in the hdr */
2359		len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
2360				skb->len + 2, ns->data, obuff_size);
2361		if (len < 0) {
2362			/* Pass the compressed frame to pppd as an
2363			   error indication. */
2364			if (len == DECOMP_FATALERROR)
2365				ppp->rstate |= SC_DC_FERROR;
2366			kfree_skb(ns);
2367			goto err;
2368		}
2369
2370		consume_skb(skb);
2371		skb = ns;
2372		skb_put(skb, len);
2373		skb_pull(skb, 2);	/* pull off the A/C bytes */
2374
2375		/* Don't call __ppp_decompress_proto() here, but instead rely on
2376		 * corresponding algo (mppe/bsd/deflate) to decompress it.
2377		 */
2378	} else {
2379		/* Uncompressed frame - pass to decompressor so it
2380		   can update its dictionary if necessary. */
2381		if (ppp->rcomp->incomp)
2382			ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
2383					   skb->len + 2);
2384	}
2385
2386	return skb;
2387
2388 err:
2389	ppp->rstate |= SC_DC_ERROR;
2390	ppp_receive_error(ppp);
2391	return skb;
2392}
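/*
 * Sketch of the decompressor contract relied on above, using the
 * DECOMP_* codes from include/linux/ppp-comp.h:
 *
 *	len = ppp->rcomp->decompress(state, ibuf, isize, obuf, osize);
 *
 * A non-negative return is the length of the decompressed frame in
 * obuf, still carrying the two A/C bytes that are pulled off
 * afterwards.  A negative return means failure: DECOMP_FATALERROR
 * additionally sets SC_DC_FERROR, and in either case the original
 * compressed frame is handed back so that pppd sees it as an error
 * indication.
 */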
2393
2394#ifdef CONFIG_PPP_MULTILINK
2395/*
2396 * Receive a multilink frame.
2397 * We put it on the reconstruction queue and then pull off
2398 * as many completed frames as we can.
2399 */
2400static void
2401ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
2402{
2403	u32 mask, seq;
2404	struct channel *ch;
2405	int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
2406
2407	if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
2408		goto err;		/* no good, throw it away */
2409
2410	/* Decode sequence number and begin/end bits */
2411	if (ppp->flags & SC_MP_SHORTSEQ) {
2412		seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
2413		mask = 0xfff;
2414	} else {
2415		seq = (skb->data[3] << 16) | (skb->data[4] << 8) | skb->data[5];
2416		mask = 0xffffff;
2417	}
2418	PPP_MP_CB(skb)->BEbits = skb->data[2];
2419	skb_pull(skb, mphdrlen);	/* pull off PPP and MP headers */
2420
2421	/*
2422	 * Do protocol ID decompression on the first fragment of each packet.
2423	 * We have to do that here, because ppp_receive_nonmp_frame() expects
2424	 * decompressed protocol field.
2425	 */
2426	if (PPP_MP_CB(skb)->BEbits & B)
2427		__ppp_decompress_proto(skb);
2428
2429	/*
2430	 * Expand sequence number to 32 bits, making it as close
2431	 * as possible to ppp->minseq.
2432	 */
2433	seq |= ppp->minseq & ~mask;
2434	if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
2435		seq += mask + 1;
2436	else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
2437		seq -= mask + 1;	/* should never happen */
2438	PPP_MP_CB(skb)->sequence = seq;
2439	pch->lastseq = seq;
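	/*
	 * Worked example (illustrative): with 12-bit short sequence
	 * numbers, mask = 0xfff.  If ppp->minseq = 0x10ffe and the
	 * fragment carries seq = 0x002, OR-ing in the high bits of
	 * minseq gives 0x10002; minseq - seq = 0xffc exceeds
	 * mask >> 1 = 0x7ff, so mask + 1 is added and the expanded
	 * sequence number becomes 0x11002, the 32-bit value congruent
	 * to 0x002 that lies closest to minseq.
	 */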
2440
2441	/*
2442	 * If this packet comes before the next one we were expecting,
2443	 * drop it.
2444	 */
2445	if (seq_before(seq, ppp->nextseq)) {
2446		kfree_skb(skb);
2447		++ppp->dev->stats.rx_dropped;
2448		ppp_receive_error(ppp);
2449		return;
2450	}
2451
2452	/*
2453	 * Reevaluate minseq, the minimum over all channels of the
2454	 * last sequence number received on each channel.  Because of
2455	 * the increasing sequence number rule, we know that any fragment
2456	 * before `minseq' which hasn't arrived is never going to arrive.
2457	 * The list of channels can't change because we have the receive
2458	 * side of the ppp unit locked.
2459	 */
2460	list_for_each_entry(ch, &ppp->channels, clist) {
2461		if (seq_before(ch->lastseq, seq))
2462			seq = ch->lastseq;
2463	}
2464	if (seq_before(ppp->minseq, seq))
2465		ppp->minseq = seq;
2466
2467	/* Put the fragment on the reconstruction queue */
2468	ppp_mp_insert(ppp, skb);
2469
2470	/* If the queue is getting long, don't wait any longer for packets
2471	   before the start of the queue. */
2472	if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
2473		struct sk_buff *mskb = skb_peek(&ppp->mrq);
2474		if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence))
2475			ppp->minseq = PPP_MP_CB(mskb)->sequence;
2476	}
2477
2478	/* Pull completed packets off the queue and receive them. */
2479	while ((skb = ppp_mp_reconstruct(ppp))) {
2480		if (pskb_may_pull(skb, 2))
2481			ppp_receive_nonmp_frame(ppp, skb);
2482		else {
2483			++ppp->dev->stats.rx_length_errors;
2484			kfree_skb(skb);
2485			ppp_receive_error(ppp);
2486		}
2487	}
2488
2489	return;
2490
2491 err:
2492	kfree_skb(skb);
2493	ppp_receive_error(ppp);
2494}
2495
2496/*
2497 * Insert a fragment on the MP reconstruction queue.
2498 * The queue is ordered by increasing sequence number.
2499 */
2500static void
2501ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
2502{
2503	struct sk_buff *p;
2504	struct sk_buff_head *list = &ppp->mrq;
2505	u32 seq = PPP_MP_CB(skb)->sequence;
2506
2507	/* N.B. we don't need to lock the list lock because we have the
2508	   ppp unit receive-side lock. */
2509	skb_queue_walk(list, p) {
2510		if (seq_before(seq, PPP_MP_CB(p)->sequence))
2511			break;
2512	}
2513	__skb_queue_before(list, p, skb);
2514}
2515
2516/*
2517 * Reconstruct a packet from the MP fragment queue.
2518 * We go through increasing sequence numbers until we find a
2519 * complete packet, or we get to the sequence number for a fragment
2520 * which hasn't arrived but might still do so.
2521 */
2522static struct sk_buff *
2523ppp_mp_reconstruct(struct ppp *ppp)
2524{
2525	u32 seq = ppp->nextseq;
2526	u32 minseq = ppp->minseq;
2527	struct sk_buff_head *list = &ppp->mrq;
2528	struct sk_buff *p, *tmp;
2529	struct sk_buff *head, *tail;
2530	struct sk_buff *skb = NULL;
2531	int lost = 0, len = 0;
2532
2533	if (ppp->mrru == 0)	/* do nothing until mrru is set */
2534		return NULL;
2535	head = __skb_peek(list);
2536	tail = NULL;
2537	skb_queue_walk_safe(list, p, tmp) {
2538	again:
2539		if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
2540			/* this can't happen; ignore the skb anyway */
2541			netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
2542				   "seq %u < %u\n",
2543				   PPP_MP_CB(p)->sequence, seq);
2544			__skb_unlink(p, list);
2545			kfree_skb(p);
2546			continue;
2547		}
2548		if (PPP_MP_CB(p)->sequence != seq) {
2549			u32 oldseq;
2550			/* Fragment `seq' is missing.  If it is after
2551			   minseq, it might arrive later, so stop here. */
2552			if (seq_after(seq, minseq))
2553				break;
2554			/* Fragment `seq' is lost, keep going. */
2555			lost = 1;
2556			oldseq = seq;
2557			seq = seq_before(minseq, PPP_MP_CB(p)->sequence) ?
2558				minseq + 1 : PPP_MP_CB(p)->sequence;
2559
2560			if (ppp->debug & 1)
2561				netdev_printk(KERN_DEBUG, ppp->dev,
2562					      "lost frag %u..%u\n",
2563					      oldseq, seq-1);
2564
2565			goto again;
2566		}
2567
2568		/*
2569		 * At this point we know that all the fragments from
2570		 * ppp->nextseq to seq are either present or lost.
2571		 * Also, there are no complete packets in the queue
2572		 * that have no missing fragments and end before this
2573		 * fragment.
2574		 */
2575
2576		/* B bit set indicates this fragment starts a packet */
2577		if (PPP_MP_CB(p)->BEbits & B) {
2578			head = p;
2579			lost = 0;
2580			len = 0;
2581		}
2582
2583		len += p->len;
2584
2585		/* Got a complete packet yet? */
2586		if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) &&
2587		    (PPP_MP_CB(head)->BEbits & B)) {
2588			if (len > ppp->mrru + 2) {
2589				++ppp->dev->stats.rx_length_errors;
2590				netdev_printk(KERN_DEBUG, ppp->dev,
2591					      "PPP: reconstructed packet"
2592					      " is too long (%d)\n", len);
2593			} else {
2594				tail = p;
2595				break;
2596			}
2597			ppp->nextseq = seq + 1;
2598		}
2599
2600		/*
2601		 * If this is the ending fragment of a packet,
2602		 * and we haven't found a complete valid packet yet,
2603		 * we can discard up to and including this fragment.
2604		 */
2605		if (PPP_MP_CB(p)->BEbits & E) {
2606			struct sk_buff *tmp2;
2607
2608			skb_queue_reverse_walk_from_safe(list, p, tmp2) {
2609				if (ppp->debug & 1)
2610					netdev_printk(KERN_DEBUG, ppp->dev,
2611						      "discarding frag %u\n",
2612						      PPP_MP_CB(p)->sequence);
2613				__skb_unlink(p, list);
2614				kfree_skb(p);
2615			}
2616			head = skb_peek(list);
2617			if (!head)
2618				break;
2619		}
2620		++seq;
2621	}
2622
2623	/* If we have a complete packet, copy it all into one skb. */
2624	if (tail != NULL) {
2625		/* If we have discarded any fragments,
2626		   signal a receive error. */
2627		if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
2628			skb_queue_walk_safe(list, p, tmp) {
2629				if (p == head)
2630					break;
2631				if (ppp->debug & 1)
2632					netdev_printk(KERN_DEBUG, ppp->dev,
2633						      "discarding frag %u\n",
2634						      PPP_MP_CB(p)->sequence);
2635				__skb_unlink(p, list);
2636				kfree_skb(p);
2637			}
2638
2639			if (ppp->debug & 1)
2640				netdev_printk(KERN_DEBUG, ppp->dev,
2641					      "  missed pkts %u..%u\n",
2642					      ppp->nextseq,
2643					      PPP_MP_CB(head)->sequence-1);
2644			++ppp->dev->stats.rx_dropped;
2645			ppp_receive_error(ppp);
2646		}
2647
2648		skb = head;
2649		if (head != tail) {
2650			struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
2651			p = skb_queue_next(list, head);
2652			__skb_unlink(skb, list);
2653			skb_queue_walk_from_safe(list, p, tmp) {
2654				__skb_unlink(p, list);
2655				*fragpp = p;
2656				p->next = NULL;
2657				fragpp = &p->next;
2658
2659				skb->len += p->len;
2660				skb->data_len += p->len;
2661				skb->truesize += p->truesize;
2662
2663				if (p == tail)
2664					break;
2665			}
2666		} else {
2667			__skb_unlink(skb, list);
2668		}
2669
2670		ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
2671	}
2672
2673	return skb;
2674}
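/*
 * Illustrative run of the reconstruction above: suppose a packet was
 * split into fragments with sequence numbers 7, 8 and 9, where 7 has
 * the B (beginning) bit, 9 has the E (ending) bit and 8 has neither.
 * Once all three sit on the queue, the walk finds head at 7 and tail
 * at 9 with lost == 0, unlinks 7 as the returned skb, chains 8 and 9
 * onto its frag_list, and advances ppp->nextseq to 10.
 */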
2675#endif /* CONFIG_PPP_MULTILINK */
2676
2677/*
2678 * Channel interface.
2679 */
2680
2681/* Create a new, unattached ppp channel. */
2682int ppp_register_channel(struct ppp_channel *chan)
2683{
2684	return ppp_register_net_channel(current->nsproxy->net_ns, chan);
2685}
2686
2687/* Create a new, unattached ppp channel for the specified net. */
2688int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
2689{
2690	struct channel *pch;
2691	struct ppp_net *pn;
2692
2693	pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
2694	if (!pch)
2695		return -ENOMEM;
2696
2697	pn = ppp_pernet(net);
2698
2699	pch->ppp = NULL;
2700	pch->chan = chan;
2701	pch->chan_net = get_net(net);
2702	chan->ppp = pch;
2703	init_ppp_file(&pch->file, CHANNEL);
2704	pch->file.hdrlen = chan->hdrlen;
2705#ifdef CONFIG_PPP_MULTILINK
2706	pch->lastseq = -1;
2707#endif /* CONFIG_PPP_MULTILINK */
2708	init_rwsem(&pch->chan_sem);
2709	spin_lock_init(&pch->downl);
2710	rwlock_init(&pch->upl);
2711
2712	spin_lock_bh(&pn->all_channels_lock);
2713	pch->file.index = ++pn->last_channel_index;
2714	list_add(&pch->list, &pn->new_channels);
2715	atomic_inc(&channel_count);
2716	spin_unlock_bh(&pn->all_channels_lock);
2717
2718	return 0;
2719}
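/*
 * Usage sketch for a hypothetical channel driver (the my_* names are
 * invented; the ops and fields follow include/linux/ppp_channel.h):
 *
 *	static int my_chan_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 *	{
 *		...hand skb to the underlying transport...
 *		return 1;	// 1 = consumed; 0 = busy, the generic
 *				// layer retries after ppp_output_wakeup()
 *	}
 *
 *	static const struct ppp_channel_ops my_chan_ops = {
 *		.start_xmit = my_chan_xmit,
 *	};
 *
 *	chan->private = my_state;
 *	chan->ops     = &my_chan_ops;
 *	chan->mtu     = 1500;
 *	chan->hdrlen  = 0;
 *	err = ppp_register_channel(chan);
 */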
2720
2721/*
2722 * Return the index of a channel.
2723 */
2724int ppp_channel_index(struct ppp_channel *chan)
2725{
2726	struct channel *pch = chan->ppp;
2727
2728	if (pch)
2729		return pch->file.index;
2730	return -1;
2731}
2732
2733/*
2734 * Return the PPP unit number to which a channel is connected.
2735 */
2736int ppp_unit_number(struct ppp_channel *chan)
2737{
2738	struct channel *pch = chan->ppp;
2739	int unit = -1;
2740
2741	if (pch) {
2742		read_lock_bh(&pch->upl);
2743		if (pch->ppp)
2744			unit = pch->ppp->file.index;
2745		read_unlock_bh(&pch->upl);
2746	}
2747	return unit;
2748}
2749
2750/*
2751 * Return the PPP device interface name of a channel.
2752 */
2753char *ppp_dev_name(struct ppp_channel *chan)
2754{
2755	struct channel *pch = chan->ppp;
2756	char *name = NULL;
2757
2758	if (pch) {
2759		read_lock_bh(&pch->upl);
2760		if (pch->ppp && pch->ppp->dev)
2761			name = pch->ppp->dev->name;
2762		read_unlock_bh(&pch->upl);
2763	}
2764	return name;
2765}
2766
2767
2768/*
2769 * Disconnect a channel from the generic layer.
2770 * This must be called in process context.
2771 */
2772void
2773ppp_unregister_channel(struct ppp_channel *chan)
2774{
2775	struct channel *pch = chan->ppp;
2776	struct ppp_net *pn;
2777
2778	if (!pch)
2779		return;		/* should never happen */
2780
2781	chan->ppp = NULL;
2782
2783	/*
2784	 * This ensures that we have returned from any calls into the
2785	 * channel's start_xmit or ioctl routine before we proceed.
2786	 */
2787	down_write(&pch->chan_sem);
2788	spin_lock_bh(&pch->downl);
2789	pch->chan = NULL;
2790	spin_unlock_bh(&pch->downl);
2791	up_write(&pch->chan_sem);
2792	ppp_disconnect_channel(pch);
2793
2794	pn = ppp_pernet(pch->chan_net);
2795	spin_lock_bh(&pn->all_channels_lock);
2796	list_del(&pch->list);
2797	spin_unlock_bh(&pn->all_channels_lock);
2798
2799	pch->file.dead = 1;
2800	wake_up_interruptible(&pch->file.rwait);
2801	if (refcount_dec_and_test(&pch->file.refcnt))
2802		ppp_destroy_channel(pch);
2803}
2804
2805/*
2806 * Callback from a channel when it can accept more to transmit.
2807 * This should be called at BH/softirq level, not interrupt level.
2808 */
2809void
2810ppp_output_wakeup(struct ppp_channel *chan)
2811{
2812	struct channel *pch = chan->ppp;
2813
2814	if (!pch)
2815		return;
2816	ppp_channel_push(pch);
2817}
2818
2819/*
2820 * Compression control.
2821 */
2822
2823/* Process the PPPIOCSCOMPRESS ioctl. */
2824static int
2825ppp_set_compress(struct ppp *ppp, struct ppp_option_data *data)
2826{
2827	int err = -EFAULT;
2828	struct compressor *cp, *ocomp;
2829	void *state, *ostate;
2830	unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
2831
2832	if (data->length > CCP_MAX_OPTION_LENGTH)
2833		goto out;
2834	if (copy_from_user(ccp_option, data->ptr, data->length))
2835		goto out;
2836
2837	err = -EINVAL;
2838	if (data->length < 2 || ccp_option[1] < 2 || ccp_option[1] > data->length)
2839		goto out;
2840
2841	cp = try_then_request_module(
2842		find_compressor(ccp_option[0]),
2843		"ppp-compress-%d", ccp_option[0]);
2844	if (!cp)
2845		goto out;
2846
2847	err = -ENOBUFS;
2848	if (data->transmit) {
2849		state = cp->comp_alloc(ccp_option, data->length);
2850		if (state) {
2851			ppp_xmit_lock(ppp);
2852			ppp->xstate &= ~SC_COMP_RUN;
2853			ocomp = ppp->xcomp;
2854			ostate = ppp->xc_state;
2855			ppp->xcomp = cp;
2856			ppp->xc_state = state;
2857			ppp_xmit_unlock(ppp);
2858			if (ostate) {
2859				ocomp->comp_free(ostate);
2860				module_put(ocomp->owner);
2861			}
2862			err = 0;
2863		} else
2864			module_put(cp->owner);
2865
2866	} else {
2867		state = cp->decomp_alloc(ccp_option, data->length);
2868		if (state) {
2869			ppp_recv_lock(ppp);
2870			ppp->rstate &= ~SC_DECOMP_RUN;
2871			ocomp = ppp->rcomp;
2872			ostate = ppp->rc_state;
2873			ppp->rcomp = cp;
2874			ppp->rc_state = state;
2875			ppp_recv_unlock(ppp);
2876			if (ostate) {
2877				ocomp->decomp_free(ostate);
2878				module_put(ocomp->owner);
2879			}
2880			err = 0;
2881		} else
2882			module_put(cp->owner);
2883	}
2884
2885 out:
2886	return err;
2887}
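/*
 * Userspace sketch (illustrative; the option bytes shown are a
 * plausible deflate request, not taken from this file): pppd reaches
 * the code above through the PPPIOCSCOMPRESS ioctl, passing the raw
 * CCP option with ccp_option[0] = compressor protocol number and
 * ccp_option[1] = option length:
 *
 *	unsigned char opt[] = { CI_DEFLATE, 4, 0x78, 0x00 };
 *	struct ppp_option_data data = {
 *		.ptr      = opt,
 *		.length   = sizeof(opt),
 *		.transmit = 1,		// 1 = xmit side, 0 = recv side
 *	};
 *	ioctl(unit_fd, PPPIOCSCOMPRESS, &data);
 */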
2888
2889/*
2890 * Look at a CCP packet and update our state accordingly.
2891 * We assume the caller has the xmit or recv path locked.
2892 */
2893static void
2894ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
2895{
2896	unsigned char *dp;
2897	int len;
2898
2899	if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
2900		return;	/* no header */
2901	dp = skb->data + 2;
2902
2903	switch (CCP_CODE(dp)) {
2904	case CCP_CONFREQ:
2905
2906		/* A ConfReq starts negotiation of compression
2907		 * in one direction of transmission,
2908		 * and hence brings it down...but which way?
2909		 *
2910		 * Remember:
2911		 * A ConfReq indicates what the sender would like to receive
2912		 */
2913		if (inbound)
2914			/* He is proposing what I should send */
2915			ppp->xstate &= ~SC_COMP_RUN;
2916		else
2917			/* I am proposing what he should send */
2918			ppp->rstate &= ~SC_DECOMP_RUN;
2919
2920		break;
2921
2922	case CCP_TERMREQ:
2923	case CCP_TERMACK:
2924		/*
2925		 * CCP is going down, both directions of transmission
2926		 */
2927		ppp->rstate &= ~SC_DECOMP_RUN;
2928		ppp->xstate &= ~SC_COMP_RUN;
2929		break;
2930
2931	case CCP_CONFACK:
2932		if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
2933			break;
2934		len = CCP_LENGTH(dp);
2935		if (!pskb_may_pull(skb, len + 2))
2936			return;		/* too short */
2937		dp += CCP_HDRLEN;
2938		len -= CCP_HDRLEN;
2939		if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
2940			break;
2941		if (inbound) {
2942			/* we will start receiving compressed packets */
2943			if (!ppp->rc_state)
2944				break;
2945			if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
2946					ppp->file.index, 0, ppp->mru, ppp->debug)) {
2947				ppp->rstate |= SC_DECOMP_RUN;
2948				ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
2949			}
2950		} else {
2951			/* we will soon start sending compressed packets */
2952			if (!ppp->xc_state)
2953				break;
2954			if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
2955					ppp->file.index, 0, ppp->debug))
2956				ppp->xstate |= SC_COMP_RUN;
2957		}
2958		break;
2959
2960	case CCP_RESETACK:
2961		/* reset the [de]compressor */
2962		if ((ppp->flags & SC_CCP_UP) == 0)
2963			break;
2964		if (inbound) {
2965			if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
2966				ppp->rcomp->decomp_reset(ppp->rc_state);
2967				ppp->rstate &= ~SC_DC_ERROR;
2968			}
2969		} else {
2970			if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
2971				ppp->xcomp->comp_reset(ppp->xc_state);
2972		}
2973		break;
2974	}
2975}
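/*
 * Concrete example of the direction logic above: a CCP ConfReq from
 * the peer (inbound) proposes what the peer wants to receive, i.e.
 * what we transmit, so it clears SC_COMP_RUN in ppp->xstate; our own
 * outbound ConfReq renegotiates what we receive and therefore clears
 * SC_DECOMP_RUN in ppp->rstate instead.
 */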
2976
2977/* Free up compression resources. */
2978static void
2979ppp_ccp_closed(struct ppp *ppp)
2980{
2981	void *xstate, *rstate;
2982	struct compressor *xcomp, *rcomp;
2983
2984	ppp_lock(ppp);
2985	ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
2986	ppp->xstate = 0;
2987	xcomp = ppp->xcomp;
2988	xstate = ppp->xc_state;
2989	ppp->xc_state = NULL;
2990	ppp->rstate = 0;
2991	rcomp = ppp->rcomp;
2992	rstate = ppp->rc_state;
2993	ppp->rc_state = NULL;
2994	ppp_unlock(ppp);
2995
2996	if (xstate) {
2997		xcomp->comp_free(xstate);
2998		module_put(xcomp->owner);
2999	}
3000	if (rstate) {
3001		rcomp->decomp_free(rstate);
3002		module_put(rcomp->owner);
3003	}
3004}
3005
3006/* List of compressors. */
3007static LIST_HEAD(compressor_list);
3008static DEFINE_SPINLOCK(compressor_list_lock);
3009
3010struct compressor_entry {
3011	struct list_head list;
3012	struct compressor *comp;
3013};
3014
3015static struct compressor_entry *
3016find_comp_entry(int proto)
3017{
3018	struct compressor_entry *ce;
3019
3020	list_for_each_entry(ce, &compressor_list, list) {
3021		if (ce->comp->compress_proto == proto)
3022			return ce;
3023	}
3024	return NULL;
3025}
3026
3027/* Register a compressor */
3028int
3029ppp_register_compressor(struct compressor *cp)
3030{
3031	struct compressor_entry *ce;
3032	int ret;
3033	spin_lock(&compressor_list_lock);
3034	ret = -EEXIST;
3035	if (find_comp_entry(cp->compress_proto))
3036		goto out;
3037	ret = -ENOMEM;
3038	ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
3039	if (!ce)
3040		goto out;
3041	ret = 0;
3042	ce->comp = cp;
3043	list_add(&ce->list, &compressor_list);
3044 out:
3045	spin_unlock(&compressor_list_lock);
3046	return ret;
3047}
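/*
 * Registration sketch for a hypothetical compression module (the my_*
 * names are invented; the fields follow include/linux/ppp-comp.h):
 *
 *	static struct compressor my_comp = {
 *		.compress_proto = CI_DEFLATE,
 *		.comp_alloc     = my_comp_alloc,
 *		.comp_free      = my_comp_free,
 *		.decomp_alloc   = my_decomp_alloc,
 *		.decomp_free    = my_decomp_free,
 *		.owner          = THIS_MODULE,
 *	};
 *
 * Calling ppp_register_compressor(&my_comp) from module init makes the
 * module reachable via try_then_request_module() in ppp_set_compress().
 */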
3048
3049/* Unregister a compressor */
3050void
3051ppp_unregister_compressor(struct compressor *cp)
3052{
3053	struct compressor_entry *ce;
3054
3055	spin_lock(&compressor_list_lock);
3056	ce = find_comp_entry(cp->compress_proto);
3057	if (ce && ce->comp == cp) {
3058		list_del(&ce->list);
3059		kfree(ce);
3060	}
3061	spin_unlock(&compressor_list_lock);
3062}
3063
3064/* Find a compressor. */
3065static struct compressor *
3066find_compressor(int type)
3067{
3068	struct compressor_entry *ce;
3069	struct compressor *cp = NULL;
3070
3071	spin_lock(&compressor_list_lock);
3072	ce = find_comp_entry(type);
3073	if (ce) {
3074		cp = ce->comp;
3075		if (!try_module_get(cp->owner))
3076			cp = NULL;
3077	}
3078	spin_unlock(&compressor_list_lock);
3079	return cp;
3080}
3081
3082/*
3083 * Miscellaneous stuff.
3084 */
3085
3086static void
3087ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
3088{
3089	struct slcompress *vj = ppp->vj;
3090
3091	memset(st, 0, sizeof(*st));
3092	st->p.ppp_ipackets = ppp->stats64.rx_packets;
3093	st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
3094	st->p.ppp_ibytes = ppp->stats64.rx_bytes;
3095	st->p.ppp_opackets = ppp->stats64.tx_packets;
3096	st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
3097	st->p.ppp_obytes = ppp->stats64.tx_bytes;
3098	if (!vj)
3099		return;
3100	st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
3101	st->vj.vjs_compressed = vj->sls_o_compressed;
3102	st->vj.vjs_searches = vj->sls_o_searches;
3103	st->vj.vjs_misses = vj->sls_o_misses;
3104	st->vj.vjs_errorin = vj->sls_i_error;
3105	st->vj.vjs_tossed = vj->sls_i_tossed;
3106	st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
3107	st->vj.vjs_compressedin = vj->sls_i_compressed;
3108}
3109
3110/*
3111 * Stuff for handling the lists of ppp units and channels
3112 * and for initialization.
3113 */
3114
3115/*
3116 * Create a new ppp interface unit.  Fails if it can't allocate memory
3117 * or if there is already a unit with the requested number.
3118 * unit == -1 means allocate a new number.
3119 */
3120static int ppp_create_interface(struct net *net, struct file *file, int *unit)
3121{
3122	struct ppp_config conf = {
3123		.file = file,
3124		.unit = *unit,
3125		.ifname_is_set = false,
3126	};
3127	struct net_device *dev;
3128	struct ppp *ppp;
3129	int err;
3130
3131	dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_ENUM, ppp_setup);
3132	if (!dev) {
3133		err = -ENOMEM;
3134		goto err;
3135	}
3136	dev_net_set(dev, net);
3137	dev->rtnl_link_ops = &ppp_link_ops;
3138
3139	rtnl_lock();
3140
3141	err = ppp_dev_configure(net, dev, &conf);
3142	if (err < 0)
3143		goto err_dev;
3144	ppp = netdev_priv(dev);
3145	*unit = ppp->file.index;
3146
3147	rtnl_unlock();
3148
3149	return 0;
3150
3151err_dev:
3152	rtnl_unlock();
3153	free_netdev(dev);
3154err:
3155	return err;
3156}
3157
3158/*
3159 * Initialize a ppp_file structure.
3160 */
3161static void
3162init_ppp_file(struct ppp_file *pf, int kind)
3163{
3164	pf->kind = kind;
3165	skb_queue_head_init(&pf->xq);
3166	skb_queue_head_init(&pf->rq);
3167	refcount_set(&pf->refcnt, 1);
3168	init_waitqueue_head(&pf->rwait);
3169}
3170
3171/*
3172 * Free the memory used by a ppp unit.  This is only called once
3173 * there are no channels connected to the unit and no file structs
3174 * that reference the unit.
3175 */
3176static void ppp_destroy_interface(struct ppp *ppp)
3177{
3178	atomic_dec(&ppp_unit_count);
3179
3180	if (!ppp->file.dead || ppp->n_channels) {
3181		/* "can't happen" */
3182		netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
3183			   "but dead=%d n_channels=%d !\n",
3184			   ppp, ppp->file.dead, ppp->n_channels);
3185		return;
3186	}
3187
3188	ppp_ccp_closed(ppp);
3189	if (ppp->vj) {
3190		slhc_free(ppp->vj);
3191		ppp->vj = NULL;
3192	}
3193	skb_queue_purge(&ppp->file.xq);
3194	skb_queue_purge(&ppp->file.rq);
3195#ifdef CONFIG_PPP_MULTILINK
3196	skb_queue_purge(&ppp->mrq);
3197#endif /* CONFIG_PPP_MULTILINK */
3198#ifdef CONFIG_PPP_FILTER
3199	if (ppp->pass_filter) {
3200		bpf_prog_destroy(ppp->pass_filter);
3201		ppp->pass_filter = NULL;
3202	}
3203
3204	if (ppp->active_filter) {
3205		bpf_prog_destroy(ppp->active_filter);
3206		ppp->active_filter = NULL;
3207	}
3208#endif /* CONFIG_PPP_FILTER */
3209
3210	kfree_skb(ppp->xmit_pending);
3211	free_percpu(ppp->xmit_recursion);
3212
3213	free_netdev(ppp->dev);
3214}
3215
3216/*
3217 * Locate an existing ppp unit.
3218 * The caller should have locked the all_ppp_mutex.
3219 */
3220static struct ppp *
3221ppp_find_unit(struct ppp_net *pn, int unit)
3222{
3223	return unit_find(&pn->units_idr, unit);
3224}
3225
3226/*
3227 * Locate an existing ppp channel.
3228 * The caller should have locked the all_channels_lock.
3229 * First we look in the new_channels list, then in the
3230 * all_channels list.  If found in the new_channels list,
3231 * we move it to the all_channels list.  This is for speed
3232 * when we have a lot of channels in use.
3233 */
3234static struct channel *
3235ppp_find_channel(struct ppp_net *pn, int unit)
3236{
3237	struct channel *pch;
3238
3239	list_for_each_entry(pch, &pn->new_channels, list) {
3240		if (pch->file.index == unit) {
3241			list_move(&pch->list, &pn->all_channels);
3242			return pch;
3243		}
3244	}
3245
3246	list_for_each_entry(pch, &pn->all_channels, list) {
3247		if (pch->file.index == unit)
3248			return pch;
3249	}
3250
3251	return NULL;
3252}
3253
3254/*
3255 * Connect a PPP channel to a PPP interface unit.
3256 */
3257static int
3258ppp_connect_channel(struct channel *pch, int unit)
3259{
3260	struct ppp *ppp;
3261	struct ppp_net *pn;
3262	int ret = -ENXIO;
3263	int hdrlen;
3264
3265	pn = ppp_pernet(pch->chan_net);
3266
3267	mutex_lock(&pn->all_ppp_mutex);
3268	ppp = ppp_find_unit(pn, unit);
3269	if (!ppp)
3270		goto out;
3271	write_lock_bh(&pch->upl);
3272	ret = -EINVAL;
3273	if (pch->ppp)
3274		goto outl;
3275
3276	ppp_lock(ppp);
3277	spin_lock_bh(&pch->downl);
3278	if (!pch->chan) {
3279		/* Don't connect unregistered channels */
3280		spin_unlock_bh(&pch->downl);
3281		ppp_unlock(ppp);
3282		ret = -ENOTCONN;
3283		goto outl;
3284	}
3285	spin_unlock_bh(&pch->downl);
3286	if (pch->file.hdrlen > ppp->file.hdrlen)
3287		ppp->file.hdrlen = pch->file.hdrlen;
3288	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
3289	if (hdrlen > ppp->dev->hard_header_len)
3290		ppp->dev->hard_header_len = hdrlen;
3291	list_add_tail(&pch->clist, &ppp->channels);
3292	++ppp->n_channels;
3293	pch->ppp = ppp;
3294	refcount_inc(&ppp->file.refcnt);
3295	ppp_unlock(ppp);
3296	ret = 0;
3297
3298 outl:
3299	write_unlock_bh(&pch->upl);
3300 out:
3301	mutex_unlock(&pn->all_ppp_mutex);
3302	return ret;
3303}
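/*
 * Userspace sketch (illustrative): a PPP daemon typically reaches
 * this function by binding a file descriptor to the channel and then
 * connecting it to a unit:
 *
 *	int fd = open("/dev/ppp", O_RDWR);
 *	ioctl(fd, PPPIOCATTCHAN, &chan_index);	// bind fd to the channel
 *	ioctl(fd, PPPIOCCONNECT, &unit_number);	// -> ppp_connect_channel()
 */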
3304
3305/*
3306 * Disconnect a channel from its ppp unit.
3307 */
3308static int
3309ppp_disconnect_channel(struct channel *pch)
3310{
3311	struct ppp *ppp;
3312	int err = -EINVAL;
3313
3314	write_lock_bh(&pch->upl);
3315	ppp = pch->ppp;
3316	pch->ppp = NULL;
3317	write_unlock_bh(&pch->upl);
3318	if (ppp) {
3319		/* remove it from the ppp unit's list */
3320		ppp_lock(ppp);
3321		list_del(&pch->clist);
3322		if (--ppp->n_channels == 0)
3323			wake_up_interruptible(&ppp->file.rwait);
3324		ppp_unlock(ppp);
3325		if (refcount_dec_and_test(&ppp->file.refcnt))
3326			ppp_destroy_interface(ppp);
3327		err = 0;
3328	}
3329	return err;
3330}
3331
3332/*
3333 * Free up the resources used by a ppp channel.
3334 */
3335static void ppp_destroy_channel(struct channel *pch)
3336{
3337	put_net(pch->chan_net);
3338	pch->chan_net = NULL;
3339
3340	atomic_dec(&channel_count);
3341
3342	if (!pch->file.dead) {
3343		/* "can't happen" */
3344		pr_err("ppp: destroying undead channel %p !\n", pch);
3345		return;
3346	}
3347	skb_queue_purge(&pch->file.xq);
3348	skb_queue_purge(&pch->file.rq);
3349	kfree(pch);
3350}
3351
3352static void __exit ppp_cleanup(void)
3353{
3354	/* should never happen */
3355	if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
3356		pr_err("PPP: removing module but units remain!\n");
3357	rtnl_link_unregister(&ppp_link_ops);
3358	unregister_chrdev(PPP_MAJOR, "ppp");
3359	device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
3360	class_destroy(ppp_class);
3361	unregister_pernet_device(&ppp_net_ops);
3362}
3363
3364/*
3365 * Units handling. Caller must protect concurrent access
3366 * by holding all_ppp_mutex
3367 */
3368
3369/* associate pointer with specified number */
3370static int unit_set(struct idr *p, void *ptr, int n)
3371{
3372	int unit;
3373
3374	unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL);
3375	if (unit == -ENOSPC)
3376		unit = -EINVAL;
3377	return unit;
3378}
3379
3380/* get new free unit number and associate pointer with it */
3381static int unit_get(struct idr *p, void *ptr)
3382{
3383	return idr_alloc(p, ptr, 0, 0, GFP_KERNEL);
3384}
3385
3386/* put unit number back to a pool */
3387static void unit_put(struct idr *p, int n)
3388{
3389	idr_remove(p, n);
3390}
3391
3392/* get pointer associated with the number */
3393static void *unit_find(struct idr *p, int n)
3394{
3395	return idr_find(p, n);
3396}
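/*
 * Note on the idr calls above: idr_alloc(p, ptr, n, n + 1, GFP_KERNEL)
 * requests exactly ID n and fails with -ENOSPC if it is taken (remapped
 * to -EINVAL by unit_set()), while an 'end' argument of 0 means
 * unbounded, so unit_get() returns the lowest free unit number.  For
 * example, with units 0 and 1 in use, unit_get() yields 2.
 */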
3397
3398/* Module/initialization stuff */
3399
3400module_init(ppp_init);
3401module_exit(ppp_cleanup);
3402
3403EXPORT_SYMBOL(ppp_register_net_channel);
3404EXPORT_SYMBOL(ppp_register_channel);
3405EXPORT_SYMBOL(ppp_unregister_channel);
3406EXPORT_SYMBOL(ppp_channel_index);
3407EXPORT_SYMBOL(ppp_unit_number);
3408EXPORT_SYMBOL(ppp_dev_name);
3409EXPORT_SYMBOL(ppp_input);
3410EXPORT_SYMBOL(ppp_input_error);
3411EXPORT_SYMBOL(ppp_output_wakeup);
3412EXPORT_SYMBOL(ppp_register_compressor);
3413EXPORT_SYMBOL(ppp_unregister_compressor);
3414MODULE_LICENSE("GPL");
3415MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
3416MODULE_ALIAS_RTNL_LINK("ppp");
3417MODULE_ALIAS("devname:ppp");