/*
 * Generic PPP layer for Linux.
 *
 * Copyright 1999-2002 Paul Mackerras.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 * The generic PPP layer handles the PPP network interfaces, the
 * /dev/ppp device, packet and VJ compression, and multilink.
 * It talks to PPP `channels' via the interface defined in
 * include/linux/ppp_channel.h.  Channels provide the basic means for
 * sending and receiving PPP frames on some kind of communications
 * channel.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20041108==
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/filter.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/stddef.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <net/slhc_vj.h>
#include <linux/atomic.h>

#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define PPP_VERSION	"2.4.2"

/*
 * Network protocols we support.
 */
#define NP_IP	0		/* Internet Protocol V4 */
#define NP_IPV6	1		/* Internet Protocol V6 */
#define NP_IPX	2		/* IPX protocol */
#define NP_AT	3		/* Appletalk protocol */
#define NP_MPLS_UC 4		/* MPLS unicast */
#define NP_MPLS_MC 5		/* MPLS multicast */
#define NUM_NP	6		/* Number of NPs. */

#define MPHDRLEN	6	/* multilink protocol header length */
#define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
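
/*
 * For orientation, a sketch of the multilink fragment header these
 * lengths describe, as assembled in ppp_mp_explode() further down:
 *
 *   long sequence numbers (MPHDRLEN == 6):
 *     bytes 0-1: PPP protocol number PPP_MP
 *     byte  2:   B and E bits in the top two bits
 *     bytes 3-5: 24-bit fragment sequence number
 *
 *   short sequence numbers (MPHDRLEN_SSN == 4):
 *     bytes 0-1: PPP protocol number PPP_MP
 *     byte  2:   B and E bits plus the top 4 bits of the sequence number
 *     byte  3:   low 8 bits of the sequence number
 */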

/*
 * An instance of /dev/ppp can be associated with either a ppp
 * interface unit or a ppp channel.  In both cases, file->private_data
 * points to one of these.
 */
struct ppp_file {
	enum {
		INTERFACE=1, CHANNEL
	}		kind;
	struct sk_buff_head xq;		/* pppd transmit queue */
	struct sk_buff_head rq;		/* receive queue for pppd */
	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
	atomic_t	refcnt;		/* # refs (incl /dev/ppp attached) */
	int		hdrlen;		/* space to leave for headers */
	int		index;		/* interface unit / channel number */
	int		dead;		/* unit/channel has been shut down */
};

#define PF_TO_X(pf, X)		container_of(pf, X, file)

#define PF_TO_PPP(pf)		PF_TO_X(pf, struct ppp)
#define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)
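
/*
 * Illustrative use of the accessors above (a sketch, not called
 * anywhere in this file): given the struct ppp_file embedded in a
 * struct ppp, container_of() recovers the enclosing structure.
 *
 *	static struct ppp *example_pf_to_ppp(struct ppp_file *pf)
 *	{
 *		BUG_ON(pf->kind != INTERFACE);
 *		return PF_TO_PPP(pf);	which expands to
 *					container_of(pf, struct ppp, file)
 *	}
 */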

/*
 * Data structure to hold primary network stats for which
 * we want to use 64 bit storage.  Other network stats
 * are stored in dev->stats of the ppp structure.
 */
struct ppp_link_stats {
	u64 rx_packets;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_bytes;
};

/*
 * Data structure describing one ppp unit.
 * A ppp unit corresponds to a ppp network interface device
 * and represents a multilink bundle.
 * It can have 0 or more ppp channels connected to it.
 */
struct ppp {
	struct ppp_file	file;		/* stuff for read/write/poll 0 */
	struct file	*owner;		/* file that owns this unit 48 */
	struct list_head channels;	/* list of attached channels 4c */
	int		n_channels;	/* how many channels are attached 54 */
	spinlock_t	rlock;		/* lock for receive side 58 */
	spinlock_t	wlock;		/* lock for transmit side 5c */
	int		mru;		/* max receive unit 60 */
	unsigned int	flags;		/* control bits 64 */
	unsigned int	xstate;		/* transmit state bits 68 */
	unsigned int	rstate;		/* receive state bits 6c */
	int		debug;		/* debug flags 70 */
	struct slcompress *vj;		/* state for VJ header compression */
	enum NPmode	npmode[NUM_NP];	/* what to do with each net proto 78 */
	struct sk_buff	*xmit_pending;	/* a packet ready to go out 88 */
	struct compressor *xcomp;	/* transmit packet compressor 8c */
	void		*xc_state;	/* its internal state 90 */
	struct compressor *rcomp;	/* receive decompressor 94 */
	void		*rc_state;	/* its internal state 98 */
	unsigned long	last_xmit;	/* jiffies when last pkt sent 9c */
	unsigned long	last_recv;	/* jiffies when last pkt rcvd a0 */
	struct net_device *dev;		/* network interface device a4 */
	int		closing;	/* is device closing down? a8 */
#ifdef CONFIG_PPP_MULTILINK
	int		nxchan;		/* next channel to send something on */
	u32		nxseq;		/* next sequence number to send */
	int		mrru;		/* MP: max reconst. receive unit */
	u32		nextseq;	/* MP: seq no of next packet */
	u32		minseq;		/* MP: min of most recent seqnos */
	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	struct sk_filter *pass_filter;	/* filter for packets to pass */
	struct sk_filter *active_filter;/* filter for pkts to reset idle */
#endif /* CONFIG_PPP_FILTER */
	struct net	*ppp_net;	/* the net we belong to */
	struct ppp_link_stats stats64;	/* 64 bit network stats */
};

/*
 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
 * SC_MUST_COMP
 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
 * Bits in xstate: SC_COMP_RUN
 */
#define SC_FLAG_BITS	(SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
			 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
			 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)

/*
 * Private data structure for each channel.
 * This includes the data structure used for multilink.
 */
struct channel {
	struct ppp_file	file;		/* stuff for read/write/poll */
	struct list_head list;		/* link in all/new_channels list */
	struct ppp_channel *chan;	/* public channel data structure */
	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
	spinlock_t	downl;		/* protects `chan', file.xq dequeue */
	struct ppp	*ppp;		/* ppp unit we're connected to */
	struct net	*chan_net;	/* the net channel belongs to */
	struct list_head clist;		/* link in list of channels per unit */
	rwlock_t	upl;		/* protects `ppp' */
#ifdef CONFIG_PPP_MULTILINK
	u8		avail;		/* flag used in multilink stuff */
	u8		had_frag;	/* >= 1 fragments have been sent */
	u32		lastseq;	/* MP: last sequence # received */
	int		speed;		/* speed of the corresponding ppp channel */
#endif /* CONFIG_PPP_MULTILINK */
};

/*
 * SMP locking issues:
 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
 * list and the ppp.n_channels field; you need to take both locks
 * before you modify them.
 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
 * channel.downl.
 */

static DEFINE_MUTEX(ppp_mutex);
static atomic_t ppp_unit_count = ATOMIC_INIT(0);
static atomic_t channel_count = ATOMIC_INIT(0);

/* per-net private data for this module */
static int ppp_net_id __read_mostly;
struct ppp_net {
	/* units to ppp mapping */
	struct idr units_idr;

	/*
	 * all_ppp_mutex protects the units_idr mapping.
	 * It also ensures that finding a ppp unit in the units_idr
	 * map and updating its file.refcnt field is atomic.
	 */
	struct mutex all_ppp_mutex;

	/* channels */
	struct list_head all_channels;
	struct list_head new_channels;
	int last_channel_index;

	/*
	 * all_channels_lock protects all_channels and
	 * last_channel_index, and the atomicity of finding
	 * a channel and updating its file.refcnt field.
	 */
	spinlock_t all_channels_lock;
};

/* Get the PPP protocol number from a skb */
#define PPP_PROTO(skb)	get_unaligned_be16((skb)->data)

/* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN	32

/*
 * Maximum number of multilink fragments queued up.
 * This has to be large enough to cope with the maximum latency of
 * the slowest channel relative to the others.  Strictly it should
 * depend on the number of channels and their characteristics.
 */
#define PPP_MP_MAX_QLEN	128

/* Multilink header bits. */
#define B	0x80		/* this fragment begins a packet */
#define E	0x40		/* this fragment ends a packet */

/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
#define seq_before(a, b)	((s32)((a) - (b)) < 0)
#define seq_after(a, b)		((s32)((a) - (b)) > 0)
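
/*
 * These comparisons stay correct across 32-bit wraparound because the
 * difference is evaluated as a signed value.  A worked example: with
 * a = 2 and b = 0xfffffffe, (s32)(a - b) == 4, so seq_after(a, b) is
 * true; sequence number 2 is "after" 0xfffffffe, as expected for
 * counters that have just wrapped.
 */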

/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
			struct file *file, unsigned int cmd, unsigned long arg);
static void ppp_xmit_process(struct ppp *ppp);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
			      struct channel *pch);
static void ppp_receive_error(struct ppp *ppp);
static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
					    struct sk_buff *skb);
#ifdef CONFIG_PPP_MULTILINK
static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
				struct channel *pch);
static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
#endif /* CONFIG_PPP_MULTILINK */
static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
static void init_ppp_file(struct ppp_file *pf, int kind);
static void ppp_shutdown_interface(struct ppp *ppp);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
static int ppp_connect_channel(struct channel *pch, int unit);
static int ppp_disconnect_channel(struct channel *pch);
static void ppp_destroy_channel(struct channel *pch);
static int unit_get(struct idr *p, void *ptr);
static int unit_set(struct idr *p, void *ptr, int n);
static void unit_put(struct idr *p, int n);
static void *unit_find(struct idr *p, int n);

static struct class *ppp_class;

/* per net-namespace data */
static inline struct ppp_net *ppp_pernet(struct net *net)
{
	BUG_ON(!net);

	return net_generic(net, ppp_net_id);
}

/* Translates a PPP protocol number to a NP index (NP == network protocol) */
static inline int proto_to_npindex(int proto)
{
	switch (proto) {
	case PPP_IP:
		return NP_IP;
	case PPP_IPV6:
		return NP_IPV6;
	case PPP_IPX:
		return NP_IPX;
	case PPP_AT:
		return NP_AT;
	case PPP_MPLS_UC:
		return NP_MPLS_UC;
	case PPP_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -EINVAL;
}

/* Translates an NP index into a PPP protocol number */
static const int npindex_to_proto[NUM_NP] = {
	PPP_IP,
	PPP_IPV6,
	PPP_IPX,
	PPP_AT,
	PPP_MPLS_UC,
	PPP_MPLS_MC,
};

/* Translates an ethertype into an NP index */
static inline int ethertype_to_npindex(int ethertype)
{
	switch (ethertype) {
	case ETH_P_IP:
		return NP_IP;
	case ETH_P_IPV6:
		return NP_IPV6;
	case ETH_P_IPX:
		return NP_IPX;
	case ETH_P_PPPTALK:
	case ETH_P_ATALK:
		return NP_AT;
	case ETH_P_MPLS_UC:
		return NP_MPLS_UC;
	case ETH_P_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -1;
}

/* Translates an NP index into an ethertype */
static const int npindex_to_ethertype[NUM_NP] = {
	ETH_P_IP,
	ETH_P_IPV6,
	ETH_P_IPX,
	ETH_P_PPPTALK,
	ETH_P_MPLS_UC,
	ETH_P_MPLS_MC,
};
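
/*
 * The tables and helpers above form a consistent round trip between
 * the three namespaces.  For IPv4, for example:
 * ethertype_to_npindex(ETH_P_IP) == NP_IP,
 * npindex_to_proto[NP_IP] == PPP_IP, and proto_to_npindex(PPP_IP)
 * yields NP_IP again.
 */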

/*
 * Locking shorthand.
 */
#define ppp_xmit_lock(ppp)	spin_lock_bh(&(ppp)->wlock)
#define ppp_xmit_unlock(ppp)	spin_unlock_bh(&(ppp)->wlock)
#define ppp_recv_lock(ppp)	spin_lock_bh(&(ppp)->rlock)
#define ppp_recv_unlock(ppp)	spin_unlock_bh(&(ppp)->rlock)
#define ppp_lock(ppp)		do { ppp_xmit_lock(ppp); \
				     ppp_recv_lock(ppp); } while (0)
#define ppp_unlock(ppp)		do { ppp_recv_unlock(ppp); \
				     ppp_xmit_unlock(ppp); } while (0)
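
/*
 * A usage sketch for the shorthand above (hypothetical caller, not
 * part of this file); note that ppp_lock() takes wlock before rlock,
 * matching the lock ordering rules documented earlier:
 *
 *	ppp_lock(ppp);
 *	ppp->mru = new_mru;	update state seen by both paths
 *	ppp_unlock(ppp);
 */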

/*
 * /dev/ppp device routines.
 * The /dev/ppp device is used by pppd to control the ppp unit.
 * It supports the read, write, ioctl and poll functions.
 * Open instances of /dev/ppp can be in one of three states:
 * unattached, attached to a ppp unit, or attached to a ppp channel.
 */
static int ppp_open(struct inode *inode, struct file *file)
{
	/*
	 * This could (should?) be enforced by the permissions on /dev/ppp.
	 */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	return 0;
}

static int ppp_release(struct inode *unused, struct file *file)
{
	struct ppp_file *pf = file->private_data;
	struct ppp *ppp;

	if (pf) {
		file->private_data = NULL;
		if (pf->kind == INTERFACE) {
			ppp = PF_TO_PPP(pf);
			if (file == ppp->owner)
				ppp_shutdown_interface(ppp);
		}
		if (atomic_dec_and_test(&pf->refcnt)) {
			switch (pf->kind) {
			case INTERFACE:
				ppp_destroy_interface(PF_TO_PPP(pf));
				break;
			case CHANNEL:
				ppp_destroy_channel(PF_TO_CHANNEL(pf));
				break;
			}
		}
	}
	return 0;
}

static ssize_t ppp_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	struct sk_buff *skb = NULL;
	struct iovec iov;

	ret = count;

	if (!pf)
		return -ENXIO;
	add_wait_queue(&pf->rwait, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		skb = skb_dequeue(&pf->rq);
		if (skb)
			break;
		ret = 0;
		if (pf->dead)
			break;
		if (pf->kind == INTERFACE) {
			/*
			 * Return 0 (EOF) on an interface that has no
			 * channels connected, unless it is looping
			 * network traffic (demand mode).
			 */
			struct ppp *ppp = PF_TO_PPP(pf);
			if (ppp->n_channels == 0 &&
			    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
				break;
		}
		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&pf->rwait, &wait);

	if (!skb)
		goto out;

	ret = -EOVERFLOW;
	if (skb->len > count)
		goto outf;
	ret = -EFAULT;
	iov.iov_base = buf;
	iov.iov_len = count;
	if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len))
		goto outf;
	ret = skb->len;

 outf:
	kfree_skb(skb);
 out:
	return ret;
}

static ssize_t ppp_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	struct sk_buff *skb;
	ssize_t ret;

	if (!pf)
		return -ENXIO;
	ret = -ENOMEM;
	skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
	if (!skb)
		goto out;
	skb_reserve(skb, pf->hdrlen);
	ret = -EFAULT;
	if (copy_from_user(skb_put(skb, count), buf, count)) {
		kfree_skb(skb);
		goto out;
	}

	skb_queue_tail(&pf->xq, skb);

	switch (pf->kind) {
	case INTERFACE:
		ppp_xmit_process(PF_TO_PPP(pf));
		break;
	case CHANNEL:
		ppp_channel_push(PF_TO_CHANNEL(pf));
		break;
	}

	ret = count;

 out:
	return ret;
}

/* No kernel lock - fine */
static unsigned int ppp_poll(struct file *file, poll_table *wait)
{
	struct ppp_file *pf = file->private_data;
	unsigned int mask;

	if (!pf)
		return 0;
	poll_wait(file, &pf->rwait, wait);
	mask = POLLOUT | POLLWRNORM;
	if (skb_peek(&pf->rq))
		mask |= POLLIN | POLLRDNORM;
	if (pf->dead)
		mask |= POLLHUP;
	else if (pf->kind == INTERFACE) {
		/* see comment in ppp_read */
		struct ppp *ppp = PF_TO_PPP(pf);
		if (ppp->n_channels == 0 &&
		    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
			mask |= POLLIN | POLLRDNORM;
	}

	return mask;
}

#ifdef CONFIG_PPP_FILTER
static int get_filter(void __user *arg, struct sock_filter **p)
{
	struct sock_fprog uprog;
	struct sock_filter *code = NULL;
	int len, err;

	if (copy_from_user(&uprog, arg, sizeof(uprog)))
		return -EFAULT;

	if (!uprog.len) {
		*p = NULL;
		return 0;
	}

	len = uprog.len * sizeof(struct sock_filter);
	code = memdup_user(uprog.filter, len);
	if (IS_ERR(code))
		return PTR_ERR(code);

	err = sk_chk_filter(code, uprog.len);
	if (err) {
		kfree(code);
		return err;
	}

	*p = code;
	return uprog.len;
}
#endif /* CONFIG_PPP_FILTER */
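
/*
 * For reference, a minimal userspace sketch (assumed usage, not part
 * of this file) of handing a classic BPF program to the PPPIOCSPASS
 * ioctl that get_filter() serves; the single BPF_RET instruction
 * accepts every packet:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = insns };
 *	if (ioctl(fd, PPPIOCSPASS, &prog) < 0)
 *		perror("PPPIOCSPASS");
 */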

static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ppp_file *pf = file->private_data;
	struct ppp *ppp;
	int err = -EFAULT, val, val2, i;
	struct ppp_idle idle;
	struct npioctl npi;
	int unit, cflags;
	struct slcompress *vj;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

	if (!pf)
		return ppp_unattached_ioctl(current->nsproxy->net_ns,
					pf, file, cmd, arg);

	if (cmd == PPPIOCDETACH) {
		/*
		 * We have to be careful here... if the file descriptor
		 * has been dup'd, we could have another process in the
		 * middle of a poll using the same file *, so we had
		 * better not free the interface data structures -
		 * instead we fail the ioctl.  Even in this case, we
		 * shut down the interface if we are the owner of it.
		 * Actually, we should get rid of PPPIOCDETACH, userland
		 * (i.e. pppd) could achieve the same effect by closing
		 * this fd and reopening /dev/ppp.
		 */
		err = -EINVAL;
		mutex_lock(&ppp_mutex);
		if (pf->kind == INTERFACE) {
			ppp = PF_TO_PPP(pf);
			if (file == ppp->owner)
				ppp_shutdown_interface(ppp);
		}
		if (atomic_long_read(&file->f_count) <= 2) {
			ppp_release(NULL, file);
			err = 0;
		} else
			pr_warn("PPPIOCDETACH file->f_count=%ld\n",
				atomic_long_read(&file->f_count));
		mutex_unlock(&ppp_mutex);
		return err;
	}

	if (pf->kind == CHANNEL) {
		struct channel *pch;
		struct ppp_channel *chan;

		mutex_lock(&ppp_mutex);
		pch = PF_TO_CHANNEL(pf);

		switch (cmd) {
		case PPPIOCCONNECT:
			if (get_user(unit, p))
				break;
			err = ppp_connect_channel(pch, unit);
			break;

		case PPPIOCDISCONN:
			err = ppp_disconnect_channel(pch);
			break;

		default:
			down_read(&pch->chan_sem);
			chan = pch->chan;
			err = -ENOTTY;
			if (chan && chan->ops->ioctl)
				err = chan->ops->ioctl(chan, cmd, arg);
			up_read(&pch->chan_sem);
		}
		mutex_unlock(&ppp_mutex);
		return err;
	}

	if (pf->kind != INTERFACE) {
		/* can't happen */
		pr_err("PPP: not interface or channel??\n");
		return -EINVAL;
	}

	mutex_lock(&ppp_mutex);
	ppp = PF_TO_PPP(pf);
	switch (cmd) {
	case PPPIOCSMRU:
		if (get_user(val, p))
			break;
		ppp->mru = val;
		err = 0;
		break;

	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		ppp_lock(ppp);
		cflags = ppp->flags & ~val;
		ppp->flags = val & SC_FLAG_BITS;
		ppp_unlock(ppp);
		if (cflags & SC_CCP_OPEN)
			ppp_ccp_closed(ppp);
		err = 0;
		break;

	case PPPIOCGFLAGS:
		val = ppp->flags | ppp->xstate | ppp->rstate;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	case PPPIOCSCOMPRESS:
		err = ppp_set_compress(ppp, arg);
		break;

	case PPPIOCGUNIT:
		if (put_user(ppp->file.index, p))
			break;
		err = 0;
		break;

	case PPPIOCSDEBUG:
		if (get_user(val, p))
			break;
		ppp->debug = val;
		err = 0;
		break;

	case PPPIOCGDEBUG:
		if (put_user(ppp->debug, p))
			break;
		err = 0;
		break;

	case PPPIOCGIDLE:
		idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
		idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
		if (copy_to_user(argp, &idle, sizeof(idle)))
			break;
		err = 0;
		break;

	case PPPIOCSMAXCID:
		if (get_user(val, p))
			break;
		val2 = 15;
		if ((val >> 16) != 0) {
			val2 = val >> 16;
			val &= 0xffff;
		}
		vj = slhc_init(val2+1, val+1);
		if (!vj) {
			netdev_err(ppp->dev,
				   "PPP: no memory (VJ compressor)\n");
			err = -ENOMEM;
			break;
		}
		ppp_lock(ppp);
		if (ppp->vj)
			slhc_free(ppp->vj);
		ppp->vj = vj;
		ppp_unlock(ppp);
		err = 0;
		break;

	case PPPIOCGNPMODE:
	case PPPIOCSNPMODE:
		if (copy_from_user(&npi, argp, sizeof(npi)))
			break;
		err = proto_to_npindex(npi.protocol);
		if (err < 0)
			break;
		i = err;
		if (cmd == PPPIOCGNPMODE) {
			err = -EFAULT;
			npi.mode = ppp->npmode[i];
			if (copy_to_user(argp, &npi, sizeof(npi)))
				break;
		} else {
			ppp->npmode[i] = npi.mode;
			/* we may be able to transmit more packets now (??) */
			netif_wake_queue(ppp->dev);
		}
		err = 0;
		break;

#ifdef CONFIG_PPP_FILTER
	case PPPIOCSPASS:
	{
		struct sock_filter *code;

		err = get_filter(argp, &code);
		if (err >= 0) {
			struct sock_fprog fprog = {
				.len = err,
				.filter = code,
			};

			ppp_lock(ppp);
			if (ppp->pass_filter)
				sk_unattached_filter_destroy(ppp->pass_filter);
			err = sk_unattached_filter_create(&ppp->pass_filter,
							  &fprog);
			kfree(code);
			ppp_unlock(ppp);
		}
		break;
	}
	case PPPIOCSACTIVE:
	{
		struct sock_filter *code;

		err = get_filter(argp, &code);
		if (err >= 0) {
			struct sock_fprog fprog = {
				.len = err,
				.filter = code,
			};

			ppp_lock(ppp);
			if (ppp->active_filter)
				sk_unattached_filter_destroy(ppp->active_filter);
			err = sk_unattached_filter_create(&ppp->active_filter,
							  &fprog);
			kfree(code);
			ppp_unlock(ppp);
		}
		break;
	}
#endif /* CONFIG_PPP_FILTER */

#ifdef CONFIG_PPP_MULTILINK
	case PPPIOCSMRRU:
		if (get_user(val, p))
			break;
		ppp_recv_lock(ppp);
		ppp->mrru = val;
		ppp_recv_unlock(ppp);
		err = 0;
		break;
#endif /* CONFIG_PPP_MULTILINK */

	default:
		err = -ENOTTY;
	}
	mutex_unlock(&ppp_mutex);
	return err;
}

static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
			struct file *file, unsigned int cmd, unsigned long arg)
{
	int unit, err = -EFAULT;
	struct ppp *ppp;
	struct channel *chan;
	struct ppp_net *pn;
	int __user *p = (int __user *)arg;

	mutex_lock(&ppp_mutex);
	switch (cmd) {
	case PPPIOCNEWUNIT:
		/* Create a new ppp unit */
		if (get_user(unit, p))
			break;
		ppp = ppp_create_interface(net, unit, &err);
		if (!ppp)
			break;
		file->private_data = &ppp->file;
		ppp->owner = file;
		err = -EFAULT;
		if (put_user(ppp->file.index, p))
			break;
		err = 0;
		break;

	case PPPIOCATTACH:
		/* Attach to an existing ppp unit */
		if (get_user(unit, p))
			break;
		err = -ENXIO;
		pn = ppp_pernet(net);
		mutex_lock(&pn->all_ppp_mutex);
		ppp = ppp_find_unit(pn, unit);
		if (ppp) {
			atomic_inc(&ppp->file.refcnt);
			file->private_data = &ppp->file;
			err = 0;
		}
		mutex_unlock(&pn->all_ppp_mutex);
		break;

	case PPPIOCATTCHAN:
		if (get_user(unit, p))
			break;
		err = -ENXIO;
		pn = ppp_pernet(net);
		spin_lock_bh(&pn->all_channels_lock);
		chan = ppp_find_channel(pn, unit);
		if (chan) {
			atomic_inc(&chan->file.refcnt);
			file->private_data = &chan->file;
			err = 0;
		}
		spin_unlock_bh(&pn->all_channels_lock);
		break;

	default:
		err = -ENOTTY;
	}
	mutex_unlock(&ppp_mutex);
	return err;
}
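
/*
 * A sketch of the userspace side of the unattached ioctls above
 * (assumed usage, mirroring what pppd does): open /dev/ppp, then
 * create a new unit or attach to an existing one.
 *
 *	int unit = -1;			-1 requests the first free unit
 *	int fd = open("/dev/ppp", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, PPPIOCNEWUNIT, &unit) == 0)
 *		printf("created ppp%d\n", unit);
 *
 * PPPIOCATTACH and PPPIOCATTCHAN instead take an existing unit or
 * channel index and leave file->private_data pointing at it with the
 * refcount raised, as in the code above.
 */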

static const struct file_operations ppp_device_fops = {
	.owner		= THIS_MODULE,
	.read		= ppp_read,
	.write		= ppp_write,
	.poll		= ppp_poll,
	.unlocked_ioctl	= ppp_ioctl,
	.open		= ppp_open,
	.release	= ppp_release,
	.llseek		= noop_llseek,
};

static __net_init int ppp_init_net(struct net *net)
{
	struct ppp_net *pn = net_generic(net, ppp_net_id);

	idr_init(&pn->units_idr);
	mutex_init(&pn->all_ppp_mutex);

	INIT_LIST_HEAD(&pn->all_channels);
	INIT_LIST_HEAD(&pn->new_channels);

	spin_lock_init(&pn->all_channels_lock);

	return 0;
}

static __net_exit void ppp_exit_net(struct net *net)
{
	struct ppp_net *pn = net_generic(net, ppp_net_id);

	idr_destroy(&pn->units_idr);
}

static struct pernet_operations ppp_net_ops = {
	.init = ppp_init_net,
	.exit = ppp_exit_net,
	.id   = &ppp_net_id,
	.size = sizeof(struct ppp_net),
};

#define PPP_MAJOR	108

/* Called at boot time if ppp is compiled into the kernel,
   or at module load time (from init_module) if compiled as a module. */
static int __init ppp_init(void)
{
	int err;

	pr_info("PPP generic driver version " PPP_VERSION "\n");

	err = register_pernet_device(&ppp_net_ops);
	if (err) {
		pr_err("failed to register PPP pernet device (%d)\n", err);
		goto out;
	}

	err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
	if (err) {
		pr_err("failed to register PPP device (%d)\n", err);
		goto out_net;
	}

	ppp_class = class_create(THIS_MODULE, "ppp");
	if (IS_ERR(ppp_class)) {
		err = PTR_ERR(ppp_class);
		goto out_chrdev;
	}

	/* not a big deal if we fail here :-) */
	device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");

	return 0;

out_chrdev:
	unregister_chrdev(PPP_MAJOR, "ppp");
out_net:
	unregister_pernet_device(&ppp_net_ops);
out:
	return err;
}

/*
 * Network interface unit routines.
 */
static netdev_tx_t
ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ppp *ppp = netdev_priv(dev);
	int npi, proto;
	unsigned char *pp;

	npi = ethertype_to_npindex(ntohs(skb->protocol));
	if (npi < 0)
		goto outf;

	/* Drop, accept or reject the packet */
	switch (ppp->npmode[npi]) {
	case NPMODE_PASS:
		break;
	case NPMODE_QUEUE:
		/* it would be nice to have a way to tell the network
		   system to queue this one up for later. */
		goto outf;
	case NPMODE_DROP:
	case NPMODE_ERROR:
		goto outf;
	}

	/* Put the 2-byte PPP protocol number on the front,
	   making sure there is room for the address and control fields. */
	if (skb_cow_head(skb, PPP_HDRLEN))
		goto outf;

	pp = skb_push(skb, 2);
	proto = npindex_to_proto[npi];
	put_unaligned_be16(proto, pp);

	skb_queue_tail(&ppp->file.xq, skb);
	ppp_xmit_process(ppp);
	return NETDEV_TX_OK;

 outf:
	kfree_skb(skb);
	++dev->stats.tx_dropped;
	return NETDEV_TX_OK;
}

static int
ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ppp *ppp = netdev_priv(dev);
	int err = -EFAULT;
	void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
	struct ppp_stats stats;
	struct ppp_comp_stats cstats;
	char *vers;

	switch (cmd) {
	case SIOCGPPPSTATS:
		ppp_get_stats(ppp, &stats);
		if (copy_to_user(addr, &stats, sizeof(stats)))
			break;
		err = 0;
		break;

	case SIOCGPPPCSTATS:
		memset(&cstats, 0, sizeof(cstats));
		if (ppp->xc_state)
			ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
		if (ppp->rc_state)
			ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
		if (copy_to_user(addr, &cstats, sizeof(cstats)))
			break;
		err = 0;
		break;

	case SIOCGPPPVER:
		vers = PPP_VERSION;
		if (copy_to_user(addr, vers, strlen(vers) + 1))
			break;
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static struct rtnl_link_stats64*
ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
{
	struct ppp *ppp = netdev_priv(dev);

	ppp_recv_lock(ppp);
	stats64->rx_packets = ppp->stats64.rx_packets;
	stats64->rx_bytes   = ppp->stats64.rx_bytes;
	ppp_recv_unlock(ppp);

	ppp_xmit_lock(ppp);
	stats64->tx_packets = ppp->stats64.tx_packets;
	stats64->tx_bytes   = ppp->stats64.tx_bytes;
	ppp_xmit_unlock(ppp);

	stats64->rx_errors        = dev->stats.rx_errors;
	stats64->tx_errors        = dev->stats.tx_errors;
	stats64->rx_dropped       = dev->stats.rx_dropped;
	stats64->tx_dropped       = dev->stats.tx_dropped;
	stats64->rx_length_errors = dev->stats.rx_length_errors;

	return stats64;
}

static struct lock_class_key ppp_tx_busylock;
static int ppp_dev_init(struct net_device *dev)
{
	dev->qdisc_tx_busylock = &ppp_tx_busylock;
	return 0;
}

static const struct net_device_ops ppp_netdev_ops = {
	.ndo_init	 = ppp_dev_init,
	.ndo_start_xmit  = ppp_start_xmit,
	.ndo_do_ioctl    = ppp_net_ioctl,
	.ndo_get_stats64 = ppp_get_stats64,
};

static void ppp_setup(struct net_device *dev)
{
	dev->netdev_ops = &ppp_netdev_ops;
	dev->hard_header_len = PPP_HDRLEN;
	dev->mtu = PPP_MRU;
	dev->addr_len = 0;
	dev->tx_queue_len = 3;
	dev->type = ARPHRD_PPP;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	dev->features |= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}

/*
 * Transmit-side routines.
 */

/*
 * Called to do any work queued up on the transmit side
 * that can now be done.
 */
static void
ppp_xmit_process(struct ppp *ppp)
{
	struct sk_buff *skb;

	ppp_xmit_lock(ppp);
	if (!ppp->closing) {
		ppp_push(ppp);
		while (!ppp->xmit_pending &&
		       (skb = skb_dequeue(&ppp->file.xq)))
			ppp_send_frame(ppp, skb);
		/* If there's no work left to do, tell the core net
		   code that we can accept some more. */
		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
			netif_wake_queue(ppp->dev);
		else
			netif_stop_queue(ppp->dev);
	}
	ppp_xmit_unlock(ppp);
}

static inline struct sk_buff *
pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	int len;
	int new_skb_size = ppp->dev->mtu +
		ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
	int compressor_skb_size = ppp->dev->mtu +
		ppp->xcomp->comp_extra + PPP_HDRLEN;
	new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
	if (!new_skb) {
		if (net_ratelimit())
			netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
		return NULL;
	}
	if (ppp->dev->hard_header_len > PPP_HDRLEN)
		skb_reserve(new_skb,
			    ppp->dev->hard_header_len - PPP_HDRLEN);

	/* compressor still expects A/C bytes in hdr */
	len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
				   new_skb->data, skb->len + 2,
				   compressor_skb_size);
	if (len > 0 && (ppp->flags & SC_CCP_UP)) {
		consume_skb(skb);
		skb = new_skb;
		skb_put(skb, len);
		skb_pull(skb, 2);	/* pull off A/C bytes */
	} else if (len == 0) {
		/* didn't compress, or CCP not up yet */
		consume_skb(new_skb);
		new_skb = skb;
	} else {
		/*
		 * (len < 0)
		 * MPPE requires that we do not send unencrypted
		 * frames.  The compressor will return -1 if we
		 * should drop the frame.  We cannot simply test
		 * the compress_proto because MPPE and MPPC share
		 * the same number.
		 */
		if (net_ratelimit())
			netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
		kfree_skb(skb);
		consume_skb(new_skb);
		new_skb = NULL;
	}
	return new_skb;
}

/*
 * Compress and send a frame.
 * The caller should have locked the xmit path,
 * and xmit_pending should be 0.
 */
static void
ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
{
	int proto = PPP_PROTO(skb);
	struct sk_buff *new_skb;
	int len;
	unsigned char *cp;

	if (proto < 0x8000) {
#ifdef CONFIG_PPP_FILTER
		/* check if we should pass this packet */
		/* the filter instructions are constructed assuming
		   a four-byte PPP header on each packet */
		*skb_push(skb, 2) = 1;
		if (ppp->pass_filter &&
		    SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
			if (ppp->debug & 1)
				netdev_printk(KERN_DEBUG, ppp->dev,
					      "PPP: outbound frame "
					      "not passed\n");
			kfree_skb(skb);
			return;
		}
		/* if this packet passes the active filter, record the time */
		if (!(ppp->active_filter &&
		      SK_RUN_FILTER(ppp->active_filter, skb) == 0))
			ppp->last_xmit = jiffies;
		skb_pull(skb, 2);
#else
		/* for data packets, record the time */
		ppp->last_xmit = jiffies;
#endif /* CONFIG_PPP_FILTER */
	}

	++ppp->stats64.tx_packets;
	ppp->stats64.tx_bytes += skb->len - 2;

	switch (proto) {
	case PPP_IP:
		if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
			break;
		/* try to do VJ TCP header compression */
		new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
				    GFP_ATOMIC);
		if (!new_skb) {
			netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
			goto drop;
		}
		skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
		cp = skb->data + 2;
		len = slhc_compress(ppp->vj, cp, skb->len - 2,
				    new_skb->data + 2, &cp,
				    !(ppp->flags & SC_NO_TCP_CCID));
		if (cp == skb->data + 2) {
			/* didn't compress */
			consume_skb(new_skb);
		} else {
			if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
				proto = PPP_VJC_COMP;
				cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
			} else {
				proto = PPP_VJC_UNCOMP;
				cp[0] = skb->data[2];
			}
			consume_skb(skb);
			skb = new_skb;
			cp = skb_put(skb, len + 2);
			cp[0] = 0;
			cp[1] = proto;
		}
		break;

	case PPP_CCP:
		/* peek at outbound CCP frames */
		ppp_ccp_peek(ppp, skb, 0);
		break;
	}

	/* try to do packet compression */
	if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
	    proto != PPP_LCP && proto != PPP_CCP) {
		if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
			if (net_ratelimit())
				netdev_err(ppp->dev,
					   "ppp: compression required but "
					   "down - pkt dropped.\n");
			goto drop;
		}
		skb = pad_compress_skb(ppp, skb);
		if (!skb)
			goto drop;
	}

	/*
	 * If we are waiting for traffic (demand dialling),
	 * queue it up for pppd to receive.
	 */
	if (ppp->flags & SC_LOOP_TRAFFIC) {
		if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
			goto drop;
		skb_queue_tail(&ppp->file.rq, skb);
		wake_up_interruptible(&ppp->file.rwait);
		return;
	}

	ppp->xmit_pending = skb;
	ppp_push(ppp);
	return;

 drop:
	kfree_skb(skb);
	++ppp->dev->stats.tx_errors;
}

/*
 * Try to send the frame in xmit_pending.
 * The caller should have the xmit path locked.
 */
static void
ppp_push(struct ppp *ppp)
{
	struct list_head *list;
	struct channel *pch;
	struct sk_buff *skb = ppp->xmit_pending;

	if (!skb)
		return;

	list = &ppp->channels;
	if (list_empty(list)) {
		/* nowhere to send the packet, just drop it */
		ppp->xmit_pending = NULL;
		kfree_skb(skb);
		return;
	}

	if ((ppp->flags & SC_MULTILINK) == 0) {
		/* not doing multilink: send it down the first channel */
		list = list->next;
		pch = list_entry(list, struct channel, clist);

		spin_lock_bh(&pch->downl);
		if (pch->chan) {
			if (pch->chan->ops->start_xmit(pch->chan, skb))
				ppp->xmit_pending = NULL;
		} else {
			/* channel got unregistered */
			kfree_skb(skb);
			ppp->xmit_pending = NULL;
		}
		spin_unlock_bh(&pch->downl);
		return;
	}

#ifdef CONFIG_PPP_MULTILINK
	/* Multilink: fragment the packet over as many links
	   as can take the packet at the moment. */
	if (!ppp_mp_explode(ppp, skb))
		return;
#endif /* CONFIG_PPP_MULTILINK */

	ppp->xmit_pending = NULL;
	kfree_skb(skb);
}

#ifdef CONFIG_PPP_MULTILINK
static bool mp_protocol_compress __read_mostly = true;
module_param(mp_protocol_compress, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mp_protocol_compress,
		 "compress protocol id in multilink fragments");
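
/*
 * Protocol field compression, which this parameter controls, drops a
 * leading 0x00 from the two-byte protocol number before fragmenting.
 * For example, an IP packet (protocol 0x0021, on the wire as the
 * bytes 0x00 0x21) is fragmented starting from the single byte 0x21,
 * saving one byte per packet; see the *p == 0 test in
 * ppp_mp_explode() below.
 */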

/*
 * Divide a packet to be transmitted into fragments and
 * send them out the individual links.
 */
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
{
	int len, totlen;
	int i, bits, hdrlen, mtu;
	int flen;
	int navail, nfree, nzero;
	int nbigger;
	int totspeed;
	int totfree;
	unsigned char *p, *q;
	struct list_head *list;
	struct channel *pch;
	struct sk_buff *frag;
	struct ppp_channel *chan;

	totspeed = 0; /* total bitrate of the bundle */
	nfree = 0;    /* # channels which have no packet already queued */
	navail = 0;   /* total # of usable channels (not deregistered) */
	nzero = 0;    /* number of channels with zero speed associated */
	totfree = 0;  /* total # of channels available and having no
		       * queued packets before starting the fragmentation */

	hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
	i = 0;
	list_for_each_entry(pch, &ppp->channels, clist) {
		if (pch->chan) {
			pch->avail = 1;
			navail++;
			pch->speed = pch->chan->speed;
		} else {
			pch->avail = 0;
		}
		if (pch->avail) {
			if (skb_queue_empty(&pch->file.xq) ||
			    !pch->had_frag) {
				if (pch->speed == 0)
					nzero++;
				else
					totspeed += pch->speed;

				pch->avail = 2;
				++nfree;
				++totfree;
			}
			if (!pch->had_frag && i < ppp->nxchan)
				ppp->nxchan = i;
		}
		++i;
	}
	/*
	 * Don't start sending this packet unless at least half of
	 * the channels are free.  This gives much better TCP
	 * performance if we have a lot of channels.
	 */
	if (nfree == 0 || nfree < navail / 2)
		return 0; /* can't take now, leave it in xmit_pending */

	/* Do protocol field compression */
	p = skb->data;
	len = skb->len;
	if (*p == 0 && mp_protocol_compress) {
		++p;
		--len;
	}

	totlen = len;
	nbigger = len % nfree;

	/* skip to the channel after the one we last used
	   and start at that one */
	list = &ppp->channels;
	for (i = 0; i < ppp->nxchan; ++i) {
		list = list->next;
		if (list == &ppp->channels) {
			i = 0;
			break;
		}
	}

	/* create a fragment for each channel */
	bits = B;
	while (len > 0) {
		list = list->next;
		if (list == &ppp->channels) {
			i = 0;
			continue;
		}
		pch = list_entry(list, struct channel, clist);
		++i;
		if (!pch->avail)
			continue;

		/*
		 * Skip this channel if it has a fragment pending already and
		 * we haven't given a fragment to all of the free channels.
		 */
		if (pch->avail == 1) {
			if (nfree > 0)
				continue;
		} else {
			pch->avail = 1;
		}

		/* check the channel's mtu and whether it is still attached. */
		spin_lock_bh(&pch->downl);
		if (pch->chan == NULL) {
			/* can't use this channel, it's being deregistered */
			if (pch->speed == 0)
				nzero--;
			else
				totspeed -= pch->speed;

			spin_unlock_bh(&pch->downl);
			pch->avail = 0;
			totlen = len;
			totfree--;
			nfree--;
			if (--navail == 0)
				break;
			continue;
		}

		/*
		 * If the channel speed is not set, divide
		 * the packet evenly among the free channels;
		 * otherwise divide it according to the speed
		 * of the channel we are going to transmit on.
		 */
		flen = len;
		if (nfree > 0) {
			if (pch->speed == 0) {
				flen = len/nfree;
				if (nbigger > 0) {
					flen++;
					nbigger--;
				}
			} else {
				flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
					((totspeed*totfree)/pch->speed)) - hdrlen;
				if (nbigger > 0) {
					flen += ((totfree - nzero)*pch->speed)/totspeed;
					nbigger -= ((totfree - nzero)*pch->speed)/
							totspeed;
				}
			}
			nfree--;
		}

		/*
		 * Check whether we are on the last channel or
		 * we exceeded the length of the data to
		 * fragment.
		 */
		if ((nfree <= 0) || (flen > len))
			flen = len;
		/*
		 * It is not worth transmitting on slow channels:
		 * in that case the flen resulting from the above
		 * formula will be less than or equal to zero.
		 * Skip the channel in this case.
		 */
		if (flen <= 0) {
			pch->avail = 2;
			spin_unlock_bh(&pch->downl);
			continue;
		}

		/*
		 * hdrlen includes the 2-byte PPP protocol field, but the
		 * MTU counts only the payload excluding the protocol field.
		 * (RFC1661 Section 2)
		 */
		mtu = pch->chan->mtu - (hdrlen - 2);
		if (mtu < 4)
			mtu = 4;
		if (flen > mtu)
			flen = mtu;
		if (flen == len)
			bits |= E;
		frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
		if (!frag)
			goto noskb;
		q = skb_put(frag, flen + hdrlen);

		/* make the MP header */
		put_unaligned_be16(PPP_MP, q);
		if (ppp->flags & SC_MP_XSHORTSEQ) {
			q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
			q[3] = ppp->nxseq;
		} else {
			q[2] = bits;
			q[3] = ppp->nxseq >> 16;
			q[4] = ppp->nxseq >> 8;
			q[5] = ppp->nxseq;
		}

		memcpy(q + hdrlen, p, flen);

		/* try to send it down the channel */
		chan = pch->chan;
		if (!skb_queue_empty(&pch->file.xq) ||
		    !chan->ops->start_xmit(chan, frag))
			skb_queue_tail(&pch->file.xq, frag);
		pch->had_frag = 1;
		p += flen;
		len -= flen;
		++ppp->nxseq;
		bits = 0;
		spin_unlock_bh(&pch->downl);
	}
	ppp->nxchan = i;

	return 1;

 noskb:
	spin_unlock_bh(&pch->downl);
	if (ppp->debug & 1)
		netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
	++ppp->dev->stats.tx_errors;
	++ppp->nxseq;
	return 1;	/* abandon the frame */
}
#endif /* CONFIG_PPP_MULTILINK */

/*
 * Try to send data out on a channel.
 */
static void
ppp_channel_push(struct channel *pch)
{
	struct sk_buff *skb;
	struct ppp *ppp;

	spin_lock_bh(&pch->downl);
	if (pch->chan) {
		while (!skb_queue_empty(&pch->file.xq)) {
			skb = skb_dequeue(&pch->file.xq);
			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
				/* put the packet back and try again later */
				skb_queue_head(&pch->file.xq, skb);
				break;
			}
		}
	} else {
		/* channel got deregistered */
		skb_queue_purge(&pch->file.xq);
	}
	spin_unlock_bh(&pch->downl);
	/* see if there is anything from the attached unit to be sent */
	if (skb_queue_empty(&pch->file.xq)) {
		read_lock_bh(&pch->upl);
		ppp = pch->ppp;
		if (ppp)
			ppp_xmit_process(ppp);
		read_unlock_bh(&pch->upl);
	}
}

/*
 * Receive-side routines.
 */

struct ppp_mp_skb_parm {
	u32		sequence;
	u8		BEbits;
};
#define PPP_MP_CB(skb)	((struct ppp_mp_skb_parm *)((skb)->cb))
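
/*
 * PPP_MP_CB() overlays the per-fragment reassembly state on the skb
 * control buffer (skb->cb, 48 bytes, comfortably large enough).
 * A sketch of typical use, as in the receive path below:
 *
 *	PPP_MP_CB(skb)->sequence = seq;		expanded 32-bit seq number
 *	if (PPP_MP_CB(skb)->BEbits & B)
 *		...this fragment begins a reassembled packet...
 */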

static inline void
ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
	ppp_recv_lock(ppp);
	if (!ppp->closing)
		ppp_receive_frame(ppp, skb, pch);
	else
		kfree_skb(skb);
	ppp_recv_unlock(ppp);
}

void
ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct channel *pch = chan->ppp;
	int proto;

	if (!pch) {
		kfree_skb(skb);
		return;
	}

	read_lock_bh(&pch->upl);
	if (!pskb_may_pull(skb, 2)) {
		kfree_skb(skb);
		if (pch->ppp) {
			++pch->ppp->dev->stats.rx_length_errors;
			ppp_receive_error(pch->ppp);
		}
		goto done;
	}

	proto = PPP_PROTO(skb);
	if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
		/* put it on the channel queue */
		skb_queue_tail(&pch->file.rq, skb);
		/* drop old frames if queue too long */
		while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
		       (skb = skb_dequeue(&pch->file.rq)))
			kfree_skb(skb);
		wake_up_interruptible(&pch->file.rwait);
	} else {
		ppp_do_recv(pch->ppp, skb, pch);
	}

done:
	read_unlock_bh(&pch->upl);
}

/* Put a 0-length skb in the receive queue as an error indication */
void
ppp_input_error(struct ppp_channel *chan, int code)
{
	struct channel *pch = chan->ppp;
	struct sk_buff *skb;

	if (!pch)
		return;

	read_lock_bh(&pch->upl);
	if (pch->ppp) {
		skb = alloc_skb(0, GFP_ATOMIC);
		if (skb) {
			skb->len = 0;		/* probably unnecessary */
			skb->cb[0] = code;
			ppp_do_recv(pch->ppp, skb, pch);
		}
	}
	read_unlock_bh(&pch->upl);
}

/*
 * We come in here to process a received frame.
 * The receive side of the ppp unit is locked.
 */
static void
ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
	/* note: a 0-length skb is used as an error indication */
	if (skb->len > 0) {
#ifdef CONFIG_PPP_MULTILINK
		/* XXX do channel-level decompression here */
		if (PPP_PROTO(skb) == PPP_MP)
			ppp_receive_mp_frame(ppp, skb, pch);
		else
#endif /* CONFIG_PPP_MULTILINK */
			ppp_receive_nonmp_frame(ppp, skb);
	} else {
		kfree_skb(skb);
		ppp_receive_error(ppp);
	}
}

static void
ppp_receive_error(struct ppp *ppp)
{
	++ppp->dev->stats.rx_errors;
	if (ppp->vj)
		slhc_toss(ppp->vj);
}

static void
ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *ns;
	int proto, len, npi;

	/*
	 * Decompress the frame, if compressed.
	 * Note that some decompressors need to see uncompressed frames
	 * that come in as well as compressed frames.
	 */
	if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) &&
	    (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
		skb = ppp_decompress_frame(ppp, skb);

	if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
		goto err;

	proto = PPP_PROTO(skb);
	switch (proto) {
	case PPP_VJC_COMP:
		/* decompress VJ compressed packets */
		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
			goto err;

		if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
			/* copy to a new sk_buff with more tailroom */
			ns = dev_alloc_skb(skb->len + 128);
			if (!ns) {
				netdev_err(ppp->dev, "PPP: no memory "
					   "(VJ decomp)\n");
				goto err;
			}
			skb_reserve(ns, 2);
			skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
			consume_skb(skb);
			skb = ns;
		}
		else
			skb->ip_summed = CHECKSUM_NONE;

		len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
		if (len <= 0) {
			netdev_printk(KERN_DEBUG, ppp->dev,
				      "PPP: VJ decompression error\n");
			goto err;
		}
		len += 2;
		if (len > skb->len)
			skb_put(skb, len - skb->len);
		else if (len < skb->len)
			skb_trim(skb, len);
		proto = PPP_IP;
		break;

	case PPP_VJC_UNCOMP:
		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
			goto err;

		/* Until we fix the decompressor, we need to make sure
		 * the data portion is linear.
		 */
		if (!pskb_may_pull(skb, skb->len))
			goto err;

		if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
			netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
			goto err;
		}
		proto = PPP_IP;
		break;

	case PPP_CCP:
		ppp_ccp_peek(ppp, skb, 1);
		break;
	}

	++ppp->stats64.rx_packets;
	ppp->stats64.rx_bytes += skb->len - 2;

	npi = proto_to_npindex(proto);
	if (npi < 0) {
		/* control or unknown frame - pass it to pppd */
		skb_queue_tail(&ppp->file.rq, skb);
		/* limit queue length by dropping old frames */
		while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
		       (skb = skb_dequeue(&ppp->file.rq)))
			kfree_skb(skb);
		/* wake up any process polling or blocking on read */
		wake_up_interruptible(&ppp->file.rwait);

	} else {
		/* network protocol frame - give it to the kernel */

#ifdef CONFIG_PPP_FILTER
		/* check if the packet passes the pass and active filters */
		/* the filter instructions are constructed assuming
		   a four-byte PPP header on each packet */
		if (ppp->pass_filter || ppp->active_filter) {
			if (skb_unclone(skb, GFP_ATOMIC))
				goto err;

			*skb_push(skb, 2) = 0;
			if (ppp->pass_filter &&
			    SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
				if (ppp->debug & 1)
					netdev_printk(KERN_DEBUG, ppp->dev,
						      "PPP: inbound frame "
						      "not passed\n");
				kfree_skb(skb);
				return;
			}
			if (!(ppp->active_filter &&
			      SK_RUN_FILTER(ppp->active_filter, skb) == 0))
				ppp->last_recv = jiffies;
			__skb_pull(skb, 2);
		} else
#endif /* CONFIG_PPP_FILTER */
			ppp->last_recv = jiffies;

		if ((ppp->dev->flags & IFF_UP) == 0 ||
		    ppp->npmode[npi] != NPMODE_PASS) {
			kfree_skb(skb);
		} else {
			/* chop off protocol */
			skb_pull_rcsum(skb, 2);
			skb->dev = ppp->dev;
			skb->protocol = htons(npindex_to_ethertype[npi]);
			skb_reset_mac_header(skb);
			netif_rx(skb);
		}
	}
	return;

 err:
	kfree_skb(skb);
	ppp_receive_error(ppp);
}

static struct sk_buff *
ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
{
	int proto = PPP_PROTO(skb);
	struct sk_buff *ns;
	int len;

	/* Until we fix all the decompressors, we need to make sure
	 * the data portion is linear.
	 */
1879	if (!pskb_may_pull(skb, skb->len))
1880		goto err;
1881
1882	if (proto == PPP_COMP) {
1883		int obuff_size;
1884
1885		switch(ppp->rcomp->compress_proto) {
1886		case CI_MPPE:
1887			obuff_size = ppp->mru + PPP_HDRLEN + 1;
1888			break;
1889		default:
1890			obuff_size = ppp->mru + PPP_HDRLEN;
1891			break;
1892		}
1893
1894		ns = dev_alloc_skb(obuff_size);
1895		if (!ns) {
1896			netdev_err(ppp->dev, "ppp_decompress_frame: "
1897				   "no memory\n");
1898			goto err;
1899		}
1900		/* the decompressor still expects the A/C bytes in the hdr */
1901		len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
1902				skb->len + 2, ns->data, obuff_size);
1903		if (len < 0) {
1904			/* Pass the compressed frame to pppd as an
1905			   error indication. */
1906			if (len == DECOMP_FATALERROR)
1907				ppp->rstate |= SC_DC_FERROR;
1908			kfree_skb(ns);
1909			goto err;
1910		}
1911
1912		consume_skb(skb);
1913		skb = ns;
1914		skb_put(skb, len);
1915		skb_pull(skb, 2);	/* pull off the A/C bytes */
1916
1917	} else {
1918		/* Uncompressed frame - pass to decompressor so it
1919		   can update its dictionary if necessary. */
1920		if (ppp->rcomp->incomp)
1921			ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
1922					   skb->len + 2);
1923	}
1924
1925	return skb;
1926
1927 err:
1928	ppp->rstate |= SC_DC_ERROR;
1929	ppp_receive_error(ppp);
1930	return skb;
1931}
1932
1933#ifdef CONFIG_PPP_MULTILINK
1934/*
1935 * Receive a multilink frame.
1936 * We put it on the reconstruction queue and then pull off
1937 * as many completed frames as we can.
1938 */
1939static void
1940ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1941{
1942	u32 mask, seq;
1943	struct channel *ch;
1944	int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
1945
1946	if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
1947		goto err;		/* no good, throw it away */
1948
1949	/* Decode sequence number and begin/end bits */
1950	if (ppp->flags & SC_MP_SHORTSEQ) {
1951		seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
1952		mask = 0xfff;
1953	} else {
1954		seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5];
1955		mask = 0xffffff;
1956	}
1957	PPP_MP_CB(skb)->BEbits = skb->data[2];
1958	skb_pull(skb, mphdrlen);	/* pull off PPP and MP headers */
1959
1960	/*
1961	 * Do protocol ID decompression on the first fragment of each packet.
1962	 */
1963	if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1))
1964		*skb_push(skb, 1) = 0;
1965
1966	/*
1967	 * Expand sequence number to 32 bits, making it as close
1968	 * as possible to ppp->minseq.
1969	 */
1970	seq |= ppp->minseq & ~mask;
1971	if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
1972		seq += mask + 1;
1973	else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
1974		seq -= mask + 1;	/* should never happen */
1975	PPP_MP_CB(skb)->sequence = seq;
1976	pch->lastseq = seq;
1977
1978	/*
1979	 * If this packet comes before the next one we were expecting,
1980	 * drop it.
1981	 */
1982	if (seq_before(seq, ppp->nextseq)) {
1983		kfree_skb(skb);
1984		++ppp->dev->stats.rx_dropped;
1985		ppp_receive_error(ppp);
1986		return;
1987	}
1988
1989	/*
1990	 * Reevaluate minseq, the minimum over all channels of the
1991	 * last sequence number received on each channel.  Because of
1992	 * the increasing sequence number rule, we know that any fragment
1993	 * before `minseq' which hasn't arrived is never going to arrive.
1994	 * The list of channels can't change because we have the receive
1995	 * side of the ppp unit locked.
1996	 */
1997	list_for_each_entry(ch, &ppp->channels, clist) {
1998		if (seq_before(ch->lastseq, seq))
1999			seq = ch->lastseq;
2000	}
2001	if (seq_before(ppp->minseq, seq))
2002		ppp->minseq = seq;
2003
2004	/* Put the fragment on the reconstruction queue */
2005	ppp_mp_insert(ppp, skb);
2006
2007	/* If the queue is getting long, don't wait any longer for packets
2008	   before the start of the queue. */
2009	if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
2010		struct sk_buff *mskb = skb_peek(&ppp->mrq);
2011		if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence))
2012			ppp->minseq = PPP_MP_CB(mskb)->sequence;
2013	}
2014
2015	/* Pull completed packets off the queue and receive them. */
2016	while ((skb = ppp_mp_reconstruct(ppp))) {
2017		if (pskb_may_pull(skb, 2))
2018			ppp_receive_nonmp_frame(ppp, skb);
2019		else {
2020			++ppp->dev->stats.rx_length_errors;
2021			kfree_skb(skb);
2022			ppp_receive_error(ppp);
2023		}
2024	}
2025
2026	return;
2027
2028 err:
2029	kfree_skb(skb);
2030	ppp_receive_error(ppp);
2031}
2032
2033/*
2034 * Insert a fragment on the MP reconstruction queue.
2035 * The queue is ordered by increasing sequence number.
2036 */
2037static void
2038ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
2039{
2040	struct sk_buff *p;
2041	struct sk_buff_head *list = &ppp->mrq;
2042	u32 seq = PPP_MP_CB(skb)->sequence;
2043
2044	/* N.B. we don't need to take the list lock because we hold the
2045	   ppp unit's receive-side lock. */
2046	skb_queue_walk(list, p) {
2047		if (seq_before(seq, PPP_MP_CB(p)->sequence))
2048			break;
2049	}
2050	__skb_queue_before(list, p, skb);
2051}
2052
2053/*
2054 * Reconstruct a packet from the MP fragment queue.
2055 * We go through increasing sequence numbers until we find a
2056 * complete packet, or we get to the sequence number for a fragment
2057 * which hasn't arrived but might still do so.
2058 */
2059static struct sk_buff *
2060ppp_mp_reconstruct(struct ppp *ppp)
2061{
2062	u32 seq = ppp->nextseq;
2063	u32 minseq = ppp->minseq;
2064	struct sk_buff_head *list = &ppp->mrq;
2065	struct sk_buff *p, *tmp;
2066	struct sk_buff *head, *tail;
2067	struct sk_buff *skb = NULL;
2068	int lost = 0, len = 0;
2069
2070	if (ppp->mrru == 0)	/* do nothing until mrru is set */
2071		return NULL;
2072	head = list->next;
2073	tail = NULL;
2074	skb_queue_walk_safe(list, p, tmp) {
2075	again:
2076		if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
2077			/* this can't happen; ignore the skb anyway */
2078			netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
2079				   "seq %u < %u\n",
2080				   PPP_MP_CB(p)->sequence, seq);
2081			__skb_unlink(p, list);
2082			kfree_skb(p);
2083			continue;
2084		}
2085		if (PPP_MP_CB(p)->sequence != seq) {
2086			u32 oldseq;
2087			/* Fragment `seq' is missing.  If it is after
2088			   minseq, it might arrive later, so stop here. */
2089			if (seq_after(seq, minseq))
2090				break;
2091			/* Fragment `seq' is lost, keep going. */
2092			lost = 1;
2093			oldseq = seq;
2094			seq = seq_before(minseq, PPP_MP_CB(p)->sequence) ?
2095				minseq + 1 : PPP_MP_CB(p)->sequence;
2096
2097			if (ppp->debug & 1)
2098				netdev_printk(KERN_DEBUG, ppp->dev,
2099					      "lost frag %u..%u\n",
2100					      oldseq, seq-1);
2101
2102			goto again;
2103		}
2104
2105		/*
2106		 * At this point we know that all the fragments from
2107		 * ppp->nextseq to seq are either present or lost.
2108		 * Also, the queue contains no complete packet (i.e. one
2109		 * with no missing fragments) that ends before this
2110		 * fragment.
2111		 */
2112
2113		/* B bit set indicates this fragment starts a packet */
2114		if (PPP_MP_CB(p)->BEbits & B) {
2115			head = p;
2116			lost = 0;
2117			len = 0;
2118		}
2119
2120		len += p->len;
2121
2122		/* Got a complete packet yet? */
2123		if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) &&
2124		    (PPP_MP_CB(head)->BEbits & B)) {
2125			if (len > ppp->mrru + 2) {
2126				++ppp->dev->stats.rx_length_errors;
2127				netdev_printk(KERN_DEBUG, ppp->dev,
2128					      "PPP: reconstructed packet"
2129					      " is too long (%d)\n", len);
2130			} else {
2131				tail = p;
2132				break;
2133			}
2134			ppp->nextseq = seq + 1;
2135		}
2136
2137		/*
2138		 * If this is the ending fragment of a packet,
2139		 * and we haven't found a complete valid packet yet,
2140		 * we can discard up to and including this fragment.
2141		 */
2142		if (PPP_MP_CB(p)->BEbits & E) {
2143			struct sk_buff *tmp2;
2144
2145			skb_queue_reverse_walk_from_safe(list, p, tmp2) {
2146				if (ppp->debug & 1)
2147					netdev_printk(KERN_DEBUG, ppp->dev,
2148						      "discarding frag %u\n",
2149						      PPP_MP_CB(p)->sequence);
2150				__skb_unlink(p, list);
2151				kfree_skb(p);
2152			}
2153			head = skb_peek(list);
2154			if (!head)
2155				break;
2156		}
2157		++seq;
2158	}
2159
2160	/* If we have a complete packet, copy it all into one skb. */
2161	if (tail != NULL) {
2162		/* If we have discarded any fragments,
2163		   signal a receive error. */
2164		if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
2165			skb_queue_walk_safe(list, p, tmp) {
2166				if (p == head)
2167					break;
2168				if (ppp->debug & 1)
2169					netdev_printk(KERN_DEBUG, ppp->dev,
2170						      "discarding frag %u\n",
2171						      PPP_MP_CB(p)->sequence);
2172				__skb_unlink(p, list);
2173				kfree_skb(p);
2174			}
2175
2176			if (ppp->debug & 1)
2177				netdev_printk(KERN_DEBUG, ppp->dev,
2178					      "  missed pkts %u..%u\n",
2179					      ppp->nextseq,
2180					      PPP_MP_CB(head)->sequence-1);
2181			++ppp->dev->stats.rx_dropped;
2182			ppp_receive_error(ppp);
2183		}
2184
2185		skb = head;
2186		if (head != tail) {
2187			struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
2188			p = skb_queue_next(list, head);
2189			__skb_unlink(skb, list);
2190			skb_queue_walk_from_safe(list, p, tmp) {
2191				__skb_unlink(p, list);
2192				*fragpp = p;
2193				p->next = NULL;
2194				fragpp = &p->next;
2195
2196				skb->len += p->len;
2197				skb->data_len += p->len;
2198				skb->truesize += p->truesize;
2199
2200				if (p == tail)
2201					break;
2202			}
2203		} else {
2204			__skb_unlink(skb, list);
2205		}
2206
2207		ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
2208	}
2209
2210	return skb;
2211}
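/*
 * Illustrative example (not part of the driver): a packet split into
 * three fragments arrives as
 *
 *	seq 10: BEbits has B set	(first fragment)
 *	seq 11: BEbits == 0		(middle fragment)
 *	seq 12: BEbits has E set	(last fragment)
 *
 * Once all three are queued, ppp_mp_reconstruct() links fragments 11
 * and 12 onto fragment 10's frag_list and returns one skb with the
 * combined length.  If fragment 11 never arrives and minseq moves past
 * it, the partial packet is discarded and a receive error is signalled.
 */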
2212#endif /* CONFIG_PPP_MULTILINK */
2213
2214/*
2215 * Channel interface.
2216 */
2217
2218/* Create a new, unattached ppp channel. */
2219int ppp_register_channel(struct ppp_channel *chan)
2220{
2221	return ppp_register_net_channel(current->nsproxy->net_ns, chan);
2222}
2223
2224/* Create a new, unattached ppp channel for specified net. */
2225int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
2226{
2227	struct channel *pch;
2228	struct ppp_net *pn;
2229
2230	pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
2231	if (!pch)
2232		return -ENOMEM;
2233
2234	pn = ppp_pernet(net);
2235
2236	pch->ppp = NULL;
2237	pch->chan = chan;
2238	pch->chan_net = net;
2239	chan->ppp = pch;
2240	init_ppp_file(&pch->file, CHANNEL);
2241	pch->file.hdrlen = chan->hdrlen;
2242#ifdef CONFIG_PPP_MULTILINK
2243	pch->lastseq = -1;
2244#endif /* CONFIG_PPP_MULTILINK */
2245	init_rwsem(&pch->chan_sem);
2246	spin_lock_init(&pch->downl);
2247	rwlock_init(&pch->upl);
2248
2249	spin_lock_bh(&pn->all_channels_lock);
2250	pch->file.index = ++pn->last_channel_index;
2251	list_add(&pch->list, &pn->new_channels);
2252	atomic_inc(&channel_count);
2253	spin_unlock_bh(&pn->all_channels_lock);
2254
2255	return 0;
2256}
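/*
 * A minimal sketch of how a channel driver might use this interface
 * (the my_* names are hypothetical, not part of this file):
 *
 *	static const struct ppp_channel_ops my_ops = {
 *		.start_xmit = my_chan_xmit,	// hypothetical xmit handler
 *		.ioctl      = my_chan_ioctl,	// hypothetical ioctl handler
 *	};
 *
 *	chan->private = my_state;	// hypothetical driver state
 *	chan->ops     = &my_ops;
 *	chan->mtu     = 1500;		// example value
 *	chan->hdrlen  = 0;		// header space the link layer needs
 *	err = ppp_register_channel(chan);
 *
 * The channel then shows up with a channel number that userspace can
 * attach to, until ppp_unregister_channel() is called.
 */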
2257
2258/*
2259 * Return the index of a channel.
2260 */
2261int ppp_channel_index(struct ppp_channel *chan)
2262{
2263	struct channel *pch = chan->ppp;
2264
2265	if (pch)
2266		return pch->file.index;
2267	return -1;
2268}
2269
2270/*
2271 * Return the PPP unit number to which a channel is connected.
2272 */
2273int ppp_unit_number(struct ppp_channel *chan)
2274{
2275	struct channel *pch = chan->ppp;
2276	int unit = -1;
2277
2278	if (pch) {
2279		read_lock_bh(&pch->upl);
2280		if (pch->ppp)
2281			unit = pch->ppp->file.index;
2282		read_unlock_bh(&pch->upl);
2283	}
2284	return unit;
2285}
2286
2287/*
2288 * Return the PPP device interface name of a channel.
2289 */
2290char *ppp_dev_name(struct ppp_channel *chan)
2291{
2292	struct channel *pch = chan->ppp;
2293	char *name = NULL;
2294
2295	if (pch) {
2296		read_lock_bh(&pch->upl);
2297		if (pch->ppp && pch->ppp->dev)
2298			name = pch->ppp->dev->name;
2299		read_unlock_bh(&pch->upl);
2300	}
2301	return name;
2302}
2303
2304
2305/*
2306 * Disconnect a channel from the generic layer.
2307 * This must be called in process context.
2308 */
2309void
2310ppp_unregister_channel(struct ppp_channel *chan)
2311{
2312	struct channel *pch = chan->ppp;
2313	struct ppp_net *pn;
2314
2315	if (!pch)
2316		return;		/* should never happen */
2317
2318	chan->ppp = NULL;
2319
2320	/*
2321	 * This ensures that we have returned from any calls into the
2322	 * channel's start_xmit or ioctl routine before we proceed.
2323	 */
2324	down_write(&pch->chan_sem);
2325	spin_lock_bh(&pch->downl);
2326	pch->chan = NULL;
2327	spin_unlock_bh(&pch->downl);
2328	up_write(&pch->chan_sem);
2329	ppp_disconnect_channel(pch);
2330
2331	pn = ppp_pernet(pch->chan_net);
2332	spin_lock_bh(&pn->all_channels_lock);
2333	list_del(&pch->list);
2334	spin_unlock_bh(&pn->all_channels_lock);
2335
2336	pch->file.dead = 1;
2337	wake_up_interruptible(&pch->file.rwait);
2338	if (atomic_dec_and_test(&pch->file.refcnt))
2339		ppp_destroy_channel(pch);
2340}
2341
2342/*
2343 * Callback from a channel when it can accept more to transmit.
2344 * This should be called at BH/softirq level, not interrupt level.
2345 */
2346void
2347ppp_output_wakeup(struct ppp_channel *chan)
2348{
2349	struct channel *pch = chan->ppp;
2350
2351	if (!pch)
2352		return;
2353	ppp_channel_push(pch);
2354}
2355
2356/*
2357 * Compression control.
2358 */
2359
2360/* Process the PPPIOCSCOMPRESS ioctl. */
2361static int
2362ppp_set_compress(struct ppp *ppp, unsigned long arg)
2363{
2364	int err;
2365	struct compressor *cp, *ocomp;
2366	struct ppp_option_data data;
2367	void *state, *ostate;
2368	unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
2369
2370	err = -EFAULT;
2371	if (copy_from_user(&data, (void __user *) arg, sizeof(data)) ||
2372	    (data.length <= CCP_MAX_OPTION_LENGTH &&
2373	     copy_from_user(ccp_option, (void __user *) data.ptr, data.length)))
2374		goto out;
2375	err = -EINVAL;
2376	if (data.length > CCP_MAX_OPTION_LENGTH ||
2377	    ccp_option[1] < 2 || ccp_option[1] > data.length)
2378		goto out;
2379
2380	cp = try_then_request_module(
2381		find_compressor(ccp_option[0]),
2382		"ppp-compress-%d", ccp_option[0]);
2383	if (!cp)
2384		goto out;
2385
2386	err = -ENOBUFS;
2387	if (data.transmit) {
2388		state = cp->comp_alloc(ccp_option, data.length);
2389		if (state) {
2390			ppp_xmit_lock(ppp);
2391			ppp->xstate &= ~SC_COMP_RUN;
2392			ocomp = ppp->xcomp;
2393			ostate = ppp->xc_state;
2394			ppp->xcomp = cp;
2395			ppp->xc_state = state;
2396			ppp_xmit_unlock(ppp);
2397			if (ostate) {
2398				ocomp->comp_free(ostate);
2399				module_put(ocomp->owner);
2400			}
2401			err = 0;
2402		} else
2403			module_put(cp->owner);
2404
2405	} else {
2406		state = cp->decomp_alloc(ccp_option, data.length);
2407		if (state) {
2408			ppp_recv_lock(ppp);
2409			ppp->rstate &= ~SC_DECOMP_RUN;
2410			ocomp = ppp->rcomp;
2411			ostate = ppp->rc_state;
2412			ppp->rcomp = cp;
2413			ppp->rc_state = state;
2414			ppp_recv_unlock(ppp);
2415			if (ostate) {
2416				ocomp->decomp_free(ostate);
2417				module_put(ocomp->owner);
2418			}
2419			err = 0;
2420		} else
2421			module_put(cp->owner);
2422	}
2423
2424 out:
2425	return err;
2426}
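/*
 * For illustration, how userspace (e.g. pppd) drives this ioctl; the
 * option bytes are shown schematically, since they depend on the
 * compressor negotiated by CCP:
 *
 *	struct ppp_option_data data;
 *	unsigned char opt[CCP_MAX_OPTION_LENGTH];
 *
 *	opt[0] = ...;		// CCP option type, e.g. CI_DEFLATE
 *	opt[1] = ...;		// option length, must be >= 2
 *	...			// option-specific bytes follow
 *	data.ptr = opt;
 *	data.length = opt[1];
 *	data.transmit = 1;	// 1 = transmit side, 0 = receive side
 *	if (ioctl(ppp_fd, PPPIOCSCOMPRESS, &data) < 0)
 *		...;		// e.g. ENOBUFS if state allocation failed
 */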
2427
2428/*
2429 * Look at a CCP packet and update our state accordingly.
2430 * We assume the caller has the xmit or recv path locked.
2431 */
2432static void
2433ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
2434{
2435	unsigned char *dp;
2436	int len;
2437
2438	if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
2439		return;	/* no header */
2440	dp = skb->data + 2;
2441
2442	switch (CCP_CODE(dp)) {
2443	case CCP_CONFREQ:
2444
2445		/* A ConfReq starts negotiation of compression
2446		 * in one direction of transmission,
2447		 * and hence brings it down...but which way?
2448		 *
2449		 * Remember:
2450		 * A ConfReq indicates what the sender would like to receive
2451		 */
2452		if (inbound)
2453			/* He is proposing what I should send */
2454			ppp->xstate &= ~SC_COMP_RUN;
2455		else
2456			/* I am proposing what he should send */
2457			ppp->rstate &= ~SC_DECOMP_RUN;
2458
2459		break;
2460
2461	case CCP_TERMREQ:
2462	case CCP_TERMACK:
2463		/*
2464		 * CCP is going down, both directions of transmission
2465		 */
2466		ppp->rstate &= ~SC_DECOMP_RUN;
2467		ppp->xstate &= ~SC_COMP_RUN;
2468		break;
2469
2470	case CCP_CONFACK:
2471		if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
2472			break;
2473		len = CCP_LENGTH(dp);
2474		if (!pskb_may_pull(skb, len + 2))
2475			return;		/* too short */
2476		dp += CCP_HDRLEN;
2477		len -= CCP_HDRLEN;
2478		if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
2479			break;
2480		if (inbound) {
2481			/* we will start receiving compressed packets */
2482			if (!ppp->rc_state)
2483				break;
2484			if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
2485					ppp->file.index, 0, ppp->mru, ppp->debug)) {
2486				ppp->rstate |= SC_DECOMP_RUN;
2487				ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
2488			}
2489		} else {
2490			/* we will soon start sending compressed packets */
2491			if (!ppp->xc_state)
2492				break;
2493			if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
2494					ppp->file.index, 0, ppp->debug))
2495				ppp->xstate |= SC_COMP_RUN;
2496		}
2497		break;
2498
2499	case CCP_RESETACK:
2500		/* reset the [de]compressor */
2501		if ((ppp->flags & SC_CCP_UP) == 0)
2502			break;
2503		if (inbound) {
2504			if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
2505				ppp->rcomp->decomp_reset(ppp->rc_state);
2506				ppp->rstate &= ~SC_DC_ERROR;
2507			}
2508		} else {
2509			if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
2510				ppp->xcomp->comp_reset(ppp->xc_state);
2511		}
2512		break;
2513	}
2514}
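/*
 * For reference, the CCP packet layout assumed above (RFC 1962):
 * code (1 byte), id (1 byte), length (2 bytes), followed by options,
 * where each option is type (1 byte), length (1 byte), then data.
 * CCP_CODE(), CCP_LENGTH() and CCP_OPT_LENGTH() extract these fields
 * from `dp'.
 */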
2515
2516/* Free up compression resources. */
2517static void
2518ppp_ccp_closed(struct ppp *ppp)
2519{
2520	void *xstate, *rstate;
2521	struct compressor *xcomp, *rcomp;
2522
2523	ppp_lock(ppp);
2524	ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
2525	ppp->xstate = 0;
2526	xcomp = ppp->xcomp;
2527	xstate = ppp->xc_state;
2528	ppp->xc_state = NULL;
2529	ppp->rstate = 0;
2530	rcomp = ppp->rcomp;
2531	rstate = ppp->rc_state;
2532	ppp->rc_state = NULL;
2533	ppp_unlock(ppp);
2534
2535	if (xstate) {
2536		xcomp->comp_free(xstate);
2537		module_put(xcomp->owner);
2538	}
2539	if (rstate) {
2540		rcomp->decomp_free(rstate);
2541		module_put(rcomp->owner);
2542	}
2543}
2544
2545/* List of compressors. */
2546static LIST_HEAD(compressor_list);
2547static DEFINE_SPINLOCK(compressor_list_lock);
2548
2549struct compressor_entry {
2550	struct list_head list;
2551	struct compressor *comp;
2552};
2553
2554static struct compressor_entry *
2555find_comp_entry(int proto)
2556{
2557	struct compressor_entry *ce;
2558
2559	list_for_each_entry(ce, &compressor_list, list) {
2560		if (ce->comp->compress_proto == proto)
2561			return ce;
2562	}
2563	return NULL;
2564}
2565
2566/* Register a compressor */
2567int
2568ppp_register_compressor(struct compressor *cp)
2569{
2570	struct compressor_entry *ce;
2571	int ret;
2572	spin_lock(&compressor_list_lock);
2573	ret = -EEXIST;
2574	if (find_comp_entry(cp->compress_proto))
2575		goto out;
2576	ret = -ENOMEM;
2577	ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
2578	if (!ce)
2579		goto out;
2580	ret = 0;
2581	ce->comp = cp;
2582	list_add(&ce->list, &compressor_list);
2583 out:
2584	spin_unlock(&compressor_list_lock);
2585	return ret;
2586}
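/*
 * A compressor module typically registers itself at init time; an
 * illustrative sketch (the my_* names are hypothetical):
 *
 *	static struct compressor my_comp = {
 *		.compress_proto	= CI_MPPE,	// example: CCP option type handled
 *		.comp_alloc	= my_comp_alloc,
 *		.comp_free	= my_comp_free,
 *		.compress	= my_compress,
 *		.decomp_alloc	= my_decomp_alloc,
 *		.decomp_free	= my_decomp_free,
 *		.decompress	= my_decompress,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return ppp_register_compressor(&my_comp);
 *	}
 *
 * With .owner set, find_compressor() pins the module with
 * try_module_get() for as long as the compressor state is in use.
 */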
2587
2588/* Unregister a compressor */
2589void
2590ppp_unregister_compressor(struct compressor *cp)
2591{
2592	struct compressor_entry *ce;
2593
2594	spin_lock(&compressor_list_lock);
2595	ce = find_comp_entry(cp->compress_proto);
2596	if (ce && ce->comp == cp) {
2597		list_del(&ce->list);
2598		kfree(ce);
2599	}
2600	spin_unlock(&compressor_list_lock);
2601}
2602
2603/* Find a compressor. */
2604static struct compressor *
2605find_compressor(int type)
2606{
2607	struct compressor_entry *ce;
2608	struct compressor *cp = NULL;
2609
2610	spin_lock(&compressor_list_lock);
2611	ce = find_comp_entry(type);
2612	if (ce) {
2613		cp = ce->comp;
2614		if (!try_module_get(cp->owner))
2615			cp = NULL;
2616	}
2617	spin_unlock(&compressor_list_lock);
2618	return cp;
2619}
2620
2621/*
2622 * Miscellaneous stuff.
2623 */
2624
2625static void
2626ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
2627{
2628	struct slcompress *vj = ppp->vj;
2629
2630	memset(st, 0, sizeof(*st));
2631	st->p.ppp_ipackets = ppp->stats64.rx_packets;
2632	st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
2633	st->p.ppp_ibytes = ppp->stats64.rx_bytes;
2634	st->p.ppp_opackets = ppp->stats64.tx_packets;
2635	st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
2636	st->p.ppp_obytes = ppp->stats64.tx_bytes;
2637	if (!vj)
2638		return;
2639	st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
2640	st->vj.vjs_compressed = vj->sls_o_compressed;
2641	st->vj.vjs_searches = vj->sls_o_searches;
2642	st->vj.vjs_misses = vj->sls_o_misses;
2643	st->vj.vjs_errorin = vj->sls_i_error;
2644	st->vj.vjs_tossed = vj->sls_i_tossed;
2645	st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
2646	st->vj.vjs_compressedin = vj->sls_i_compressed;
2647}
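/*
 * Userspace can retrieve these counters with the SIOCGPPPSTATS ioctl;
 * an illustrative sketch (any socket fd will do for SIOC* ioctls):
 *
 *	struct ifreq ifr;
 *	struct ppp_stats st;
 *
 *	strncpy(ifr.ifr_name, "ppp0", sizeof(ifr.ifr_name));
 *	ifr.ifr_data = (void *)&st;
 *	if (ioctl(sock_fd, SIOCGPPPSTATS, &ifr) == 0)
 *		...;	// st.p and st.vj now hold the values filled in here
 */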
2648
2649/*
2650 * Stuff for handling the lists of ppp units and channels
2651 * and for initialization.
2652 */
2653
2654/*
2655 * Create a new ppp interface unit.  Fails if it can't allocate memory
2656 * or if there is already a unit with the requested number.
2657 * unit == -1 means allocate a new number.
2658 */
2659static struct ppp *
2660ppp_create_interface(struct net *net, int unit, int *retp)
2661{
2662	struct ppp *ppp;
2663	struct ppp_net *pn;
2664	struct net_device *dev = NULL;
2665	int ret = -ENOMEM;
2666	int i;
2667
2668	dev = alloc_netdev(sizeof(struct ppp), "", ppp_setup);
2669	if (!dev)
2670		goto out1;
2671
2672	pn = ppp_pernet(net);
2673
2674	ppp = netdev_priv(dev);
2675	ppp->dev = dev;
2676	ppp->mru = PPP_MRU;
2677	init_ppp_file(&ppp->file, INTERFACE);
2678	ppp->file.hdrlen = PPP_HDRLEN - 2;	/* don't count proto bytes */
2679	for (i = 0; i < NUM_NP; ++i)
2680		ppp->npmode[i] = NPMODE_PASS;
2681	INIT_LIST_HEAD(&ppp->channels);
2682	spin_lock_init(&ppp->rlock);
2683	spin_lock_init(&ppp->wlock);
2684#ifdef CONFIG_PPP_MULTILINK
2685	ppp->minseq = -1;
2686	skb_queue_head_init(&ppp->mrq);
2687#endif /* CONFIG_PPP_MULTILINK */
2688#ifdef CONFIG_PPP_FILTER
2689	ppp->pass_filter = NULL;
2690	ppp->active_filter = NULL;
2691#endif /* CONFIG_PPP_FILTER */
2692
2693	/*
2694	 * Don't forget to set the net namespace
2695	 * this device belongs to.
2696	 */
2697	dev_net_set(dev, net);
2698
2699	mutex_lock(&pn->all_ppp_mutex);
2700
2701	if (unit < 0) {
2702		unit = unit_get(&pn->units_idr, ppp);
2703		if (unit < 0) {
2704			ret = unit;
2705			goto out2;
2706		}
2707	} else {
2708		ret = -EEXIST;
2709		if (unit_find(&pn->units_idr, unit))
2710			goto out2; /* unit already exists */
2711		/*
2712		 * If the caller asked for a specific unit number, try
2713		 * to satisfy the request; otherwise the caller should
2714		 * ask for a new unit number.
2715		 *
2716		 * NOTE: returning -EEXIST here is not strictly fair,
2717		 * but pppd will then ask us to allocate a new unit,
2718		 * so the user still ends up with a working interface.
2719		 */
2720		unit = unit_set(&pn->units_idr, ppp, unit);
2721		if (unit < 0)
2722			goto out2;
2723	}
2724
2725	/* Initialize the new ppp unit */
2726	ppp->file.index = unit;
2727	sprintf(dev->name, "ppp%d", unit);
2728
2729	ret = register_netdev(dev);
2730	if (ret != 0) {
2731		unit_put(&pn->units_idr, unit);
2732		netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
2733			   dev->name, ret);
2734		goto out2;
2735	}
2736
2737	ppp->ppp_net = net;
2738
2739	atomic_inc(&ppp_unit_count);
2740	mutex_unlock(&pn->all_ppp_mutex);
2741
2742	*retp = 0;
2743	return ppp;
2744
2745out2:
2746	mutex_unlock(&pn->all_ppp_mutex);
2747	free_netdev(dev);
2748out1:
2749	*retp = ret;
2750	return NULL;
2751}
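/*
 * Units are created from userspace through /dev/ppp; an illustrative
 * sketch of what pppd does:
 *
 *	int fd = open("/dev/ppp", O_RDWR);
 *	int unit = -1;			// -1 means "any free unit number"
 *	if (ioctl(fd, PPPIOCNEWUNIT, &unit) == 0)
 *		...;			// interface "ppp<unit>" now exists
 */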
2752
2753/*
2754 * Initialize a ppp_file structure.
2755 */
2756static void
2757init_ppp_file(struct ppp_file *pf, int kind)
2758{
2759	pf->kind = kind;
2760	skb_queue_head_init(&pf->xq);
2761	skb_queue_head_init(&pf->rq);
2762	atomic_set(&pf->refcnt, 1);
2763	init_waitqueue_head(&pf->rwait);
2764}
2765
2766/*
2767 * Take down a ppp interface unit - called when the owning file
2768 * (the one that created the unit) is closed or detached.
2769 */
2770static void ppp_shutdown_interface(struct ppp *ppp)
2771{
2772	struct ppp_net *pn;
2773
2774	pn = ppp_pernet(ppp->ppp_net);
2775	mutex_lock(&pn->all_ppp_mutex);
2776
2777	/* This will call dev_close() for us. */
2778	ppp_lock(ppp);
2779	if (!ppp->closing) {
2780		ppp->closing = 1;
2781		ppp_unlock(ppp);
2782		unregister_netdev(ppp->dev);
2783		unit_put(&pn->units_idr, ppp->file.index);
2784	} else
2785		ppp_unlock(ppp);
2786
2787	ppp->file.dead = 1;
2788	ppp->owner = NULL;
2789	wake_up_interruptible(&ppp->file.rwait);
2790
2791	mutex_unlock(&pn->all_ppp_mutex);
2792}
2793
2794/*
2795 * Free the memory used by a ppp unit.  This is only called once
2796 * there are no channels connected to the unit and no file structs
2797 * that reference the unit.
2798 */
2799static void ppp_destroy_interface(struct ppp *ppp)
2800{
2801	atomic_dec(&ppp_unit_count);
2802
2803	if (!ppp->file.dead || ppp->n_channels) {
2804		/* "can't happen" */
2805		netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
2806			   "but dead=%d n_channels=%d !\n",
2807			   ppp, ppp->file.dead, ppp->n_channels);
2808		return;
2809	}
2810
2811	ppp_ccp_closed(ppp);
2812	if (ppp->vj) {
2813		slhc_free(ppp->vj);
2814		ppp->vj = NULL;
2815	}
2816	skb_queue_purge(&ppp->file.xq);
2817	skb_queue_purge(&ppp->file.rq);
2818#ifdef CONFIG_PPP_MULTILINK
2819	skb_queue_purge(&ppp->mrq);
2820#endif /* CONFIG_PPP_MULTILINK */
2821#ifdef CONFIG_PPP_FILTER
2822	if (ppp->pass_filter) {
2823		sk_unattached_filter_destroy(ppp->pass_filter);
2824		ppp->pass_filter = NULL;
2825	}
2826
2827	if (ppp->active_filter) {
2828		sk_unattached_filter_destroy(ppp->active_filter);
2829		ppp->active_filter = NULL;
2830	}
2831#endif /* CONFIG_PPP_FILTER */
2832
2833	kfree_skb(ppp->xmit_pending);
2834
2835	free_netdev(ppp->dev);
2836}
2837
2838/*
2839 * Locate an existing ppp unit.
2840 * The caller should have locked the all_ppp_mutex.
2841 */
2842static struct ppp *
2843ppp_find_unit(struct ppp_net *pn, int unit)
2844{
2845	return unit_find(&pn->units_idr, unit);
2846}
2847
2848/*
2849 * Locate an existing ppp channel.
2850 * The caller should have locked the all_channels_lock.
2851 * First we look in the new_channels list, then in the
2852 * all_channels list.  If found in the new_channels list,
2853 * we move it to the all_channels list.  This is for speed
2854 * when we have a lot of channels in use.
2855 */
2856static struct channel *
2857ppp_find_channel(struct ppp_net *pn, int unit)
2858{
2859	struct channel *pch;
2860
2861	list_for_each_entry(pch, &pn->new_channels, list) {
2862		if (pch->file.index == unit) {
2863			list_move(&pch->list, &pn->all_channels);
2864			return pch;
2865		}
2866	}
2867
2868	list_for_each_entry(pch, &pn->all_channels, list) {
2869		if (pch->file.index == unit)
2870			return pch;
2871	}
2872
2873	return NULL;
2874}
2875
2876/*
2877 * Connect a PPP channel to a PPP interface unit.
2878 */
2879static int
2880ppp_connect_channel(struct channel *pch, int unit)
2881{
2882	struct ppp *ppp;
2883	struct ppp_net *pn;
2884	int ret = -ENXIO;
2885	int hdrlen;
2886
2887	pn = ppp_pernet(pch->chan_net);
2888
2889	mutex_lock(&pn->all_ppp_mutex);
2890	ppp = ppp_find_unit(pn, unit);
2891	if (!ppp)
2892		goto out;
2893	write_lock_bh(&pch->upl);
2894	ret = -EINVAL;
2895	if (pch->ppp)
2896		goto outl;
2897
2898	ppp_lock(ppp);
2899	if (pch->file.hdrlen > ppp->file.hdrlen)
2900		ppp->file.hdrlen = pch->file.hdrlen;
2901	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
2902	if (hdrlen > ppp->dev->hard_header_len)
2903		ppp->dev->hard_header_len = hdrlen;
2904	list_add_tail(&pch->clist, &ppp->channels);
2905	++ppp->n_channels;
2906	pch->ppp = ppp;
2907	atomic_inc(&ppp->file.refcnt);
2908	ppp_unlock(ppp);
2909	ret = 0;
2910
2911 outl:
2912	write_unlock_bh(&pch->upl);
2913 out:
2914	mutex_unlock(&pn->all_ppp_mutex);
2915	return ret;
2916}
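/*
 * The connect path is driven from userspace: pppd opens /dev/ppp,
 * attaches the fd to a channel, then binds the channel to a unit
 * (illustrative; the channel number is typically obtained from the
 * low-level driver, e.g. via a tty's PPPIOCGCHAN):
 *
 *	int cfd = open("/dev/ppp", O_RDWR);
 *	ioctl(cfd, PPPIOCATTCHAN, &channel_index);
 *	ioctl(cfd, PPPIOCCONNECT, &unit);	// ends up here
 */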
2917
2918/*
2919 * Disconnect a channel from its ppp unit.
2920 */
2921static int
2922ppp_disconnect_channel(struct channel *pch)
2923{
2924	struct ppp *ppp;
2925	int err = -EINVAL;
2926
2927	write_lock_bh(&pch->upl);
2928	ppp = pch->ppp;
2929	pch->ppp = NULL;
2930	write_unlock_bh(&pch->upl);
2931	if (ppp) {
2932		/* remove it from the ppp unit's list */
2933		ppp_lock(ppp);
2934		list_del(&pch->clist);
2935		if (--ppp->n_channels == 0)
2936			wake_up_interruptible(&ppp->file.rwait);
2937		ppp_unlock(ppp);
2938		if (atomic_dec_and_test(&ppp->file.refcnt))
2939			ppp_destroy_interface(ppp);
2940		err = 0;
2941	}
2942	return err;
2943}
2944
2945/*
2946 * Free up the resources used by a ppp channel.
2947 */
2948static void ppp_destroy_channel(struct channel *pch)
2949{
2950	atomic_dec(&channel_count);
2951
2952	if (!pch->file.dead) {
2953		/* "can't happen" */
2954		pr_err("ppp: destroying undead channel %p !\n", pch);
2955		return;
2956	}
2957	skb_queue_purge(&pch->file.xq);
2958	skb_queue_purge(&pch->file.rq);
2959	kfree(pch);
2960}
2961
2962static void __exit ppp_cleanup(void)
2963{
2964	/* should never happen */
2965	if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
2966		pr_err("PPP: removing module but units remain!\n");
2967	unregister_chrdev(PPP_MAJOR, "ppp");
2968	device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
2969	class_destroy(ppp_class);
2970	unregister_pernet_device(&ppp_net_ops);
2971}
2972
2973/*
2974 * Unit number handling.  Callers must protect concurrent access
2975 * by holding all_ppp_mutex.
2976 */
2977
2978/* associate pointer with specified number */
2979static int unit_set(struct idr *p, void *ptr, int n)
2980{
2981	int unit;
2982
2983	unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL);
2984	if (unit == -ENOSPC)
2985		unit = -EINVAL;
2986	return unit;
2987}
2988
2989/* get new free unit number and associate pointer with it */
2990static int unit_get(struct idr *p, void *ptr)
2991{
2992	return idr_alloc(p, ptr, 0, 0, GFP_KERNEL);
2993}
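/*
 * Note on the idr calls above: idr_alloc(p, ptr, n, n + 1, ...) either
 * returns exactly `n' or fails with -ENOSPC (mapped to -EINVAL in
 * unit_set()), while idr_alloc(p, ptr, 0, 0, ...) treats the upper
 * bound as unlimited and returns the lowest free unit number.
 */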
2994
2995/* put unit number back to a pool */
2996static void unit_put(struct idr *p, int n)
2997{
2998	idr_remove(p, n);
2999}
3000
3001/* get pointer associated with the number */
3002static void *unit_find(struct idr *p, int n)
3003{
3004	return idr_find(p, n);
3005}
3006
3007/* Module/initialization stuff */
3008
3009module_init(ppp_init);
3010module_exit(ppp_cleanup);
3011
3012EXPORT_SYMBOL(ppp_register_net_channel);
3013EXPORT_SYMBOL(ppp_register_channel);
3014EXPORT_SYMBOL(ppp_unregister_channel);
3015EXPORT_SYMBOL(ppp_channel_index);
3016EXPORT_SYMBOL(ppp_unit_number);
3017EXPORT_SYMBOL(ppp_dev_name);
3018EXPORT_SYMBOL(ppp_input);
3019EXPORT_SYMBOL(ppp_input_error);
3020EXPORT_SYMBOL(ppp_output_wakeup);
3021EXPORT_SYMBOL(ppp_register_compressor);
3022EXPORT_SYMBOL(ppp_unregister_compressor);
3023MODULE_LICENSE("GPL");
3024MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
3025MODULE_ALIAS("devname:ppp");
v3.5.6
   1/*
   2 * Generic PPP layer for Linux.
   3 *
   4 * Copyright 1999-2002 Paul Mackerras.
   5 *
   6 *  This program is free software; you can redistribute it and/or
   7 *  modify it under the terms of the GNU General Public License
   8 *  as published by the Free Software Foundation; either version
   9 *  2 of the License, or (at your option) any later version.
  10 *
  11 * The generic PPP layer handles the PPP network interfaces, the
  12 * /dev/ppp device, packet and VJ compression, and multilink.
  13 * It talks to PPP `channels' via the interface defined in
  14 * include/linux/ppp_channel.h.  Channels provide the basic means for
  15 * sending and receiving PPP frames on some kind of communications
  16 * channel.
  17 *
  18 * Part of the code in this driver was inspired by the old async-only
  19 * PPP driver, written by Michael Callahan and Al Longyear, and
  20 * subsequently hacked by Paul Mackerras.
  21 *
  22 * ==FILEVERSION 20041108==
  23 */
  24
  25#include <linux/module.h>
  26#include <linux/kernel.h>
  27#include <linux/kmod.h>
  28#include <linux/init.h>
  29#include <linux/list.h>
  30#include <linux/idr.h>
  31#include <linux/netdevice.h>
  32#include <linux/poll.h>
  33#include <linux/ppp_defs.h>
  34#include <linux/filter.h>
  35#include <linux/ppp-ioctl.h>
  36#include <linux/ppp_channel.h>
  37#include <linux/ppp-comp.h>
  38#include <linux/skbuff.h>
  39#include <linux/rtnetlink.h>
  40#include <linux/if_arp.h>
  41#include <linux/ip.h>
  42#include <linux/tcp.h>
  43#include <linux/spinlock.h>
  44#include <linux/rwsem.h>
  45#include <linux/stddef.h>
  46#include <linux/device.h>
  47#include <linux/mutex.h>
  48#include <linux/slab.h>
  49#include <asm/unaligned.h>
  50#include <net/slhc_vj.h>
  51#include <linux/atomic.h>
  52
  53#include <linux/nsproxy.h>
  54#include <net/net_namespace.h>
  55#include <net/netns/generic.h>
  56
  57#define PPP_VERSION	"2.4.2"
  58
  59/*
  60 * Network protocols we support.
  61 */
  62#define NP_IP	0		/* Internet Protocol V4 */
  63#define NP_IPV6	1		/* Internet Protocol V6 */
  64#define NP_IPX	2		/* IPX protocol */
  65#define NP_AT	3		/* Appletalk protocol */
  66#define NP_MPLS_UC 4		/* MPLS unicast */
  67#define NP_MPLS_MC 5		/* MPLS multicast */
  68#define NUM_NP	6		/* Number of NPs. */
  69
  70#define MPHDRLEN	6	/* multilink protocol header length */
  71#define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
  72
  73/*
  74 * An instance of /dev/ppp can be associated with either a ppp
  75 * interface unit or a ppp channel.  In both cases, file->private_data
  76 * points to one of these.
  77 */
  78struct ppp_file {
  79	enum {
  80		INTERFACE=1, CHANNEL
  81	}		kind;
  82	struct sk_buff_head xq;		/* pppd transmit queue */
  83	struct sk_buff_head rq;		/* receive queue for pppd */
  84	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
  85	atomic_t	refcnt;		/* # refs (incl /dev/ppp attached) */
  86	int		hdrlen;		/* space to leave for headers */
  87	int		index;		/* interface unit / channel number */
  88	int		dead;		/* unit/channel has been shut down */
  89};
  90
  91#define PF_TO_X(pf, X)		container_of(pf, X, file)
  92
  93#define PF_TO_PPP(pf)		PF_TO_X(pf, struct ppp)
  94#define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)
  95
  96/*
 
 
 
 
 
 
 
 
 
 
 
 
  97 * Data structure describing one ppp unit.
  98 * A ppp unit corresponds to a ppp network interface device
  99 * and represents a multilink bundle.
 100 * It can have 0 or more ppp channels connected to it.
 101 */
 102struct ppp {
 103	struct ppp_file	file;		/* stuff for read/write/poll 0 */
 104	struct file	*owner;		/* file that owns this unit 48 */
 105	struct list_head channels;	/* list of attached channels 4c */
 106	int		n_channels;	/* how many channels are attached 54 */
 107	spinlock_t	rlock;		/* lock for receive side 58 */
 108	spinlock_t	wlock;		/* lock for transmit side 5c */
 109	int		mru;		/* max receive unit 60 */
 110	unsigned int	flags;		/* control bits 64 */
 111	unsigned int	xstate;		/* transmit state bits 68 */
 112	unsigned int	rstate;		/* receive state bits 6c */
 113	int		debug;		/* debug flags 70 */
 114	struct slcompress *vj;		/* state for VJ header compression */
 115	enum NPmode	npmode[NUM_NP];	/* what to do with each net proto 78 */
 116	struct sk_buff	*xmit_pending;	/* a packet ready to go out 88 */
 117	struct compressor *xcomp;	/* transmit packet compressor 8c */
 118	void		*xc_state;	/* its internal state 90 */
 119	struct compressor *rcomp;	/* receive decompressor 94 */
 120	void		*rc_state;	/* its internal state 98 */
 121	unsigned long	last_xmit;	/* jiffies when last pkt sent 9c */
 122	unsigned long	last_recv;	/* jiffies when last pkt rcvd a0 */
 123	struct net_device *dev;		/* network interface device a4 */
 124	int		closing;	/* is device closing down? a8 */
 125#ifdef CONFIG_PPP_MULTILINK
 126	int		nxchan;		/* next channel to send something on */
 127	u32		nxseq;		/* next sequence number to send */
 128	int		mrru;		/* MP: max reconst. receive unit */
 129	u32		nextseq;	/* MP: seq no of next packet */
 130	u32		minseq;		/* MP: min of most recent seqnos */
 131	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
 132#endif /* CONFIG_PPP_MULTILINK */
 133#ifdef CONFIG_PPP_FILTER
 134	struct sock_filter *pass_filter;	/* filter for packets to pass */
 135	struct sock_filter *active_filter;/* filter for pkts to reset idle */
 136	unsigned pass_len, active_len;
 137#endif /* CONFIG_PPP_FILTER */
 138	struct net	*ppp_net;	/* the net we belong to */
 
 139};
 140
 141/*
 142 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
 143 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
 144 * SC_MUST_COMP
 145 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
 146 * Bits in xstate: SC_COMP_RUN
 147 */
 148#define SC_FLAG_BITS	(SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
 149			 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
 150			 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)
 151
 152/*
 153 * Private data structure for each channel.
 154 * This includes the data structure used for multilink.
 155 */
 156struct channel {
 157	struct ppp_file	file;		/* stuff for read/write/poll */
 158	struct list_head list;		/* link in all/new_channels list */
 159	struct ppp_channel *chan;	/* public channel data structure */
 160	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
 161	spinlock_t	downl;		/* protects `chan', file.xq dequeue */
 162	struct ppp	*ppp;		/* ppp unit we're connected to */
 163	struct net	*chan_net;	/* the net channel belongs to */
 164	struct list_head clist;		/* link in list of channels per unit */
 165	rwlock_t	upl;		/* protects `ppp' */
 166#ifdef CONFIG_PPP_MULTILINK
 167	u8		avail;		/* flag used in multilink stuff */
 168	u8		had_frag;	/* >= 1 fragments have been sent */
 169	u32		lastseq;	/* MP: last sequence # received */
 170	int		speed;		/* speed of the corresponding ppp channel*/
 171#endif /* CONFIG_PPP_MULTILINK */
 172};
 173
 174/*
 175 * SMP locking issues:
 176 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
 177 * list and the ppp.n_channels field, you need to take both locks
 178 * before you modify them.
 179 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
 180 * channel.downl.
 181 */
 182
 183static DEFINE_MUTEX(ppp_mutex);
 184static atomic_t ppp_unit_count = ATOMIC_INIT(0);
 185static atomic_t channel_count = ATOMIC_INIT(0);
 186
 187/* per-net private data for this module */
 188static int ppp_net_id __read_mostly;
 189struct ppp_net {
 190	/* units to ppp mapping */
 191	struct idr units_idr;
 192
 193	/*
 194	 * all_ppp_mutex protects the units_idr mapping.
 195	 * It also ensures that finding a ppp unit in the units_idr
 196	 * map and updating its file.refcnt field is atomic.
 197	 */
 198	struct mutex all_ppp_mutex;
 199
 200	/* channels */
 201	struct list_head all_channels;
 202	struct list_head new_channels;
 203	int last_channel_index;
 204
 205	/*
 206	 * all_channels_lock protects all_channels and
 207	 * last_channel_index, and the atomicity of find
 208	 * a channel and updating its file.refcnt field.
 209	 */
 210	spinlock_t all_channels_lock;
 211};
 212
 213/* Get the PPP protocol number from a skb */
 214#define PPP_PROTO(skb)	get_unaligned_be16((skb)->data)
 215
 216/* We limit the length of ppp->file.rq to this (arbitrary) value */
 217#define PPP_MAX_RQLEN	32
 218
 219/*
 220 * Maximum number of multilink fragments queued up.
 221 * This has to be large enough to cope with the maximum latency of
 222 * the slowest channel relative to the others.  Strictly it should
 223 * depend on the number of channels and their characteristics.
 224 */
 225#define PPP_MP_MAX_QLEN	128
 226
 227/* Multilink header bits. */
 228#define B	0x80		/* this fragment begins a packet */
 229#define E	0x40		/* this fragment ends a packet */
 230
 231/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
 232#define seq_before(a, b)	((s32)((a) - (b)) < 0)
 233#define seq_after(a, b)		((s32)((a) - (b)) > 0)
 234
 235/* Prototypes. */
 236static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
 237			struct file *file, unsigned int cmd, unsigned long arg);
 238static void ppp_xmit_process(struct ppp *ppp);
 239static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
 240static void ppp_push(struct ppp *ppp);
 241static void ppp_channel_push(struct channel *pch);
 242static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
 243			      struct channel *pch);
 244static void ppp_receive_error(struct ppp *ppp);
 245static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
 246static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
 247					    struct sk_buff *skb);
 248#ifdef CONFIG_PPP_MULTILINK
 249static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
 250				struct channel *pch);
 251static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
 252static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
 253static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
 254#endif /* CONFIG_PPP_MULTILINK */
 255static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
 256static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
 257static void ppp_ccp_closed(struct ppp *ppp);
 258static struct compressor *find_compressor(int type);
 259static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
 260static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
 261static void init_ppp_file(struct ppp_file *pf, int kind);
 262static void ppp_shutdown_interface(struct ppp *ppp);
 263static void ppp_destroy_interface(struct ppp *ppp);
 264static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
 265static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
 266static int ppp_connect_channel(struct channel *pch, int unit);
 267static int ppp_disconnect_channel(struct channel *pch);
 268static void ppp_destroy_channel(struct channel *pch);
 269static int unit_get(struct idr *p, void *ptr);
 270static int unit_set(struct idr *p, void *ptr, int n);
 271static void unit_put(struct idr *p, int n);
 272static void *unit_find(struct idr *p, int n);
 273
 274static struct class *ppp_class;
 275
 276/* per net-namespace data */
 277static inline struct ppp_net *ppp_pernet(struct net *net)
 278{
 279	BUG_ON(!net);
 280
 281	return net_generic(net, ppp_net_id);
 282}
 283
 284/* Translates a PPP protocol number to a NP index (NP == network protocol) */
 285static inline int proto_to_npindex(int proto)
 286{
 287	switch (proto) {
 288	case PPP_IP:
 289		return NP_IP;
 290	case PPP_IPV6:
 291		return NP_IPV6;
 292	case PPP_IPX:
 293		return NP_IPX;
 294	case PPP_AT:
 295		return NP_AT;
 296	case PPP_MPLS_UC:
 297		return NP_MPLS_UC;
 298	case PPP_MPLS_MC:
 299		return NP_MPLS_MC;
 300	}
 301	return -EINVAL;
 302}
 303
 304/* Translates an NP index into a PPP protocol number */
 305static const int npindex_to_proto[NUM_NP] = {
 306	PPP_IP,
 307	PPP_IPV6,
 308	PPP_IPX,
 309	PPP_AT,
 310	PPP_MPLS_UC,
 311	PPP_MPLS_MC,
 312};
 313
 314/* Translates an ethertype into an NP index */
 315static inline int ethertype_to_npindex(int ethertype)
 316{
 317	switch (ethertype) {
 318	case ETH_P_IP:
 319		return NP_IP;
 320	case ETH_P_IPV6:
 321		return NP_IPV6;
 322	case ETH_P_IPX:
 323		return NP_IPX;
 324	case ETH_P_PPPTALK:
 325	case ETH_P_ATALK:
 326		return NP_AT;
 327	case ETH_P_MPLS_UC:
 328		return NP_MPLS_UC;
 329	case ETH_P_MPLS_MC:
 330		return NP_MPLS_MC;
 331	}
 332	return -1;
 333}
 334
 335/* Translates an NP index into an ethertype */
 336static const int npindex_to_ethertype[NUM_NP] = {
 337	ETH_P_IP,
 338	ETH_P_IPV6,
 339	ETH_P_IPX,
 340	ETH_P_PPPTALK,
 341	ETH_P_MPLS_UC,
 342	ETH_P_MPLS_MC,
 343};
 344
 345/*
 346 * Locking shorthand.
 347 */
 348#define ppp_xmit_lock(ppp)	spin_lock_bh(&(ppp)->wlock)
 349#define ppp_xmit_unlock(ppp)	spin_unlock_bh(&(ppp)->wlock)
 350#define ppp_recv_lock(ppp)	spin_lock_bh(&(ppp)->rlock)
 351#define ppp_recv_unlock(ppp)	spin_unlock_bh(&(ppp)->rlock)
 352#define ppp_lock(ppp)		do { ppp_xmit_lock(ppp); \
 353				     ppp_recv_lock(ppp); } while (0)
 354#define ppp_unlock(ppp)		do { ppp_recv_unlock(ppp); \
 355				     ppp_xmit_unlock(ppp); } while (0)
 356
 357/*
 358 * /dev/ppp device routines.
 359 * The /dev/ppp device is used by pppd to control the ppp unit.
 360 * It supports the read, write, ioctl and poll functions.
 361 * Open instances of /dev/ppp can be in one of three states:
 362 * unattached, attached to a ppp unit, or attached to a ppp channel.
 363 */
 364static int ppp_open(struct inode *inode, struct file *file)
 365{
 366	/*
 367	 * This could (should?) be enforced by the permissions on /dev/ppp.
 368	 */
 369	if (!capable(CAP_NET_ADMIN))
 370		return -EPERM;
 371	return 0;
 372}
 373
 374static int ppp_release(struct inode *unused, struct file *file)
 375{
 376	struct ppp_file *pf = file->private_data;
 377	struct ppp *ppp;
 378
 379	if (pf) {
 380		file->private_data = NULL;
 381		if (pf->kind == INTERFACE) {
 382			ppp = PF_TO_PPP(pf);
 383			if (file == ppp->owner)
 384				ppp_shutdown_interface(ppp);
 385		}
 386		if (atomic_dec_and_test(&pf->refcnt)) {
 387			switch (pf->kind) {
 388			case INTERFACE:
 389				ppp_destroy_interface(PF_TO_PPP(pf));
 390				break;
 391			case CHANNEL:
 392				ppp_destroy_channel(PF_TO_CHANNEL(pf));
 393				break;
 394			}
 395		}
 396	}
 397	return 0;
 398}
 399
 400static ssize_t ppp_read(struct file *file, char __user *buf,
 401			size_t count, loff_t *ppos)
 402{
 403	struct ppp_file *pf = file->private_data;
 404	DECLARE_WAITQUEUE(wait, current);
 405	ssize_t ret;
 406	struct sk_buff *skb = NULL;
 407	struct iovec iov;
 408
 409	ret = count;
 410
 411	if (!pf)
 412		return -ENXIO;
 413	add_wait_queue(&pf->rwait, &wait);
 414	for (;;) {
 415		set_current_state(TASK_INTERRUPTIBLE);
 416		skb = skb_dequeue(&pf->rq);
 417		if (skb)
 418			break;
 419		ret = 0;
 420		if (pf->dead)
 421			break;
 422		if (pf->kind == INTERFACE) {
 423			/*
 424			 * Return 0 (EOF) on an interface that has no
 425			 * channels connected, unless it is looping
 426			 * network traffic (demand mode).
 427			 */
 428			struct ppp *ppp = PF_TO_PPP(pf);
 429			if (ppp->n_channels == 0 &&
 430			    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
 431				break;
 432		}
 433		ret = -EAGAIN;
 434		if (file->f_flags & O_NONBLOCK)
 435			break;
 436		ret = -ERESTARTSYS;
 437		if (signal_pending(current))
 438			break;
 439		schedule();
 440	}
 441	set_current_state(TASK_RUNNING);
 442	remove_wait_queue(&pf->rwait, &wait);
 443
 444	if (!skb)
 445		goto out;
 446
 447	ret = -EOVERFLOW;
 448	if (skb->len > count)
 449		goto outf;
 450	ret = -EFAULT;
 451	iov.iov_base = buf;
 452	iov.iov_len = count;
 453	if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len))
 454		goto outf;
 455	ret = skb->len;
 456
 457 outf:
 458	kfree_skb(skb);
 459 out:
 460	return ret;
 461}
 462
 463static ssize_t ppp_write(struct file *file, const char __user *buf,
 464			 size_t count, loff_t *ppos)
 465{
 466	struct ppp_file *pf = file->private_data;
 467	struct sk_buff *skb;
 468	ssize_t ret;
 469
 470	if (!pf)
 471		return -ENXIO;
 472	ret = -ENOMEM;
 473	skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
 474	if (!skb)
 475		goto out;
 476	skb_reserve(skb, pf->hdrlen);
 477	ret = -EFAULT;
 478	if (copy_from_user(skb_put(skb, count), buf, count)) {
 479		kfree_skb(skb);
 480		goto out;
 481	}
 482
 483	skb_queue_tail(&pf->xq, skb);
 484
 485	switch (pf->kind) {
 486	case INTERFACE:
 487		ppp_xmit_process(PF_TO_PPP(pf));
 488		break;
 489	case CHANNEL:
 490		ppp_channel_push(PF_TO_CHANNEL(pf));
 491		break;
 492	}
 493
 494	ret = count;
 495
 496 out:
 497	return ret;
 498}
 499
 500/* No kernel lock - fine */
 501static unsigned int ppp_poll(struct file *file, poll_table *wait)
 502{
 503	struct ppp_file *pf = file->private_data;
 504	unsigned int mask;
 505
 506	if (!pf)
 507		return 0;
 508	poll_wait(file, &pf->rwait, wait);
 509	mask = POLLOUT | POLLWRNORM;
 510	if (skb_peek(&pf->rq))
 511		mask |= POLLIN | POLLRDNORM;
 512	if (pf->dead)
 513		mask |= POLLHUP;
 514	else if (pf->kind == INTERFACE) {
 515		/* see comment in ppp_read */
 516		struct ppp *ppp = PF_TO_PPP(pf);
 517		if (ppp->n_channels == 0 &&
 518		    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
 519			mask |= POLLIN | POLLRDNORM;
 520	}
 521
 522	return mask;
 523}
 524
 525#ifdef CONFIG_PPP_FILTER
 526static int get_filter(void __user *arg, struct sock_filter **p)
 527{
 528	struct sock_fprog uprog;
 529	struct sock_filter *code = NULL;
 530	int len, err;
 531
 532	if (copy_from_user(&uprog, arg, sizeof(uprog)))
 533		return -EFAULT;
 534
 535	if (!uprog.len) {
 536		*p = NULL;
 537		return 0;
 538	}
 539
 540	len = uprog.len * sizeof(struct sock_filter);
 541	code = memdup_user(uprog.filter, len);
 542	if (IS_ERR(code))
 543		return PTR_ERR(code);
 544
 545	err = sk_chk_filter(code, uprog.len);
 546	if (err) {
 547		kfree(code);
 548		return err;
 549	}
 550
 551	*p = code;
 552	return uprog.len;
 553}
 554#endif /* CONFIG_PPP_FILTER */
 555
 556static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 557{
 558	struct ppp_file *pf = file->private_data;
 559	struct ppp *ppp;
 560	int err = -EFAULT, val, val2, i;
 561	struct ppp_idle idle;
 562	struct npioctl npi;
 563	int unit, cflags;
 564	struct slcompress *vj;
 565	void __user *argp = (void __user *)arg;
 566	int __user *p = argp;
 567
 568	if (!pf)
 569		return ppp_unattached_ioctl(current->nsproxy->net_ns,
 570					pf, file, cmd, arg);
 571
 572	if (cmd == PPPIOCDETACH) {
 573		/*
 574		 * We have to be careful here... if the file descriptor
 575		 * has been dup'd, we could have another process in the
 576		 * middle of a poll using the same file *, so we had
 577		 * better not free the interface data structures -
 578		 * instead we fail the ioctl.  Even in this case, we
 579		 * shut down the interface if we are the owner of it.
 580		 * Actually, we should get rid of PPPIOCDETACH, userland
 581		 * (i.e. pppd) could achieve the same effect by closing
 582		 * this fd and reopening /dev/ppp.
 583		 */
 584		err = -EINVAL;
 585		mutex_lock(&ppp_mutex);
 586		if (pf->kind == INTERFACE) {
 587			ppp = PF_TO_PPP(pf);
 588			if (file == ppp->owner)
 589				ppp_shutdown_interface(ppp);
 590		}
 591		if (atomic_long_read(&file->f_count) <= 2) {
 592			ppp_release(NULL, file);
 593			err = 0;
 594		} else
 595			pr_warn("PPPIOCDETACH file->f_count=%ld\n",
 596				atomic_long_read(&file->f_count));
 597		mutex_unlock(&ppp_mutex);
 598		return err;
 599	}
 600
 601	if (pf->kind == CHANNEL) {
 602		struct channel *pch;
 603		struct ppp_channel *chan;
 604
 605		mutex_lock(&ppp_mutex);
 606		pch = PF_TO_CHANNEL(pf);
 607
 608		switch (cmd) {
 609		case PPPIOCCONNECT:
 610			if (get_user(unit, p))
 611				break;
 612			err = ppp_connect_channel(pch, unit);
 613			break;
 614
 615		case PPPIOCDISCONN:
 616			err = ppp_disconnect_channel(pch);
 617			break;
 618
 619		default:
 620			down_read(&pch->chan_sem);
 621			chan = pch->chan;
 622			err = -ENOTTY;
 623			if (chan && chan->ops->ioctl)
 624				err = chan->ops->ioctl(chan, cmd, arg);
 625			up_read(&pch->chan_sem);
 626		}
 627		mutex_unlock(&ppp_mutex);
 628		return err;
 629	}
 630
 631	if (pf->kind != INTERFACE) {
 632		/* can't happen */
 633		pr_err("PPP: not interface or channel??\n");
 634		return -EINVAL;
 635	}
 636
 637	mutex_lock(&ppp_mutex);
 638	ppp = PF_TO_PPP(pf);
 639	switch (cmd) {
 640	case PPPIOCSMRU:
 641		if (get_user(val, p))
 642			break;
 643		ppp->mru = val;
 644		err = 0;
 645		break;
 646
 647	case PPPIOCSFLAGS:
 648		if (get_user(val, p))
 649			break;
 650		ppp_lock(ppp);
 651		cflags = ppp->flags & ~val;
 652		ppp->flags = val & SC_FLAG_BITS;
 653		ppp_unlock(ppp);
 654		if (cflags & SC_CCP_OPEN)
 655			ppp_ccp_closed(ppp);
 656		err = 0;
 657		break;
 658
 659	case PPPIOCGFLAGS:
 660		val = ppp->flags | ppp->xstate | ppp->rstate;
 661		if (put_user(val, p))
 662			break;
 663		err = 0;
 664		break;
 665
 666	case PPPIOCSCOMPRESS:
 667		err = ppp_set_compress(ppp, arg);
 668		break;
 669
 670	case PPPIOCGUNIT:
 671		if (put_user(ppp->file.index, p))
 672			break;
 673		err = 0;
 674		break;
 675
 676	case PPPIOCSDEBUG:
 677		if (get_user(val, p))
 678			break;
 679		ppp->debug = val;
 680		err = 0;
 681		break;
 682
 683	case PPPIOCGDEBUG:
 684		if (put_user(ppp->debug, p))
 685			break;
 686		err = 0;
 687		break;
 688
 689	case PPPIOCGIDLE:
 690		idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
 691		idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
 692		if (copy_to_user(argp, &idle, sizeof(idle)))
 693			break;
 694		err = 0;
 695		break;
 696
 697	case PPPIOCSMAXCID:
 698		if (get_user(val, p))
 699			break;
 700		val2 = 15;
 701		if ((val >> 16) != 0) {
 702			val2 = val >> 16;
 703			val &= 0xffff;
 704		}
 705		vj = slhc_init(val2+1, val+1);
 706		if (!vj) {
 707			netdev_err(ppp->dev,
 708				   "PPP: no memory (VJ compressor)\n");
 709			err = -ENOMEM;
 710			break;
 711		}
 712		ppp_lock(ppp);
 713		if (ppp->vj)
 714			slhc_free(ppp->vj);
 715		ppp->vj = vj;
 716		ppp_unlock(ppp);
 717		err = 0;
 718		break;
 719
 720	case PPPIOCGNPMODE:
 721	case PPPIOCSNPMODE:
 722		if (copy_from_user(&npi, argp, sizeof(npi)))
 723			break;
 724		err = proto_to_npindex(npi.protocol);
 725		if (err < 0)
 726			break;
 727		i = err;
 728		if (cmd == PPPIOCGNPMODE) {
 729			err = -EFAULT;
 730			npi.mode = ppp->npmode[i];
 731			if (copy_to_user(argp, &npi, sizeof(npi)))
 732				break;
 733		} else {
 734			ppp->npmode[i] = npi.mode;
 735			/* we may be able to transmit more packets now (??) */
 736			netif_wake_queue(ppp->dev);
 737		}
 738		err = 0;
 739		break;
 740
 741#ifdef CONFIG_PPP_FILTER
 742	case PPPIOCSPASS:
 743	{
 744		struct sock_filter *code;
 
 745		err = get_filter(argp, &code);
 746		if (err >= 0) {
 
 
 
 
 
 747			ppp_lock(ppp);
 748			kfree(ppp->pass_filter);
 749			ppp->pass_filter = code;
 750			ppp->pass_len = err;
 
 
 751			ppp_unlock(ppp);
 752			err = 0;
 753		}
 754		break;
 755	}
 756	case PPPIOCSACTIVE:
 757	{
 758		struct sock_filter *code;
 
 759		err = get_filter(argp, &code);
 760		if (err >= 0) {
 
 
 
 
 
 761			ppp_lock(ppp);
 762			kfree(ppp->active_filter);
 763			ppp->active_filter = code;
 764			ppp->active_len = err;
 
 
 765			ppp_unlock(ppp);
 766			err = 0;
 767		}
 768		break;
 769	}
 770#endif /* CONFIG_PPP_FILTER */
 771
 772#ifdef CONFIG_PPP_MULTILINK
 773	case PPPIOCSMRRU:
 774		if (get_user(val, p))
 775			break;
 776		ppp_recv_lock(ppp);
 777		ppp->mrru = val;
 778		ppp_recv_unlock(ppp);
 779		err = 0;
 780		break;
 781#endif /* CONFIG_PPP_MULTILINK */
 782
 783	default:
 784		err = -ENOTTY;
 785	}
 786	mutex_unlock(&ppp_mutex);
 787	return err;
 788}
 789
 790static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
 791			struct file *file, unsigned int cmd, unsigned long arg)
 792{
 793	int unit, err = -EFAULT;
 794	struct ppp *ppp;
 795	struct channel *chan;
 796	struct ppp_net *pn;
 797	int __user *p = (int __user *)arg;
 798
 799	mutex_lock(&ppp_mutex);
 800	switch (cmd) {
 801	case PPPIOCNEWUNIT:
 802		/* Create a new ppp unit */
 803		if (get_user(unit, p))
 804			break;
 805		ppp = ppp_create_interface(net, unit, &err);
 806		if (!ppp)
 807			break;
 808		file->private_data = &ppp->file;
 809		ppp->owner = file;
 810		err = -EFAULT;
 811		if (put_user(ppp->file.index, p))
 812			break;
 813		err = 0;
 814		break;
 815
 816	case PPPIOCATTACH:
 817		/* Attach to an existing ppp unit */
 818		if (get_user(unit, p))
 819			break;
 820		err = -ENXIO;
 821		pn = ppp_pernet(net);
 822		mutex_lock(&pn->all_ppp_mutex);
 823		ppp = ppp_find_unit(pn, unit);
 824		if (ppp) {
 825			atomic_inc(&ppp->file.refcnt);
 826			file->private_data = &ppp->file;
 827			err = 0;
 828		}
 829		mutex_unlock(&pn->all_ppp_mutex);
 830		break;
 831
 832	case PPPIOCATTCHAN:
 833		if (get_user(unit, p))
 834			break;
 835		err = -ENXIO;
 836		pn = ppp_pernet(net);
 837		spin_lock_bh(&pn->all_channels_lock);
 838		chan = ppp_find_channel(pn, unit);
 839		if (chan) {
 840			atomic_inc(&chan->file.refcnt);
 841			file->private_data = &chan->file;
 842			err = 0;
 843		}
 844		spin_unlock_bh(&pn->all_channels_lock);
 845		break;
 846
 847	default:
 848		err = -ENOTTY;
 849	}
 850	mutex_unlock(&ppp_mutex);
 851	return err;
 852}
 853
 854static const struct file_operations ppp_device_fops = {
 855	.owner		= THIS_MODULE,
 856	.read		= ppp_read,
 857	.write		= ppp_write,
 858	.poll		= ppp_poll,
 859	.unlocked_ioctl	= ppp_ioctl,
 860	.open		= ppp_open,
 861	.release	= ppp_release,
 862	.llseek		= noop_llseek,
 863};
 864
 865static __net_init int ppp_init_net(struct net *net)
 866{
 867	struct ppp_net *pn = net_generic(net, ppp_net_id);
 868
 869	idr_init(&pn->units_idr);
 870	mutex_init(&pn->all_ppp_mutex);
 871
 872	INIT_LIST_HEAD(&pn->all_channels);
 873	INIT_LIST_HEAD(&pn->new_channels);
 874
 875	spin_lock_init(&pn->all_channels_lock);
 876
 877	return 0;
 878}
 879
 880static __net_exit void ppp_exit_net(struct net *net)
 881{
 882	struct ppp_net *pn = net_generic(net, ppp_net_id);
 883
 884	idr_destroy(&pn->units_idr);
 885}
 886
 887static struct pernet_operations ppp_net_ops = {
 888	.init = ppp_init_net,
 889	.exit = ppp_exit_net,
 890	.id   = &ppp_net_id,
 891	.size = sizeof(struct ppp_net),
 892};
 893
 894#define PPP_MAJOR	108
 895
 896/* Called at boot time if ppp is compiled into the kernel,
 897   or at module load time (from init_module) if compiled as a module. */
 898static int __init ppp_init(void)
 899{
 900	int err;
 901
 902	pr_info("PPP generic driver version " PPP_VERSION "\n");
 903
 904	err = register_pernet_device(&ppp_net_ops);
 905	if (err) {
 906		pr_err("failed to register PPP pernet device (%d)\n", err);
 907		goto out;
 908	}
 909
 910	err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
 911	if (err) {
 912		pr_err("failed to register PPP device (%d)\n", err);
 913		goto out_net;
 914	}
 915
 916	ppp_class = class_create(THIS_MODULE, "ppp");
 917	if (IS_ERR(ppp_class)) {
 918		err = PTR_ERR(ppp_class);
 919		goto out_chrdev;
 920	}
 921
 922	/* not a big deal if we fail here :-) */
 923	device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
 924
 925	return 0;
 926
 927out_chrdev:
 928	unregister_chrdev(PPP_MAJOR, "ppp");
 929out_net:
 930	unregister_pernet_device(&ppp_net_ops);
 931out:
 932	return err;
 933}
 934
 935/*
 936 * Network interface unit routines.
 937 */
 938static netdev_tx_t
 939ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
 940{
 941	struct ppp *ppp = netdev_priv(dev);
 942	int npi, proto;
 943	unsigned char *pp;
 944
 945	npi = ethertype_to_npindex(ntohs(skb->protocol));
 946	if (npi < 0)
 947		goto outf;
 948
 949	/* Drop, accept or reject the packet */
 950	switch (ppp->npmode[npi]) {
 951	case NPMODE_PASS:
 952		break;
 953	case NPMODE_QUEUE:
 954		/* it would be nice to have a way to tell the network
 955		   system to queue this one up for later. */
 956		goto outf;
 957	case NPMODE_DROP:
 958	case NPMODE_ERROR:
 959		goto outf;
 960	}
 961
 962	/* Put the 2-byte PPP protocol number on the front,
 963	   making sure there is room for the address and control fields. */
 964	if (skb_cow_head(skb, PPP_HDRLEN))
 965		goto outf;
 966
 967	pp = skb_push(skb, 2);
 968	proto = npindex_to_proto[npi];
 969	put_unaligned_be16(proto, pp);
 970
 971	skb_queue_tail(&ppp->file.xq, skb);
 972	ppp_xmit_process(ppp);
 973	return NETDEV_TX_OK;
 974
 975 outf:
 976	kfree_skb(skb);
 977	++dev->stats.tx_dropped;
 978	return NETDEV_TX_OK;
 979}
 980
 981static int
 982ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 983{
 984	struct ppp *ppp = netdev_priv(dev);
 985	int err = -EFAULT;
 986	void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
 987	struct ppp_stats stats;
 988	struct ppp_comp_stats cstats;
 989	char *vers;
 990
 991	switch (cmd) {
 992	case SIOCGPPPSTATS:
 993		ppp_get_stats(ppp, &stats);
 994		if (copy_to_user(addr, &stats, sizeof(stats)))
 995			break;
 996		err = 0;
 997		break;
 998
 999	case SIOCGPPPCSTATS:
1000		memset(&cstats, 0, sizeof(cstats));
1001		if (ppp->xc_state)
1002			ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
1003		if (ppp->rc_state)
1004			ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
1005		if (copy_to_user(addr, &cstats, sizeof(cstats)))
1006			break;
1007		err = 0;
1008		break;
1009
1010	case SIOCGPPPVER:
1011		vers = PPP_VERSION;
1012		if (copy_to_user(addr, vers, strlen(vers) + 1))
1013			break;
1014		err = 0;
1015		break;
1016
1017	default:
1018		err = -EINVAL;
1019	}
1020
1021	return err;
1022}
1023
1024static const struct net_device_ops ppp_netdev_ops = {
1025	.ndo_start_xmit = ppp_start_xmit,
1026	.ndo_do_ioctl   = ppp_net_ioctl,
1027};
1028
1029static void ppp_setup(struct net_device *dev)
1030{
1031	dev->netdev_ops = &ppp_netdev_ops;
1032	dev->hard_header_len = PPP_HDRLEN;
1033	dev->mtu = PPP_MRU;
1034	dev->addr_len = 0;
1035	dev->tx_queue_len = 3;
1036	dev->type = ARPHRD_PPP;
1037	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1038	dev->features |= NETIF_F_NETNS_LOCAL;
1039	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1040}
1041
1042/*
1043 * Transmit-side routines.
1044 */
1045
1046/*
1047 * Called to do any work queued up on the transmit side
1048 * that can now be done.
1049 */
1050static void
1051ppp_xmit_process(struct ppp *ppp)
1052{
1053	struct sk_buff *skb;
1054
1055	ppp_xmit_lock(ppp);
1056	if (!ppp->closing) {
1057		ppp_push(ppp);
1058		while (!ppp->xmit_pending &&
1059		       (skb = skb_dequeue(&ppp->file.xq)))
1060			ppp_send_frame(ppp, skb);
1061		/* If there's no work left to do, tell the core net
1062		   code that we can accept some more. */
1063		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
1064			netif_wake_queue(ppp->dev);
1065		else
1066			netif_stop_queue(ppp->dev);
1067	}
1068	ppp_xmit_unlock(ppp);
1069}
1070
1071static inline struct sk_buff *
1072pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
1073{
1074	struct sk_buff *new_skb;
1075	int len;
1076	int new_skb_size = ppp->dev->mtu +
1077		ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
1078	int compressor_skb_size = ppp->dev->mtu +
1079		ppp->xcomp->comp_extra + PPP_HDRLEN;
1080	new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
1081	if (!new_skb) {
1082		if (net_ratelimit())
1083			netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
1084		return NULL;
1085	}
1086	if (ppp->dev->hard_header_len > PPP_HDRLEN)
1087		skb_reserve(new_skb,
1088			    ppp->dev->hard_header_len - PPP_HDRLEN);
1089
1090	/* compressor still expects A/C bytes in hdr */
1091	len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
1092				   new_skb->data, skb->len + 2,
1093				   compressor_skb_size);
1094	if (len > 0 && (ppp->flags & SC_CCP_UP)) {
1095		consume_skb(skb);
1096		skb = new_skb;
1097		skb_put(skb, len);
1098		skb_pull(skb, 2);	/* pull off A/C bytes */
1099	} else if (len == 0) {
1100		/* didn't compress, or CCP not up yet */
1101		consume_skb(new_skb);
1102		new_skb = skb;
1103	} else {
1104		/*
1105		 * (len < 0)
1106		 * MPPE requires that we do not send unencrypted
1107		 * frames.  The compressor will return -1 if we
1108		 * should drop the frame.  We cannot simply test
1109		 * the compress_proto because MPPE and MPPC share
1110		 * the same number.
1111		 */
1112		if (net_ratelimit())
1113			netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
1114		kfree_skb(skb);
1115		consume_skb(new_skb);
1116		new_skb = NULL;
1117	}
1118	return new_skb;
1119}
1120
1121/*
1122 * Compress and send a frame.
1123 * The caller should have locked the xmit path,
1124 * and xmit_pending should be 0.
1125 */
1126static void
1127ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1128{
1129	int proto = PPP_PROTO(skb);
1130	struct sk_buff *new_skb;
1131	int len;
1132	unsigned char *cp;
1133
1134	if (proto < 0x8000) {
1135#ifdef CONFIG_PPP_FILTER
1136		/* check if we should pass this packet */
1137		/* the filter instructions are constructed assuming
1138		   a four-byte PPP header on each packet */
1139		*skb_push(skb, 2) = 1;
1140		if (ppp->pass_filter &&
1141		    sk_run_filter(skb, ppp->pass_filter) == 0) {
1142			if (ppp->debug & 1)
1143				netdev_printk(KERN_DEBUG, ppp->dev,
1144					      "PPP: outbound frame "
1145					      "not passed\n");
1146			kfree_skb(skb);
1147			return;
1148		}
1149		/* if this packet passes the active filter, record the time */
1150		if (!(ppp->active_filter &&
1151		      sk_run_filter(skb, ppp->active_filter) == 0))
1152			ppp->last_xmit = jiffies;
1153		skb_pull(skb, 2);
1154#else
1155		/* for data packets, record the time */
1156		ppp->last_xmit = jiffies;
1157#endif /* CONFIG_PPP_FILTER */
1158	}
1159
1160	++ppp->dev->stats.tx_packets;
1161	ppp->dev->stats.tx_bytes += skb->len - 2;
1162
1163	switch (proto) {
1164	case PPP_IP:
1165		if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
1166			break;
1167		/* try to do VJ TCP header compression */
1168		new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
1169				    GFP_ATOMIC);
1170		if (!new_skb) {
1171			netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
1172			goto drop;
1173		}
1174		skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
1175		cp = skb->data + 2;
1176		len = slhc_compress(ppp->vj, cp, skb->len - 2,
1177				    new_skb->data + 2, &cp,
1178				    !(ppp->flags & SC_NO_TCP_CCID));
1179		if (cp == skb->data + 2) {
1180			/* didn't compress */
1181			consume_skb(new_skb);
1182		} else {
1183			if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
1184				proto = PPP_VJC_COMP;
1185				cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
1186			} else {
1187				proto = PPP_VJC_UNCOMP;
1188				cp[0] = skb->data[2];
1189			}
1190			consume_skb(skb);
1191			skb = new_skb;
1192			cp = skb_put(skb, len + 2);
1193			cp[0] = 0;
1194			cp[1] = proto;
1195		}
1196		break;
1197
1198	case PPP_CCP:
1199		/* peek at outbound CCP frames */
1200		ppp_ccp_peek(ppp, skb, 0);
1201		break;
1202	}
1203
1204	/* try to do packet compression */
1205	if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
1206	    proto != PPP_LCP && proto != PPP_CCP) {
1207		if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
1208			if (net_ratelimit())
1209				netdev_err(ppp->dev,
1210					   "ppp: compression required but "
1211					   "down - pkt dropped.\n");
1212			goto drop;
1213		}
1214		skb = pad_compress_skb(ppp, skb);
1215		if (!skb)
1216			goto drop;
1217	}
1218
1219	/*
1220	 * If we are waiting for traffic (demand dialling),
1221	 * queue it up for pppd to receive.
1222	 */
1223	if (ppp->flags & SC_LOOP_TRAFFIC) {
1224		if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
1225			goto drop;
1226		skb_queue_tail(&ppp->file.rq, skb);
1227		wake_up_interruptible(&ppp->file.rwait);
1228		return;
1229	}
1230
1231	ppp->xmit_pending = skb;
1232	ppp_push(ppp);
1233	return;
1234
1235 drop:
1236	kfree_skb(skb);
1237	++ppp->dev->stats.tx_errors;
1238}
1239
1240/*
1241 * Try to send the frame in xmit_pending.
1242 * The caller should have the xmit path locked.
1243 */
1244static void
1245ppp_push(struct ppp *ppp)
1246{
1247	struct list_head *list;
1248	struct channel *pch;
1249	struct sk_buff *skb = ppp->xmit_pending;
1250
1251	if (!skb)
1252		return;
1253
1254	list = &ppp->channels;
1255	if (list_empty(list)) {
1256		/* nowhere to send the packet, just drop it */
1257		ppp->xmit_pending = NULL;
1258		kfree_skb(skb);
1259		return;
1260	}
1261
1262	if ((ppp->flags & SC_MULTILINK) == 0) {
1263		/* not doing multilink: send it down the first channel */
1264		list = list->next;
1265		pch = list_entry(list, struct channel, clist);
1266
1267		spin_lock_bh(&pch->downl);
1268		if (pch->chan) {
1269			if (pch->chan->ops->start_xmit(pch->chan, skb))
1270				ppp->xmit_pending = NULL;
1271		} else {
1272			/* channel got unregistered */
1273			kfree_skb(skb);
1274			ppp->xmit_pending = NULL;
1275		}
1276		spin_unlock_bh(&pch->downl);
1277		return;
1278	}
1279
1280#ifdef CONFIG_PPP_MULTILINK
1281	/* Multilink: fragment the packet over as many links
1282	   as can take the packet at the moment. */
1283	if (!ppp_mp_explode(ppp, skb))
1284		return;
1285#endif /* CONFIG_PPP_MULTILINK */
1286
1287	ppp->xmit_pending = NULL;
1288	kfree_skb(skb);
1289}
1290
1291#ifdef CONFIG_PPP_MULTILINK
1292static bool mp_protocol_compress __read_mostly = true;
1293module_param(mp_protocol_compress, bool, S_IRUGO | S_IWUSR);
1294MODULE_PARM_DESC(mp_protocol_compress,
1295		 "compress protocol id in multilink fragments");
1296
1297/*
1298 * Divide a packet to be transmitted into fragments and
1299 * send them out the individual links.
1300 */
1301static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1302{
1303	int len, totlen;
1304	int i, bits, hdrlen, mtu;
1305	int flen;
1306	int navail, nfree, nzero;
1307	int nbigger;
1308	int totspeed;
1309	int totfree;
1310	unsigned char *p, *q;
1311	struct list_head *list;
1312	struct channel *pch;
1313	struct sk_buff *frag;
1314	struct ppp_channel *chan;
1315
 1316	totspeed = 0; /* total bitrate of the bundle */
 1317	nfree = 0; /* # channels which have no packet already queued */
 1318	navail = 0; /* total # of usable channels (not deregistered) */
 1319	nzero = 0; /* number of channels with zero speed associated */
 1320	totfree = 0; /* total # of channels available and having no
 1321		      * queued packets before starting the
 1322		      * fragmentation */
1323
 1324	hdrlen = (ppp->flags & SC_MP_XSHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
1325	i = 0;
1326	list_for_each_entry(pch, &ppp->channels, clist) {
1327		if (pch->chan) {
1328			pch->avail = 1;
1329			navail++;
1330			pch->speed = pch->chan->speed;
1331		} else {
1332			pch->avail = 0;
1333		}
 1334		if (pch->avail) {
 1335			if (skb_queue_empty(&pch->file.xq) ||
 1336			    !pch->had_frag) {
 1337				if (pch->speed == 0)
 1338					nzero++;
 1339				else
 1340					totspeed += pch->speed;
 1341
 1342				pch->avail = 2;
 1343				++nfree;
 1344				++totfree;
 1345			}
1346			if (!pch->had_frag && i < ppp->nxchan)
1347				ppp->nxchan = i;
1348		}
1349		++i;
1350	}
1351	/*
1352	 * Don't start sending this packet unless at least half of
1353	 * the channels are free.  This gives much better TCP
1354	 * performance if we have a lot of channels.
1355	 */
1356	if (nfree == 0 || nfree < navail / 2)
1357		return 0; /* can't take now, leave it in xmit_pending */
1358
1359	/* Do protocol field compression */
1360	p = skb->data;
1361	len = skb->len;
1362	if (*p == 0 && mp_protocol_compress) {
1363		++p;
1364		--len;
1365	}
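	/* Illustrative: with protocol field compression, a frame for
	 * protocol 0x0021 (IP) starts with the bytes 0x00 0x21; the
	 * leading 0x00 is dropped here, so only 0x21 onward is carried
	 * in the fragments. */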
1366
1367	totlen = len;
1368	nbigger = len % nfree;
1369
1370	/* skip to the channel after the one we last used
1371	   and start at that one */
1372	list = &ppp->channels;
1373	for (i = 0; i < ppp->nxchan; ++i) {
1374		list = list->next;
1375		if (list == &ppp->channels) {
1376			i = 0;
1377			break;
1378		}
1379	}
1380
1381	/* create a fragment for each channel */
1382	bits = B;
1383	while (len > 0) {
1384		list = list->next;
1385		if (list == &ppp->channels) {
1386			i = 0;
1387			continue;
1388		}
1389		pch = list_entry(list, struct channel, clist);
1390		++i;
1391		if (!pch->avail)
1392			continue;
1393
1394		/*
1395		 * Skip this channel if it has a fragment pending already and
1396		 * we haven't given a fragment to all of the free channels.
1397		 */
1398		if (pch->avail == 1) {
1399			if (nfree > 0)
1400				continue;
1401		} else {
1402			pch->avail = 1;
1403		}
1404
1405		/* check the channel's mtu and whether it is still attached. */
1406		spin_lock_bh(&pch->downl);
1407		if (pch->chan == NULL) {
1408			/* can't use this channel, it's being deregistered */
1409			if (pch->speed == 0)
1410				nzero--;
1411			else
1412				totspeed -= pch->speed;
1413
1414			spin_unlock_bh(&pch->downl);
1415			pch->avail = 0;
1416			totlen = len;
1417			totfree--;
1418			nfree--;
1419			if (--navail == 0)
1420				break;
1421			continue;
1422		}
1423
 1424		/*
 1425		 * If the channel speed is not set, divide
 1426		 * the packet evenly among the free channels;
 1427		 * otherwise divide it according to the speed
 1428		 * of the channel we are going to transmit on.
 1429		 */
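		/*
		 * Worked example (illustrative): two free channels with
		 * speeds 64000 and 128000 (totspeed = 192000, totfree = 2,
		 * nzero = 0), totlen = 1000 and hdrlen = 6 give
		 * flen = (2*(1000 + 6*2)) / ((192000*2)/64000) - 6 = 331
		 * for the slow channel and 668 for the fast one, i.e. the
		 * payload is split roughly 1:2 in proportion to speed
		 * (the last channel then simply takes whatever remains).
		 */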
1430		flen = len;
1431		if (nfree > 0) {
1432			if (pch->speed == 0) {
1433				flen = len/nfree;
1434				if (nbigger > 0) {
1435					flen++;
1436					nbigger--;
1437				}
1438			} else {
1439				flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
1440					((totspeed*totfree)/pch->speed)) - hdrlen;
1441				if (nbigger > 0) {
1442					flen += ((totfree - nzero)*pch->speed)/totspeed;
1443					nbigger -= ((totfree - nzero)*pch->speed)/
1444							totspeed;
1445				}
1446			}
1447			nfree--;
1448		}
1449
 1450		/*
 1451		 * Check if we are on the last channel or
 1452		 * we exceeded the length of the data to
 1453		 * fragment.
 1454		 */
 1455		if ((nfree <= 0) || (flen > len))
 1456			flen = len;
 1457		/*
 1458		 * It is not worth transmitting on slow channels:
 1459		 * in that case the flen resulting from the above
 1460		 * formula will be less than or equal to zero.
 1461		 * Skip the channel in this case.
 1462		 */
1463		if (flen <= 0) {
1464			pch->avail = 2;
1465			spin_unlock_bh(&pch->downl);
1466			continue;
1467		}
1468
1469		/*
1470		 * hdrlen includes the 2-byte PPP protocol field, but the
1471		 * MTU counts only the payload excluding the protocol field.
1472		 * (RFC1661 Section 2)
1473		 */
1474		mtu = pch->chan->mtu - (hdrlen - 2);
1475		if (mtu < 4)
1476			mtu = 4;
1477		if (flen > mtu)
1478			flen = mtu;
1479		if (flen == len)
1480			bits |= E;
1481		frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
1482		if (!frag)
1483			goto noskb;
1484		q = skb_put(frag, flen + hdrlen);
1485
1486		/* make the MP header */
1487		put_unaligned_be16(PPP_MP, q);
1488		if (ppp->flags & SC_MP_XSHORTSEQ) {
1489			q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
1490			q[3] = ppp->nxseq;
1491		} else {
1492			q[2] = bits;
1493			q[3] = ppp->nxseq >> 16;
1494			q[4] = ppp->nxseq >> 8;
1495			q[5] = ppp->nxseq;
1496		}
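		/* Illustrative header layouts: with long sequence numbers
		 * the 6-byte header is 0x00 0x3d (PPP_MP), then the B/E
		 * bits and a 24-bit sequence number; with short sequence
		 * numbers it is 4 bytes, with the B/E bits and a 12-bit
		 * sequence number packed after the protocol field. */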
1497
1498		memcpy(q + hdrlen, p, flen);
1499
1500		/* try to send it down the channel */
1501		chan = pch->chan;
1502		if (!skb_queue_empty(&pch->file.xq) ||
1503			!chan->ops->start_xmit(chan, frag))
1504			skb_queue_tail(&pch->file.xq, frag);
1505		pch->had_frag = 1;
1506		p += flen;
1507		len -= flen;
1508		++ppp->nxseq;
1509		bits = 0;
1510		spin_unlock_bh(&pch->downl);
1511	}
1512	ppp->nxchan = i;
1513
1514	return 1;
1515
1516 noskb:
1517	spin_unlock_bh(&pch->downl);
1518	if (ppp->debug & 1)
1519		netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
1520	++ppp->dev->stats.tx_errors;
1521	++ppp->nxseq;
1522	return 1;	/* abandon the frame */
1523}
1524#endif /* CONFIG_PPP_MULTILINK */
1525
1526/*
1527 * Try to send data out on a channel.
1528 */
1529static void
1530ppp_channel_push(struct channel *pch)
1531{
1532	struct sk_buff *skb;
1533	struct ppp *ppp;
1534
1535	spin_lock_bh(&pch->downl);
1536	if (pch->chan) {
1537		while (!skb_queue_empty(&pch->file.xq)) {
1538			skb = skb_dequeue(&pch->file.xq);
1539			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
1540				/* put the packet back and try again later */
1541				skb_queue_head(&pch->file.xq, skb);
1542				break;
1543			}
1544		}
1545	} else {
1546		/* channel got deregistered */
1547		skb_queue_purge(&pch->file.xq);
1548	}
1549	spin_unlock_bh(&pch->downl);
1550	/* see if there is anything from the attached unit to be sent */
1551	if (skb_queue_empty(&pch->file.xq)) {
1552		read_lock_bh(&pch->upl);
1553		ppp = pch->ppp;
1554		if (ppp)
1555			ppp_xmit_process(ppp);
1556		read_unlock_bh(&pch->upl);
1557	}
1558}
1559
1560/*
1561 * Receive-side routines.
1562 */
1563
1564struct ppp_mp_skb_parm {
1565	u32		sequence;
1566	u8		BEbits;
1567};
1568#define PPP_MP_CB(skb)	((struct ppp_mp_skb_parm *)((skb)->cb))
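/* The reassembly metadata above is stashed in the skb's cb[] scratch
 * area, which the current owner of an skb is free to reuse; only the
 * MP receive path reads or writes it here. */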
1569
1570static inline void
1571ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1572{
1573	ppp_recv_lock(ppp);
1574	if (!ppp->closing)
1575		ppp_receive_frame(ppp, skb, pch);
1576	else
1577		kfree_skb(skb);
1578	ppp_recv_unlock(ppp);
1579}
1580
1581void
1582ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
1583{
1584	struct channel *pch = chan->ppp;
1585	int proto;
1586
1587	if (!pch) {
1588		kfree_skb(skb);
1589		return;
1590	}
1591
1592	read_lock_bh(&pch->upl);
1593	if (!pskb_may_pull(skb, 2)) {
1594		kfree_skb(skb);
1595		if (pch->ppp) {
1596			++pch->ppp->dev->stats.rx_length_errors;
1597			ppp_receive_error(pch->ppp);
1598		}
1599		goto done;
1600	}
1601
1602	proto = PPP_PROTO(skb);
1603	if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
1604		/* put it on the channel queue */
1605		skb_queue_tail(&pch->file.rq, skb);
1606		/* drop old frames if queue too long */
1607		while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
1608		       (skb = skb_dequeue(&pch->file.rq)))
1609			kfree_skb(skb);
1610		wake_up_interruptible(&pch->file.rwait);
1611	} else {
1612		ppp_do_recv(pch->ppp, skb, pch);
1613	}
1614
1615done:
1616	read_unlock_bh(&pch->upl);
1617}
1618
1619/* Put a 0-length skb in the receive queue as an error indication */
1620void
1621ppp_input_error(struct ppp_channel *chan, int code)
1622{
1623	struct channel *pch = chan->ppp;
1624	struct sk_buff *skb;
1625
1626	if (!pch)
1627		return;
1628
1629	read_lock_bh(&pch->upl);
1630	if (pch->ppp) {
1631		skb = alloc_skb(0, GFP_ATOMIC);
1632		if (skb) {
1633			skb->len = 0;		/* probably unnecessary */
1634			skb->cb[0] = code;
1635			ppp_do_recv(pch->ppp, skb, pch);
1636		}
1637	}
1638	read_unlock_bh(&pch->upl);
1639}
1640
1641/*
1642 * We come in here to process a received frame.
1643 * The receive side of the ppp unit is locked.
1644 */
1645static void
1646ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1647{
1648	/* note: a 0-length skb is used as an error indication */
1649	if (skb->len > 0) {
1650#ifdef CONFIG_PPP_MULTILINK
1651		/* XXX do channel-level decompression here */
1652		if (PPP_PROTO(skb) == PPP_MP)
1653			ppp_receive_mp_frame(ppp, skb, pch);
1654		else
1655#endif /* CONFIG_PPP_MULTILINK */
1656			ppp_receive_nonmp_frame(ppp, skb);
1657	} else {
1658		kfree_skb(skb);
1659		ppp_receive_error(ppp);
1660	}
1661}
1662
1663static void
1664ppp_receive_error(struct ppp *ppp)
1665{
1666	++ppp->dev->stats.rx_errors;
1667	if (ppp->vj)
1668		slhc_toss(ppp->vj);
1669}
1670
1671static void
1672ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1673{
1674	struct sk_buff *ns;
1675	int proto, len, npi;
1676
1677	/*
1678	 * Decompress the frame, if compressed.
1679	 * Note that some decompressors need to see uncompressed frames
1680	 * that come in as well as compressed frames.
1681	 */
1682	if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) &&
1683	    (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
1684		skb = ppp_decompress_frame(ppp, skb);
1685
1686	if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
1687		goto err;
1688
1689	proto = PPP_PROTO(skb);
1690	switch (proto) {
1691	case PPP_VJC_COMP:
1692		/* decompress VJ compressed packets */
1693		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
1694			goto err;
1695
1696		if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
1697			/* copy to a new sk_buff with more tailroom */
1698			ns = dev_alloc_skb(skb->len + 128);
1699			if (!ns) {
1700				netdev_err(ppp->dev, "PPP: no memory "
1701					   "(VJ decomp)\n");
1702				goto err;
1703			}
1704			skb_reserve(ns, 2);
1705			skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
1706			consume_skb(skb);
1707			skb = ns;
1708		}
1709		else
1710			skb->ip_summed = CHECKSUM_NONE;
1711
1712		len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
1713		if (len <= 0) {
1714			netdev_printk(KERN_DEBUG, ppp->dev,
1715				      "PPP: VJ decompression error\n");
1716			goto err;
1717		}
1718		len += 2;
1719		if (len > skb->len)
1720			skb_put(skb, len - skb->len);
1721		else if (len < skb->len)
1722			skb_trim(skb, len);
1723		proto = PPP_IP;
1724		break;
1725
1726	case PPP_VJC_UNCOMP:
1727		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
1728			goto err;
1729
 1730		/* Until we fix the decompressor, we need to make
 1731		 * sure the data portion is linear.
 1732		 */
1733		if (!pskb_may_pull(skb, skb->len))
1734			goto err;
1735
1736		if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
1737			netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
1738			goto err;
1739		}
1740		proto = PPP_IP;
1741		break;
1742
1743	case PPP_CCP:
1744		ppp_ccp_peek(ppp, skb, 1);
1745		break;
1746	}
1747
1748	++ppp->dev->stats.rx_packets;
1749	ppp->dev->stats.rx_bytes += skb->len - 2;
1750
1751	npi = proto_to_npindex(proto);
1752	if (npi < 0) {
1753		/* control or unknown frame - pass it to pppd */
1754		skb_queue_tail(&ppp->file.rq, skb);
1755		/* limit queue length by dropping old frames */
1756		while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
1757		       (skb = skb_dequeue(&ppp->file.rq)))
1758			kfree_skb(skb);
1759		/* wake up any process polling or blocking on read */
1760		wake_up_interruptible(&ppp->file.rwait);
1761
1762	} else {
1763		/* network protocol frame - give it to the kernel */
1764
1765#ifdef CONFIG_PPP_FILTER
1766		/* check if the packet passes the pass and active filters */
1767		/* the filter instructions are constructed assuming
1768		   a four-byte PPP header on each packet */
1769		if (ppp->pass_filter || ppp->active_filter) {
1770			if (skb_cloned(skb) &&
1771			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
1772				goto err;
1773
1774			*skb_push(skb, 2) = 0;
1775			if (ppp->pass_filter &&
1776			    sk_run_filter(skb, ppp->pass_filter) == 0) {
1777				if (ppp->debug & 1)
1778					netdev_printk(KERN_DEBUG, ppp->dev,
1779						      "PPP: inbound frame "
1780						      "not passed\n");
1781				kfree_skb(skb);
1782				return;
1783			}
1784			if (!(ppp->active_filter &&
1785			      sk_run_filter(skb, ppp->active_filter) == 0))
1786				ppp->last_recv = jiffies;
1787			__skb_pull(skb, 2);
1788		} else
1789#endif /* CONFIG_PPP_FILTER */
1790			ppp->last_recv = jiffies;
1791
1792		if ((ppp->dev->flags & IFF_UP) == 0 ||
1793		    ppp->npmode[npi] != NPMODE_PASS) {
1794			kfree_skb(skb);
1795		} else {
1796			/* chop off protocol */
1797			skb_pull_rcsum(skb, 2);
1798			skb->dev = ppp->dev;
1799			skb->protocol = htons(npindex_to_ethertype[npi]);
1800			skb_reset_mac_header(skb);
1801			netif_rx(skb);
1802		}
1803	}
1804	return;
1805
1806 err:
1807	kfree_skb(skb);
1808	ppp_receive_error(ppp);
1809}
1810
1811static struct sk_buff *
1812ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
1813{
1814	int proto = PPP_PROTO(skb);
1815	struct sk_buff *ns;
1816	int len;
1817
 1818	/* Until we fix all the decompressors, we need to make
 1819	 * sure the data portion is linear.
 1820	 */
1821	if (!pskb_may_pull(skb, skb->len))
1822		goto err;
1823
1824	if (proto == PPP_COMP) {
1825		int obuff_size;
1826
 1827		switch (ppp->rcomp->compress_proto) {
1828		case CI_MPPE:
1829			obuff_size = ppp->mru + PPP_HDRLEN + 1;
1830			break;
1831		default:
1832			obuff_size = ppp->mru + PPP_HDRLEN;
1833			break;
1834		}
1835
1836		ns = dev_alloc_skb(obuff_size);
1837		if (!ns) {
1838			netdev_err(ppp->dev, "ppp_decompress_frame: "
1839				   "no memory\n");
1840			goto err;
1841		}
1842		/* the decompressor still expects the A/C bytes in the hdr */
1843		len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
1844				skb->len + 2, ns->data, obuff_size);
1845		if (len < 0) {
1846			/* Pass the compressed frame to pppd as an
1847			   error indication. */
1848			if (len == DECOMP_FATALERROR)
1849				ppp->rstate |= SC_DC_FERROR;
1850			kfree_skb(ns);
1851			goto err;
1852		}
1853
1854		consume_skb(skb);
1855		skb = ns;
1856		skb_put(skb, len);
1857		skb_pull(skb, 2);	/* pull off the A/C bytes */
1858
1859	} else {
1860		/* Uncompressed frame - pass to decompressor so it
1861		   can update its dictionary if necessary. */
1862		if (ppp->rcomp->incomp)
1863			ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
1864					   skb->len + 2);
1865	}
1866
1867	return skb;
1868
1869 err:
1870	ppp->rstate |= SC_DC_ERROR;
1871	ppp_receive_error(ppp);
1872	return skb;
1873}
1874
1875#ifdef CONFIG_PPP_MULTILINK
1876/*
1877 * Receive a multilink frame.
1878 * We put it on the reconstruction queue and then pull off
1879 * as many completed frames as we can.
1880 */
1881static void
1882ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1883{
1884	u32 mask, seq;
1885	struct channel *ch;
 1886	int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ) ? MPHDRLEN_SSN : MPHDRLEN;
1887
1888	if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
1889		goto err;		/* no good, throw it away */
1890
1891	/* Decode sequence number and begin/end bits */
1892	if (ppp->flags & SC_MP_SHORTSEQ) {
1893		seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
1894		mask = 0xfff;
1895	} else {
 1896		seq = (skb->data[3] << 16) | (skb->data[4] << 8) | skb->data[5];
1897		mask = 0xffffff;
1898	}
1899	PPP_MP_CB(skb)->BEbits = skb->data[2];
1900	skb_pull(skb, mphdrlen);	/* pull off PPP and MP headers */
1901
1902	/*
1903	 * Do protocol ID decompression on the first fragment of each packet.
1904	 */
1905	if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1))
1906		*skb_push(skb, 1) = 0;
1907
1908	/*
1909	 * Expand sequence number to 32 bits, making it as close
1910	 * as possible to ppp->minseq.
1911	 */
1912	seq |= ppp->minseq & ~mask;
1913	if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
1914		seq += mask + 1;
1915	else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
1916		seq -= mask + 1;	/* should never happen */
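	/*
	 * Worked example (illustrative), short sequence numbers
	 * (mask = 0xfff): if minseq = 0x1ffe and the fragment carries
	 * seq = 0x003, the OR gives 0x1003; since minseq - seq > 0x7ff
	 * we add 0x1000, yielding 0x2003 -- the 12-bit counter wrapped.
	 */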
1917	PPP_MP_CB(skb)->sequence = seq;
1918	pch->lastseq = seq;
1919
1920	/*
1921	 * If this packet comes before the next one we were expecting,
1922	 * drop it.
1923	 */
1924	if (seq_before(seq, ppp->nextseq)) {
1925		kfree_skb(skb);
1926		++ppp->dev->stats.rx_dropped;
1927		ppp_receive_error(ppp);
1928		return;
1929	}
1930
1931	/*
1932	 * Reevaluate minseq, the minimum over all channels of the
1933	 * last sequence number received on each channel.  Because of
1934	 * the increasing sequence number rule, we know that any fragment
1935	 * before `minseq' which hasn't arrived is never going to arrive.
1936	 * The list of channels can't change because we have the receive
1937	 * side of the ppp unit locked.
1938	 */
1939	list_for_each_entry(ch, &ppp->channels, clist) {
1940		if (seq_before(ch->lastseq, seq))
1941			seq = ch->lastseq;
1942	}
1943	if (seq_before(ppp->minseq, seq))
1944		ppp->minseq = seq;
1945
1946	/* Put the fragment on the reconstruction queue */
1947	ppp_mp_insert(ppp, skb);
1948
1949	/* If the queue is getting long, don't wait any longer for packets
1950	   before the start of the queue. */
1951	if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
1952		struct sk_buff *mskb = skb_peek(&ppp->mrq);
1953		if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence))
1954			ppp->minseq = PPP_MP_CB(mskb)->sequence;
1955	}
1956
1957	/* Pull completed packets off the queue and receive them. */
1958	while ((skb = ppp_mp_reconstruct(ppp))) {
1959		if (pskb_may_pull(skb, 2))
1960			ppp_receive_nonmp_frame(ppp, skb);
1961		else {
1962			++ppp->dev->stats.rx_length_errors;
1963			kfree_skb(skb);
1964			ppp_receive_error(ppp);
1965		}
1966	}
1967
1968	return;
1969
1970 err:
1971	kfree_skb(skb);
1972	ppp_receive_error(ppp);
1973}
1974
1975/*
1976 * Insert a fragment on the MP reconstruction queue.
1977 * The queue is ordered by increasing sequence number.
1978 */
1979static void
1980ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
1981{
1982	struct sk_buff *p;
1983	struct sk_buff_head *list = &ppp->mrq;
1984	u32 seq = PPP_MP_CB(skb)->sequence;
1985
1986	/* N.B. we don't need to lock the list lock because we have the
1987	   ppp unit receive-side lock. */
1988	skb_queue_walk(list, p) {
1989		if (seq_before(seq, PPP_MP_CB(p)->sequence))
1990			break;
1991	}
1992	__skb_queue_before(list, p, skb);
1993}
1994
1995/*
1996 * Reconstruct a packet from the MP fragment queue.
1997 * We go through increasing sequence numbers until we find a
1998 * complete packet, or we get to the sequence number for a fragment
1999 * which hasn't arrived but might still do so.
2000 */
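/*
 * Illustrative: a packet split into three fragments arrives as
 * sequence n (B bit set), n+1 (no bits), n+2 (E bit set); the walk
 * below accepts it once all three are present and no earlier
 * fragment is still outstanding.
 */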
2001static struct sk_buff *
2002ppp_mp_reconstruct(struct ppp *ppp)
2003{
2004	u32 seq = ppp->nextseq;
2005	u32 minseq = ppp->minseq;
2006	struct sk_buff_head *list = &ppp->mrq;
2007	struct sk_buff *p, *tmp;
2008	struct sk_buff *head, *tail;
2009	struct sk_buff *skb = NULL;
2010	int lost = 0, len = 0;
2011
2012	if (ppp->mrru == 0)	/* do nothing until mrru is set */
2013		return NULL;
2014	head = list->next;
2015	tail = NULL;
2016	skb_queue_walk_safe(list, p, tmp) {
2017	again:
2018		if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
 2019			/* this can't happen; ignore the skb anyway */
2020			netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
2021				   "seq %u < %u\n",
2022				   PPP_MP_CB(p)->sequence, seq);
2023			__skb_unlink(p, list);
2024			kfree_skb(p);
2025			continue;
2026		}
2027		if (PPP_MP_CB(p)->sequence != seq) {
2028			u32 oldseq;
2029			/* Fragment `seq' is missing.  If it is after
2030			   minseq, it might arrive later, so stop here. */
2031			if (seq_after(seq, minseq))
2032				break;
2033			/* Fragment `seq' is lost, keep going. */
2034			lost = 1;
2035			oldseq = seq;
 2036			seq = seq_before(minseq, PPP_MP_CB(p)->sequence) ?
 2037				minseq + 1 : PPP_MP_CB(p)->sequence;
2038
2039			if (ppp->debug & 1)
2040				netdev_printk(KERN_DEBUG, ppp->dev,
2041					      "lost frag %u..%u\n",
2042					      oldseq, seq-1);
2043
2044			goto again;
2045		}
2046
2047		/*
2048		 * At this point we know that all the fragments from
2049		 * ppp->nextseq to seq are either present or lost.
2050		 * Also, there are no complete packets in the queue
2051		 * that have no missing fragments and end before this
2052		 * fragment.
2053		 */
2054
2055		/* B bit set indicates this fragment starts a packet */
2056		if (PPP_MP_CB(p)->BEbits & B) {
2057			head = p;
2058			lost = 0;
2059			len = 0;
2060		}
2061
2062		len += p->len;
2063
2064		/* Got a complete packet yet? */
2065		if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) &&
2066		    (PPP_MP_CB(head)->BEbits & B)) {
2067			if (len > ppp->mrru + 2) {
2068				++ppp->dev->stats.rx_length_errors;
2069				netdev_printk(KERN_DEBUG, ppp->dev,
2070					      "PPP: reconstructed packet"
2071					      " is too long (%d)\n", len);
2072			} else {
2073				tail = p;
2074				break;
2075			}
2076			ppp->nextseq = seq + 1;
2077		}
2078
2079		/*
2080		 * If this is the ending fragment of a packet,
2081		 * and we haven't found a complete valid packet yet,
2082		 * we can discard up to and including this fragment.
2083		 */
2084		if (PPP_MP_CB(p)->BEbits & E) {
2085			struct sk_buff *tmp2;
2086
2087			skb_queue_reverse_walk_from_safe(list, p, tmp2) {
2088				if (ppp->debug & 1)
2089					netdev_printk(KERN_DEBUG, ppp->dev,
2090						      "discarding frag %u\n",
2091						      PPP_MP_CB(p)->sequence);
2092				__skb_unlink(p, list);
2093				kfree_skb(p);
2094			}
2095			head = skb_peek(list);
2096			if (!head)
2097				break;
2098		}
2099		++seq;
2100	}
2101
2102	/* If we have a complete packet, copy it all into one skb. */
2103	if (tail != NULL) {
2104		/* If we have discarded any fragments,
2105		   signal a receive error. */
2106		if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
2107			skb_queue_walk_safe(list, p, tmp) {
2108				if (p == head)
2109					break;
2110				if (ppp->debug & 1)
2111					netdev_printk(KERN_DEBUG, ppp->dev,
2112						      "discarding frag %u\n",
2113						      PPP_MP_CB(p)->sequence);
2114				__skb_unlink(p, list);
2115				kfree_skb(p);
2116			}
2117
2118			if (ppp->debug & 1)
2119				netdev_printk(KERN_DEBUG, ppp->dev,
2120					      "  missed pkts %u..%u\n",
2121					      ppp->nextseq,
2122					      PPP_MP_CB(head)->sequence-1);
2123			++ppp->dev->stats.rx_dropped;
2124			ppp_receive_error(ppp);
2125		}
2126
2127		skb = head;
2128		if (head != tail) {
2129			struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
2130			p = skb_queue_next(list, head);
2131			__skb_unlink(skb, list);
2132			skb_queue_walk_from_safe(list, p, tmp) {
2133				__skb_unlink(p, list);
2134				*fragpp = p;
2135				p->next = NULL;
2136				fragpp = &p->next;
2137
2138				skb->len += p->len;
2139				skb->data_len += p->len;
2140				skb->truesize += p->truesize;
2141
2142				if (p == tail)
2143					break;
2144			}
2145		} else {
2146			__skb_unlink(skb, list);
2147		}
2148
2149		ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
2150	}
2151
2152	return skb;
2153}
2154#endif /* CONFIG_PPP_MULTILINK */
2155
2156/*
2157 * Channel interface.
2158 */
2159
2160/* Create a new, unattached ppp channel. */
2161int ppp_register_channel(struct ppp_channel *chan)
2162{
2163	return ppp_register_net_channel(current->nsproxy->net_ns, chan);
2164}
2165
2166/* Create a new, unattached ppp channel for specified net. */
2167int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
2168{
2169	struct channel *pch;
2170	struct ppp_net *pn;
2171
2172	pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
2173	if (!pch)
2174		return -ENOMEM;
2175
2176	pn = ppp_pernet(net);
2177
2178	pch->ppp = NULL;
2179	pch->chan = chan;
2180	pch->chan_net = net;
2181	chan->ppp = pch;
2182	init_ppp_file(&pch->file, CHANNEL);
2183	pch->file.hdrlen = chan->hdrlen;
2184#ifdef CONFIG_PPP_MULTILINK
2185	pch->lastseq = -1;
2186#endif /* CONFIG_PPP_MULTILINK */
2187	init_rwsem(&pch->chan_sem);
2188	spin_lock_init(&pch->downl);
2189	rwlock_init(&pch->upl);
2190
2191	spin_lock_bh(&pn->all_channels_lock);
2192	pch->file.index = ++pn->last_channel_index;
2193	list_add(&pch->list, &pn->new_channels);
2194	atomic_inc(&channel_count);
2195	spin_unlock_bh(&pn->all_channels_lock);
2196
2197	return 0;
2198}
2199
2200/*
2201 * Return the index of a channel.
2202 */
2203int ppp_channel_index(struct ppp_channel *chan)
2204{
2205	struct channel *pch = chan->ppp;
2206
2207	if (pch)
2208		return pch->file.index;
2209	return -1;
2210}
2211
2212/*
2213 * Return the PPP unit number to which a channel is connected.
2214 */
2215int ppp_unit_number(struct ppp_channel *chan)
2216{
2217	struct channel *pch = chan->ppp;
2218	int unit = -1;
2219
2220	if (pch) {
2221		read_lock_bh(&pch->upl);
2222		if (pch->ppp)
2223			unit = pch->ppp->file.index;
2224		read_unlock_bh(&pch->upl);
2225	}
2226	return unit;
2227}
2228
2229/*
2230 * Return the PPP device interface name of a channel.
2231 */
2232char *ppp_dev_name(struct ppp_channel *chan)
2233{
2234	struct channel *pch = chan->ppp;
2235	char *name = NULL;
2236
2237	if (pch) {
2238		read_lock_bh(&pch->upl);
2239		if (pch->ppp && pch->ppp->dev)
2240			name = pch->ppp->dev->name;
2241		read_unlock_bh(&pch->upl);
2242	}
2243	return name;
2244}
2245
2246
2247/*
2248 * Disconnect a channel from the generic layer.
2249 * This must be called in process context.
2250 */
2251void
2252ppp_unregister_channel(struct ppp_channel *chan)
2253{
2254	struct channel *pch = chan->ppp;
2255	struct ppp_net *pn;
2256
2257	if (!pch)
2258		return;		/* should never happen */
2259
2260	chan->ppp = NULL;
2261
2262	/*
 2263	 * This ensures that we have returned from any calls into
 2264	 * the channel's start_xmit or ioctl routine before we proceed.
2265	 */
2266	down_write(&pch->chan_sem);
2267	spin_lock_bh(&pch->downl);
2268	pch->chan = NULL;
2269	spin_unlock_bh(&pch->downl);
2270	up_write(&pch->chan_sem);
2271	ppp_disconnect_channel(pch);
2272
2273	pn = ppp_pernet(pch->chan_net);
2274	spin_lock_bh(&pn->all_channels_lock);
2275	list_del(&pch->list);
2276	spin_unlock_bh(&pn->all_channels_lock);
2277
2278	pch->file.dead = 1;
2279	wake_up_interruptible(&pch->file.rwait);
2280	if (atomic_dec_and_test(&pch->file.refcnt))
2281		ppp_destroy_channel(pch);
2282}
2283
2284/*
2285 * Callback from a channel when it can accept more to transmit.
2286 * This should be called at BH/softirq level, not interrupt level.
2287 */
2288void
2289ppp_output_wakeup(struct ppp_channel *chan)
2290{
2291	struct channel *pch = chan->ppp;
2292
2293	if (!pch)
2294		return;
2295	ppp_channel_push(pch);
2296}
2297
2298/*
2299 * Compression control.
2300 */
2301
2302/* Process the PPPIOCSCOMPRESS ioctl. */
2303static int
2304ppp_set_compress(struct ppp *ppp, unsigned long arg)
2305{
2306	int err;
2307	struct compressor *cp, *ocomp;
2308	struct ppp_option_data data;
2309	void *state, *ostate;
2310	unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
2311
2312	err = -EFAULT;
2313	if (copy_from_user(&data, (void __user *) arg, sizeof(data)) ||
2314	    (data.length <= CCP_MAX_OPTION_LENGTH &&
2315	     copy_from_user(ccp_option, (void __user *) data.ptr, data.length)))
2316		goto out;
2317	err = -EINVAL;
2318	if (data.length > CCP_MAX_OPTION_LENGTH ||
2319	    ccp_option[1] < 2 || ccp_option[1] > data.length)
2320		goto out;
2321
2322	cp = try_then_request_module(
2323		find_compressor(ccp_option[0]),
2324		"ppp-compress-%d", ccp_option[0]);
2325	if (!cp)
2326		goto out;
2327
2328	err = -ENOBUFS;
2329	if (data.transmit) {
2330		state = cp->comp_alloc(ccp_option, data.length);
2331		if (state) {
2332			ppp_xmit_lock(ppp);
2333			ppp->xstate &= ~SC_COMP_RUN;
2334			ocomp = ppp->xcomp;
2335			ostate = ppp->xc_state;
2336			ppp->xcomp = cp;
2337			ppp->xc_state = state;
2338			ppp_xmit_unlock(ppp);
2339			if (ostate) {
2340				ocomp->comp_free(ostate);
2341				module_put(ocomp->owner);
2342			}
2343			err = 0;
2344		} else
2345			module_put(cp->owner);
2346
2347	} else {
2348		state = cp->decomp_alloc(ccp_option, data.length);
2349		if (state) {
2350			ppp_recv_lock(ppp);
2351			ppp->rstate &= ~SC_DECOMP_RUN;
2352			ocomp = ppp->rcomp;
2353			ostate = ppp->rc_state;
2354			ppp->rcomp = cp;
2355			ppp->rc_state = state;
2356			ppp_recv_unlock(ppp);
2357			if (ostate) {
2358				ocomp->decomp_free(ostate);
2359				module_put(ocomp->owner);
2360			}
2361			err = 0;
2362		} else
2363			module_put(cp->owner);
2364	}
2365
2366 out:
2367	return err;
2368}
2369
2370/*
2371 * Look at a CCP packet and update our state accordingly.
2372 * We assume the caller has the xmit or recv path locked.
2373 */
2374static void
2375ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
2376{
2377	unsigned char *dp;
2378	int len;
2379
2380	if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
2381		return;	/* no header */
2382	dp = skb->data + 2;
2383
2384	switch (CCP_CODE(dp)) {
2385	case CCP_CONFREQ:
2386
2387		/* A ConfReq starts negotiation of compression
2388		 * in one direction of transmission,
2389		 * and hence brings it down...but which way?
2390		 *
2391		 * Remember:
2392		 * A ConfReq indicates what the sender would like to receive
2393		 */
 2394		if (inbound)
 2395			/* He is proposing what I should send */
 2396			ppp->xstate &= ~SC_COMP_RUN;
 2397		else
 2398			/* I am proposing what he should send */
2399			ppp->rstate &= ~SC_DECOMP_RUN;
2400
2401		break;
2402
2403	case CCP_TERMREQ:
2404	case CCP_TERMACK:
2405		/*
2406		 * CCP is going down, both directions of transmission
2407		 */
2408		ppp->rstate &= ~SC_DECOMP_RUN;
2409		ppp->xstate &= ~SC_COMP_RUN;
2410		break;
2411
2412	case CCP_CONFACK:
2413		if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
2414			break;
2415		len = CCP_LENGTH(dp);
2416		if (!pskb_may_pull(skb, len + 2))
2417			return;		/* too short */
2418		dp += CCP_HDRLEN;
2419		len -= CCP_HDRLEN;
2420		if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
2421			break;
2422		if (inbound) {
2423			/* we will start receiving compressed packets */
2424			if (!ppp->rc_state)
2425				break;
2426			if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
2427					ppp->file.index, 0, ppp->mru, ppp->debug)) {
2428				ppp->rstate |= SC_DECOMP_RUN;
2429				ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
2430			}
2431		} else {
2432			/* we will soon start sending compressed packets */
2433			if (!ppp->xc_state)
2434				break;
2435			if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
2436					ppp->file.index, 0, ppp->debug))
2437				ppp->xstate |= SC_COMP_RUN;
2438		}
2439		break;
2440
2441	case CCP_RESETACK:
2442		/* reset the [de]compressor */
2443		if ((ppp->flags & SC_CCP_UP) == 0)
2444			break;
2445		if (inbound) {
2446			if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
2447				ppp->rcomp->decomp_reset(ppp->rc_state);
2448				ppp->rstate &= ~SC_DC_ERROR;
2449			}
2450		} else {
2451			if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
2452				ppp->xcomp->comp_reset(ppp->xc_state);
2453		}
2454		break;
2455	}
2456}
2457
2458/* Free up compression resources. */
2459static void
2460ppp_ccp_closed(struct ppp *ppp)
2461{
2462	void *xstate, *rstate;
2463	struct compressor *xcomp, *rcomp;
2464
2465	ppp_lock(ppp);
2466	ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
2467	ppp->xstate = 0;
2468	xcomp = ppp->xcomp;
2469	xstate = ppp->xc_state;
2470	ppp->xc_state = NULL;
2471	ppp->rstate = 0;
2472	rcomp = ppp->rcomp;
2473	rstate = ppp->rc_state;
2474	ppp->rc_state = NULL;
2475	ppp_unlock(ppp);
2476
2477	if (xstate) {
2478		xcomp->comp_free(xstate);
2479		module_put(xcomp->owner);
2480	}
2481	if (rstate) {
2482		rcomp->decomp_free(rstate);
2483		module_put(rcomp->owner);
2484	}
2485}
2486
2487/* List of compressors. */
2488static LIST_HEAD(compressor_list);
2489static DEFINE_SPINLOCK(compressor_list_lock);
2490
2491struct compressor_entry {
2492	struct list_head list;
2493	struct compressor *comp;
2494};
2495
2496static struct compressor_entry *
2497find_comp_entry(int proto)
2498{
2499	struct compressor_entry *ce;
2500
2501	list_for_each_entry(ce, &compressor_list, list) {
2502		if (ce->comp->compress_proto == proto)
2503			return ce;
2504	}
2505	return NULL;
2506}
2507
2508/* Register a compressor */
2509int
2510ppp_register_compressor(struct compressor *cp)
2511{
2512	struct compressor_entry *ce;
2513	int ret;
2514	spin_lock(&compressor_list_lock);
2515	ret = -EEXIST;
2516	if (find_comp_entry(cp->compress_proto))
2517		goto out;
2518	ret = -ENOMEM;
2519	ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
2520	if (!ce)
2521		goto out;
2522	ret = 0;
2523	ce->comp = cp;
2524	list_add(&ce->list, &compressor_list);
2525 out:
2526	spin_unlock(&compressor_list_lock);
2527	return ret;
2528}
2529
2530/* Unregister a compressor */
2531void
2532ppp_unregister_compressor(struct compressor *cp)
2533{
2534	struct compressor_entry *ce;
2535
2536	spin_lock(&compressor_list_lock);
2537	ce = find_comp_entry(cp->compress_proto);
2538	if (ce && ce->comp == cp) {
2539		list_del(&ce->list);
2540		kfree(ce);
2541	}
2542	spin_unlock(&compressor_list_lock);
2543}
2544
2545/* Find a compressor. */
2546static struct compressor *
2547find_compressor(int type)
2548{
2549	struct compressor_entry *ce;
2550	struct compressor *cp = NULL;
2551
2552	spin_lock(&compressor_list_lock);
2553	ce = find_comp_entry(type);
2554	if (ce) {
2555		cp = ce->comp;
2556		if (!try_module_get(cp->owner))
2557			cp = NULL;
2558	}
2559	spin_unlock(&compressor_list_lock);
2560	return cp;
2561}
2562
2563/*
 2564 * Miscellaneous stuff.
2565 */
2566
2567static void
2568ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
2569{
2570	struct slcompress *vj = ppp->vj;
2571
2572	memset(st, 0, sizeof(*st));
2573	st->p.ppp_ipackets = ppp->dev->stats.rx_packets;
2574	st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
2575	st->p.ppp_ibytes = ppp->dev->stats.rx_bytes;
2576	st->p.ppp_opackets = ppp->dev->stats.tx_packets;
2577	st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
2578	st->p.ppp_obytes = ppp->dev->stats.tx_bytes;
2579	if (!vj)
2580		return;
2581	st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
2582	st->vj.vjs_compressed = vj->sls_o_compressed;
2583	st->vj.vjs_searches = vj->sls_o_searches;
2584	st->vj.vjs_misses = vj->sls_o_misses;
2585	st->vj.vjs_errorin = vj->sls_i_error;
2586	st->vj.vjs_tossed = vj->sls_i_tossed;
2587	st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
2588	st->vj.vjs_compressedin = vj->sls_i_compressed;
2589}
2590
2591/*
2592 * Stuff for handling the lists of ppp units and channels
2593 * and for initialization.
2594 */
2595
2596/*
2597 * Create a new ppp interface unit.  Fails if it can't allocate memory
2598 * or if there is already a unit with the requested number.
2599 * unit == -1 means allocate a new number.
2600 */
2601static struct ppp *
2602ppp_create_interface(struct net *net, int unit, int *retp)
2603{
2604	struct ppp *ppp;
2605	struct ppp_net *pn;
2606	struct net_device *dev = NULL;
2607	int ret = -ENOMEM;
2608	int i;
2609
2610	dev = alloc_netdev(sizeof(struct ppp), "", ppp_setup);
2611	if (!dev)
2612		goto out1;
2613
2614	pn = ppp_pernet(net);
2615
2616	ppp = netdev_priv(dev);
2617	ppp->dev = dev;
2618	ppp->mru = PPP_MRU;
2619	init_ppp_file(&ppp->file, INTERFACE);
2620	ppp->file.hdrlen = PPP_HDRLEN - 2;	/* don't count proto bytes */
2621	for (i = 0; i < NUM_NP; ++i)
2622		ppp->npmode[i] = NPMODE_PASS;
2623	INIT_LIST_HEAD(&ppp->channels);
2624	spin_lock_init(&ppp->rlock);
2625	spin_lock_init(&ppp->wlock);
2626#ifdef CONFIG_PPP_MULTILINK
2627	ppp->minseq = -1;
2628	skb_queue_head_init(&ppp->mrq);
2629#endif /* CONFIG_PPP_MULTILINK */
2630
 2631	/*
 2632	 * drum roll: don't forget to set
 2633	 * the net namespace this device belongs to
 2634	 */
2635	dev_net_set(dev, net);
2636
2637	mutex_lock(&pn->all_ppp_mutex);
2638
2639	if (unit < 0) {
2640		unit = unit_get(&pn->units_idr, ppp);
2641		if (unit < 0) {
2642			ret = unit;
2643			goto out2;
2644		}
2645	} else {
2646		ret = -EEXIST;
2647		if (unit_find(&pn->units_idr, unit))
2648			goto out2; /* unit already exists */
 2649		/*
 2650		 * If the caller asked for a specific unit number,
 2651		 * try to satisfy the request; otherwise the caller
 2652		 * should ask us to allocate a new unit number.
 2653		 *
 2654		 * NOTE: returning EEXIST is not strictly accurate,
 2655		 * but it makes pppd ask us to allocate a new unit
 2656		 * in this case, so the user ends up happy.
 2657		 */
2658		unit = unit_set(&pn->units_idr, ppp, unit);
2659		if (unit < 0)
2660			goto out2;
2661	}
2662
2663	/* Initialize the new ppp unit */
2664	ppp->file.index = unit;
2665	sprintf(dev->name, "ppp%d", unit);
2666
2667	ret = register_netdev(dev);
2668	if (ret != 0) {
2669		unit_put(&pn->units_idr, unit);
2670		netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
2671			   dev->name, ret);
2672		goto out2;
2673	}
2674
2675	ppp->ppp_net = net;
2676
2677	atomic_inc(&ppp_unit_count);
2678	mutex_unlock(&pn->all_ppp_mutex);
2679
2680	*retp = 0;
2681	return ppp;
2682
2683out2:
2684	mutex_unlock(&pn->all_ppp_mutex);
2685	free_netdev(dev);
2686out1:
2687	*retp = ret;
2688	return NULL;
2689}
2690
2691/*
2692 * Initialize a ppp_file structure.
2693 */
2694static void
2695init_ppp_file(struct ppp_file *pf, int kind)
2696{
2697	pf->kind = kind;
2698	skb_queue_head_init(&pf->xq);
2699	skb_queue_head_init(&pf->rq);
2700	atomic_set(&pf->refcnt, 1);
2701	init_waitqueue_head(&pf->rwait);
2702}
2703
2704/*
2705 * Take down a ppp interface unit - called when the owning file
2706 * (the one that created the unit) is closed or detached.
2707 */
2708static void ppp_shutdown_interface(struct ppp *ppp)
2709{
2710	struct ppp_net *pn;
2711
2712	pn = ppp_pernet(ppp->ppp_net);
2713	mutex_lock(&pn->all_ppp_mutex);
2714
2715	/* This will call dev_close() for us. */
2716	ppp_lock(ppp);
2717	if (!ppp->closing) {
2718		ppp->closing = 1;
2719		ppp_unlock(ppp);
2720		unregister_netdev(ppp->dev);
2721		unit_put(&pn->units_idr, ppp->file.index);
2722	} else
2723		ppp_unlock(ppp);
2724
2725	ppp->file.dead = 1;
2726	ppp->owner = NULL;
2727	wake_up_interruptible(&ppp->file.rwait);
2728
2729	mutex_unlock(&pn->all_ppp_mutex);
2730}
2731
2732/*
2733 * Free the memory used by a ppp unit.  This is only called once
2734 * there are no channels connected to the unit and no file structs
2735 * that reference the unit.
2736 */
2737static void ppp_destroy_interface(struct ppp *ppp)
2738{
2739	atomic_dec(&ppp_unit_count);
2740
2741	if (!ppp->file.dead || ppp->n_channels) {
2742		/* "can't happen" */
2743		netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
2744			   "but dead=%d n_channels=%d !\n",
2745			   ppp, ppp->file.dead, ppp->n_channels);
2746		return;
2747	}
2748
2749	ppp_ccp_closed(ppp);
2750	if (ppp->vj) {
2751		slhc_free(ppp->vj);
2752		ppp->vj = NULL;
2753	}
2754	skb_queue_purge(&ppp->file.xq);
2755	skb_queue_purge(&ppp->file.rq);
2756#ifdef CONFIG_PPP_MULTILINK
2757	skb_queue_purge(&ppp->mrq);
2758#endif /* CONFIG_PPP_MULTILINK */
2759#ifdef CONFIG_PPP_FILTER
2760	kfree(ppp->pass_filter);
2761	ppp->pass_filter = NULL;
2762	kfree(ppp->active_filter);
2763	ppp->active_filter = NULL;
2764#endif /* CONFIG_PPP_FILTER */
2765
2766	kfree_skb(ppp->xmit_pending);
2767
2768	free_netdev(ppp->dev);
2769}
2770
2771/*
2772 * Locate an existing ppp unit.
2773 * The caller should have locked the all_ppp_mutex.
2774 */
2775static struct ppp *
2776ppp_find_unit(struct ppp_net *pn, int unit)
2777{
2778	return unit_find(&pn->units_idr, unit);
2779}
2780
2781/*
2782 * Locate an existing ppp channel.
2783 * The caller should have locked the all_channels_lock.
2784 * First we look in the new_channels list, then in the
2785 * all_channels list.  If found in the new_channels list,
2786 * we move it to the all_channels list.  This is for speed
2787 * when we have a lot of channels in use.
2788 */
2789static struct channel *
2790ppp_find_channel(struct ppp_net *pn, int unit)
2791{
2792	struct channel *pch;
2793
2794	list_for_each_entry(pch, &pn->new_channels, list) {
2795		if (pch->file.index == unit) {
2796			list_move(&pch->list, &pn->all_channels);
2797			return pch;
2798		}
2799	}
2800
2801	list_for_each_entry(pch, &pn->all_channels, list) {
2802		if (pch->file.index == unit)
2803			return pch;
2804	}
2805
2806	return NULL;
2807}
2808
2809/*
2810 * Connect a PPP channel to a PPP interface unit.
2811 */
2812static int
2813ppp_connect_channel(struct channel *pch, int unit)
2814{
2815	struct ppp *ppp;
2816	struct ppp_net *pn;
2817	int ret = -ENXIO;
2818	int hdrlen;
2819
2820	pn = ppp_pernet(pch->chan_net);
2821
2822	mutex_lock(&pn->all_ppp_mutex);
2823	ppp = ppp_find_unit(pn, unit);
2824	if (!ppp)
2825		goto out;
2826	write_lock_bh(&pch->upl);
2827	ret = -EINVAL;
2828	if (pch->ppp)
2829		goto outl;
2830
2831	ppp_lock(ppp);
2832	if (pch->file.hdrlen > ppp->file.hdrlen)
2833		ppp->file.hdrlen = pch->file.hdrlen;
2834	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
2835	if (hdrlen > ppp->dev->hard_header_len)
2836		ppp->dev->hard_header_len = hdrlen;
2837	list_add_tail(&pch->clist, &ppp->channels);
2838	++ppp->n_channels;
2839	pch->ppp = ppp;
2840	atomic_inc(&ppp->file.refcnt);
2841	ppp_unlock(ppp);
2842	ret = 0;
2843
2844 outl:
2845	write_unlock_bh(&pch->upl);
2846 out:
2847	mutex_unlock(&pn->all_ppp_mutex);
2848	return ret;
2849}
2850
2851/*
2852 * Disconnect a channel from its ppp unit.
2853 */
2854static int
2855ppp_disconnect_channel(struct channel *pch)
2856{
2857	struct ppp *ppp;
2858	int err = -EINVAL;
2859
2860	write_lock_bh(&pch->upl);
2861	ppp = pch->ppp;
2862	pch->ppp = NULL;
2863	write_unlock_bh(&pch->upl);
2864	if (ppp) {
2865		/* remove it from the ppp unit's list */
2866		ppp_lock(ppp);
2867		list_del(&pch->clist);
2868		if (--ppp->n_channels == 0)
2869			wake_up_interruptible(&ppp->file.rwait);
2870		ppp_unlock(ppp);
2871		if (atomic_dec_and_test(&ppp->file.refcnt))
2872			ppp_destroy_interface(ppp);
2873		err = 0;
2874	}
2875	return err;
2876}
2877
2878/*
2879 * Free up the resources used by a ppp channel.
2880 */
2881static void ppp_destroy_channel(struct channel *pch)
2882{
2883	atomic_dec(&channel_count);
2884
2885	if (!pch->file.dead) {
2886		/* "can't happen" */
2887		pr_err("ppp: destroying undead channel %p !\n", pch);
2888		return;
2889	}
2890	skb_queue_purge(&pch->file.xq);
2891	skb_queue_purge(&pch->file.rq);
2892	kfree(pch);
2893}
2894
2895static void __exit ppp_cleanup(void)
2896{
2897	/* should never happen */
2898	if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
2899		pr_err("PPP: removing module but units remain!\n");
2900	unregister_chrdev(PPP_MAJOR, "ppp");
2901	device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
2902	class_destroy(ppp_class);
2903	unregister_pernet_device(&ppp_net_ops);
2904}
2905
2906/*
2907 * Units handling. Caller must protect concurrent access
2908 * by holding all_ppp_mutex
2909 */
2910
2911static int __unit_alloc(struct idr *p, void *ptr, int n)
2912{
2913	int unit, err;
2914
2915again:
2916	if (!idr_pre_get(p, GFP_KERNEL)) {
2917		pr_err("PPP: No free memory for idr\n");
2918		return -ENOMEM;
2919	}
2920
2921	err = idr_get_new_above(p, ptr, n, &unit);
2922	if (err < 0) {
2923		if (err == -EAGAIN)
2924			goto again;
2925		return err;
2926	}
2927
2928	return unit;
2929}
2930
2931/* associate pointer with specified number */
2932static int unit_set(struct idr *p, void *ptr, int n)
2933{
2934	int unit;
2935
2936	unit = __unit_alloc(p, ptr, n);
2937	if (unit < 0)
2938		return unit;
2939	else if (unit != n) {
2940		idr_remove(p, unit);
2941		return -EINVAL;
2942	}
2943
2944	return unit;
2945}
2946
2947/* get new free unit number and associate pointer with it */
2948static int unit_get(struct idr *p, void *ptr)
2949{
2950	return __unit_alloc(p, ptr, 0);
2951}
2952
2953/* put unit number back to a pool */
2954static void unit_put(struct idr *p, int n)
2955{
2956	idr_remove(p, n);
2957}
2958
2959/* get pointer associated with the number */
2960static void *unit_find(struct idr *p, int n)
2961{
2962	return idr_find(p, n);
2963}
2964
2965/* Module/initialization stuff */
2966
2967module_init(ppp_init);
2968module_exit(ppp_cleanup);
2969
2970EXPORT_SYMBOL(ppp_register_net_channel);
2971EXPORT_SYMBOL(ppp_register_channel);
2972EXPORT_SYMBOL(ppp_unregister_channel);
2973EXPORT_SYMBOL(ppp_channel_index);
2974EXPORT_SYMBOL(ppp_unit_number);
2975EXPORT_SYMBOL(ppp_dev_name);
2976EXPORT_SYMBOL(ppp_input);
2977EXPORT_SYMBOL(ppp_input_error);
2978EXPORT_SYMBOL(ppp_output_wakeup);
2979EXPORT_SYMBOL(ppp_register_compressor);
2980EXPORT_SYMBOL(ppp_unregister_compressor);
2981MODULE_LICENSE("GPL");
2982MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
2983MODULE_ALIAS("devname:ppp");