Linux Audio

Check our new training course

Yocto / OpenEmbedded training

Mar 24-27, 2025, special US time zones
Register
Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * slip.c	This module implements the SLIP protocol for kernel-based
   4 *		devices like TTY.  It interfaces between a raw TTY, and the
   5 *		kernel's INET protocol layers.
   6 *
   7 * Version:	@(#)slip.c	0.8.3	12/24/94
   8 *
   9 * Authors:	Laurence Culhane, <loz@holmes.demon.co.uk>
  10 *		Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
  11 *
  12 * Fixes:
  13 *		Alan Cox	: 	Sanity checks and avoid tx overruns.
  14 *					Has a new sl->mtu field.
  15 *		Alan Cox	: 	Found cause of overrun. ifconfig sl0
  16 *					mtu upwards. Driver now spots this
  17 *					and grows/shrinks its buffers(hack!).
  18 *					Memory leak if you run out of memory
  19 *					setting up a slip driver fixed.
  20 *		Matt Dillon	:	Printable slip (borrowed from NET2E)
  21 *	Pauline Middelink	:	Slip driver fixes.
  22 *		Alan Cox	:	Honours the old SL_COMPRESSED flag
  23 *		Alan Cox	:	KISS AX.25 and AXUI IP support
  24 *		Michael Riepe	:	Automatic CSLIP recognition added
  25 *		Charles Hedrick :	CSLIP header length problem fix.
  26 *		Alan Cox	:	Corrected non-IP cases of the above.
  27 *		Alan Cox	:	Now uses hardware type as per FvK.
  28 *		Alan Cox	:	Default to 192.168.0.0 (RFC 1597)
  29 *		A.N.Kuznetsov	:	dev_tint() recursion fix.
  30 *	Dmitry Gorodchanin	:	SLIP memory leaks
  31 *      Dmitry Gorodchanin      :       Code cleanup. Reduce tty driver
  32 *                                      buffering from 4096 to 256 bytes.
  33 *                                      Improving SLIP response time.
  34 *                                      CONFIG_SLIP_MODE_SLIP6.
  35 *                                      ifconfig sl? up & down now works
  36 *					correctly.
  37 *					Modularization.
  38 *              Alan Cox        :       Oops - fix AX.25 buffer lengths
  39 *      Dmitry Gorodchanin      :       Even more cleanups. Preserve CSLIP
  40 *                                      statistics. Include CSLIP code only
  41 *                                      if it really needed.
  42 *		Alan Cox	:	Free slhc buffers in the right place.
  43 *		Alan Cox	:	Allow for digipeated IP over AX.25
  44 *		Matti Aarnio	:	Dynamic SLIP devices, with ideas taken
  45 *					from Jim Freeman's <jfree@caldera.com>
  46 *					dynamic PPP devices.  We do NOT kfree()
  47 *					device entries, just reg./unreg. them
  48 *					as they are needed.  We kfree() them
  49 *					at module cleanup.
  50 *					With MODULE-loading ``insmod'', user
  51 *					can issue parameter:  slip_maxdev=1024
  52 *					(Or how much he/she wants.. Default
  53 *					is 256)
  54 *	Stanislav Voronyi	:	Slip line checking, with ideas taken
  55 *					from multislip BSDI driver which was
  56 *					written by Igor Chechik, RELCOM Corp.
  57 *					Only algorithms have been ported to
  58 *					Linux SLIP driver.
  59 *	Vitaly E. Lavrov	:	Sane behaviour on tty hangup.
  60 *	Alexey Kuznetsov	:	Cleanup interfaces to tty & netdevice
  61 *					modules.
  62 */
  63
  64#define SL_CHECK_TRANSMIT
  65#include <linux/compat.h>
  66#include <linux/module.h>
  67#include <linux/moduleparam.h>
  68
  69#include <linux/uaccess.h>
  70#include <linux/bitops.h>
  71#include <linux/sched/signal.h>
  72#include <linux/string.h>
  73#include <linux/mm.h>
  74#include <linux/interrupt.h>
  75#include <linux/in.h>
  76#include <linux/tty.h>
  77#include <linux/errno.h>
  78#include <linux/netdevice.h>
  79#include <linux/etherdevice.h>
  80#include <linux/skbuff.h>
  81#include <linux/rtnetlink.h>
  82#include <linux/if_arp.h>
  83#include <linux/if_slip.h>
  84#include <linux/delay.h>
  85#include <linux/init.h>
  86#include <linux/slab.h>
  87#include <linux/workqueue.h>
  88#include "slip.h"
  89#ifdef CONFIG_INET
  90#include <linux/ip.h>
  91#include <linux/tcp.h>
  92#include <net/slhc_vj.h>
  93#endif
  94
  95#define SLIP_VERSION	"0.8.4-NET3.019-NEWTTY"
  96
  97static struct net_device **slip_devs;
  98
  99static int slip_maxdev = SL_NRUNIT;
 100module_param(slip_maxdev, int, 0);
 101MODULE_PARM_DESC(slip_maxdev, "Maximum number of slip devices");
 102
 103static int slip_esc(unsigned char *p, unsigned char *d, int len);
 104static void slip_unesc(struct slip *sl, unsigned char c);
 105#ifdef CONFIG_SLIP_MODE_SLIP6
 106static int slip_esc6(unsigned char *p, unsigned char *d, int len);
 107static void slip_unesc6(struct slip *sl, unsigned char c);
 108#endif
 109#ifdef CONFIG_SLIP_SMART
 110static void sl_keepalive(struct timer_list *t);
 111static void sl_outfill(struct timer_list *t);
 112static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd);
 113#endif
 114
 115/********************************
 116*  Buffer administration routines:
 117*	sl_alloc_bufs()
 118*	sl_free_bufs()
 119*	sl_realloc_bufs()
 120*
 121* NOTE: sl_realloc_bufs != sl_free_bufs + sl_alloc_bufs, because
 122*	sl_realloc_bufs provides strong atomicity and reallocation
 123*	on actively running device.
 124*********************************/
 125
/*
   Allocate channel buffers.

   Allocates receive/transmit (and, with CSLIP, compression) buffers
   sized for @mtu and installs them atomically under sl->lock.
   Returns 0 on success, -ENOBUFS on allocation failure, or -ENODEV
   if the tty disappeared while we were allocating.
 */

static int sl_alloc_bufs(struct slip *sl, int mtu)
{
	int err = -ENOBUFS;
	unsigned long len;
	char *rbuff = NULL;
	char *xbuff = NULL;
#ifdef SL_INCLUDE_CSLIP
	char *cbuff = NULL;
	struct slcompress *slcomp = NULL;
#endif

	/*
	 * Allocate the SLIP frame buffers:
	 *
	 * rbuff	Receive buffer.
	 * xbuff	Transmit buffer.
	 * cbuff        Temporary compression buffer.
	 */
	len = mtu * 2;

	/*
	 * allow for arrival of larger UDP packets, even if we say not to
	 * also fixes a bug in which SunOS sends 512-byte packets even with
	 * an MSS of 128
	 */
	if (len < 576 * 2)
		len = 576 * 2;
	rbuff = kmalloc(len + 4, GFP_KERNEL);
	if (rbuff == NULL)
		goto err_exit;
	xbuff = kmalloc(len + 4, GFP_KERNEL);
	if (xbuff == NULL)
		goto err_exit;
#ifdef SL_INCLUDE_CSLIP
	cbuff = kmalloc(len + 4, GFP_KERNEL);
	if (cbuff == NULL)
		goto err_exit;
	slcomp = slhc_init(16, 16);
	if (IS_ERR(slcomp))
		goto err_exit;
#endif
	spin_lock_bh(&sl->lock);
	/* The tty may have been detached while we slept in kmalloc(). */
	if (sl->tty == NULL) {
		spin_unlock_bh(&sl->lock);
		err = -ENODEV;
		goto err_exit;
	}
	sl->mtu	     = mtu;
	sl->buffsize = len;
	sl->rcount   = 0;
	sl->xleft    = 0;
	/* Publish the new buffers; the locals now hold the previous
	 * pointers (if any), which the cleanup below releases. */
	rbuff = xchg(&sl->rbuff, rbuff);
	xbuff = xchg(&sl->xbuff, xbuff);
#ifdef SL_INCLUDE_CSLIP
	cbuff = xchg(&sl->cbuff, cbuff);
	slcomp = xchg(&sl->slcomp, slcomp);
#endif
#ifdef CONFIG_SLIP_MODE_SLIP6
	sl->xdata    = 0;
	sl->xbits    = 0;
#endif
	spin_unlock_bh(&sl->lock);
	err = 0;

	/* Cleanup: frees the fresh buffers on failure, or the replaced
	 * ones on success.  kfree(NULL) is a no-op; slhc_free() is
	 * presumably safe for NULL and for the ERR_PTR left by a failed
	 * slhc_init() -- confirm against slhc.c. */
err_exit:
#ifdef SL_INCLUDE_CSLIP
	kfree(cbuff);
	slhc_free(slcomp);
#endif
	kfree(xbuff);
	kfree(rbuff);
	return err;
}
 204
/* Free a SLIP channel's buffers.  Each pointer is atomically detached
 * with xchg() before being freed, so a concurrent reader never picks
 * up a pointer that is about to be released. */
static void sl_free_bufs(struct slip *sl)
{
	/* Free all SLIP frame buffers. */
	kfree(xchg(&sl->rbuff, NULL));
	kfree(xchg(&sl->xbuff, NULL));
#ifdef SL_INCLUDE_CSLIP
	kfree(xchg(&sl->cbuff, NULL));
	slhc_free(xchg(&sl->slcomp, NULL));
#endif
}
 216
/*
   Reallocate slip channel buffers for a new @mtu, preserving any frame
   currently in flight.  Unlike free+alloc this swaps buffers under
   sl->lock, so it is safe on an actively running device (see the NOTE
   above sl_alloc_bufs).
 */

static int sl_realloc_bufs(struct slip *sl, int mtu)
{
	int err = 0;
	struct net_device *dev = sl->dev;
	unsigned char *xbuff, *rbuff;
#ifdef SL_INCLUDE_CSLIP
	unsigned char *cbuff;
#endif
	int len = mtu * 2;

/*
 * allow for arrival of larger UDP packets, even if we say not to
 * also fixes a bug in which SunOS sends 512-byte packets even with
 * an MSS of 128
 */
	if (len < 576 * 2)
		len = 576 * 2;

	/* GFP_ATOMIC: may be called with the device running. */
	xbuff = kmalloc(len + 4, GFP_ATOMIC);
	rbuff = kmalloc(len + 4, GFP_ATOMIC);
#ifdef SL_INCLUDE_CSLIP
	cbuff = kmalloc(len + 4, GFP_ATOMIC);
#endif


#ifdef SL_INCLUDE_CSLIP
	if (xbuff == NULL || rbuff == NULL || cbuff == NULL)  {
#else
	if (xbuff == NULL || rbuff == NULL)  {
#endif
		/* A failed grow cancels the MTU change; a failed shrink
		 * is harmless -- the old, larger buffers stay in place. */
		if (mtu > sl->mtu) {
			printk(KERN_WARNING "%s: unable to grow slip buffers, MTU change cancelled.\n",
			       dev->name);
			err = -ENOBUFS;
		}
		goto done;
	}
	spin_lock_bh(&sl->lock);

	err = -ENODEV;
	if (sl->tty == NULL)
		goto done_on_bh;

	/* Swap in the new buffers; the locals now own the old ones. */
	xbuff    = xchg(&sl->xbuff, xbuff);
	rbuff    = xchg(&sl->rbuff, rbuff);
#ifdef SL_INCLUDE_CSLIP
	cbuff    = xchg(&sl->cbuff, cbuff);
#endif
	/* Preserve a partially-sent frame: sl->xhead still points into
	 * the old xbuff (now the local), freed only after this copy. */
	if (sl->xleft)  {
		if (sl->xleft <= len)  {
			memcpy(sl->xbuff, sl->xhead, sl->xleft);
		} else  {
			sl->xleft = 0;
			dev->stats.tx_dropped++;
		}
	}
	sl->xhead = sl->xbuff;

	/* Likewise preserve a partially-received frame from old rbuff. */
	if (sl->rcount)  {
		if (sl->rcount <= len) {
			memcpy(sl->rbuff, rbuff, sl->rcount);
		} else  {
			sl->rcount = 0;
			dev->stats.rx_over_errors++;
			set_bit(SLF_ERROR, &sl->flags);
		}
	}
	sl->mtu      = mtu;
	dev->mtu      = mtu;
	sl->buffsize = len;
	err = 0;

done_on_bh:
	spin_unlock_bh(&sl->lock);

done:
	/* Frees old buffers on success, or the new ones on failure. */
	kfree(xbuff);
	kfree(rbuff);
#ifdef SL_INCLUDE_CSLIP
	kfree(cbuff);
#endif
	return err;
}
 304
 305
/* Stop the netdev queue while a frame is being pushed to the tty.
 * (Historic name -- this once set an atomic "sending" bit.) */
static inline void sl_lock(struct slip *sl)
{
	netif_stop_queue(sl->dev);
}
 311
 312
/* Restart the netdev queue once the frame is fully handed over.
 * (Historic name -- this once cleared the "sending" bit in assembly.) */
static inline void sl_unlock(struct slip *sl)
{
	netif_wake_queue(sl->dev);
}
 318
/* Send one completely decapsulated IP datagram (sl->rbuff, sl->rcount
 * bytes) up to the IP layer.  Called from the unescape routines when a
 * frame delimiter arrives. */
static void sl_bump(struct slip *sl)
{
	struct net_device *dev = sl->dev;
	struct sk_buff *skb;
	int count;

	count = sl->rcount;
#ifdef SL_INCLUDE_CSLIP
	if (sl->mode & (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) {
		/* The first byte carries the CSLIP packet-type marker. */
		unsigned char c = sl->rbuff[0];
		if (c & SL_TYPE_COMPRESSED_TCP) {
			/* ignore compressed packets when CSLIP is off */
			if (!(sl->mode & SL_MODE_CSLIP)) {
				printk(KERN_WARNING "%s: compressed packet ignored\n", dev->name);
				return;
			}
			/* make sure we've reserved enough space for uncompress
			   to use */
			if (count + 80 > sl->buffsize) {
				dev->stats.rx_over_errors++;
				return;
			}
			count = slhc_uncompress(sl->slcomp, sl->rbuff, count);
			if (count <= 0)
				return;
		} else if (c >= SL_TYPE_UNCOMPRESSED_TCP) {
			/* In adaptive mode, seeing an uncompressed CSLIP
			 * frame means the peer talks CSLIP: latch it on. */
			if (!(sl->mode & SL_MODE_CSLIP)) {
				/* turn on header compression */
				sl->mode |= SL_MODE_CSLIP;
				sl->mode &= ~SL_MODE_ADAPTIVE;
				printk(KERN_INFO "%s: header compression turned on\n", dev->name);
			}
			/* Mask the CSLIP type marker out of the first byte
			 * (presumably restoring the IP header byte -- see
			 * slhc) and teach slhc this connection's header. */
			sl->rbuff[0] &= 0x4f;
			if (slhc_remember(sl->slcomp, sl->rbuff, count) <= 0)
				return;
		}
	}
#endif  /* SL_INCLUDE_CSLIP */

	dev->stats.rx_bytes += count;

	skb = dev_alloc_skb(count);
	if (skb == NULL) {
		printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
		dev->stats.rx_dropped++;
		return;
	}
	skb->dev = dev;
	skb_put_data(skb, sl->rbuff, count);
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IP);
	netif_rx(skb);
	dev->stats.rx_packets++;
}
 374
/* Encapsulate one IP datagram and stuff into a TTY queue.
 * Called from sl_xmit() with sl->lock held and the queue stopped. */
static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
{
	unsigned char *p;
	int actual, count;

	if (len > sl->mtu) {		/* Sigh, shouldn't occur BUT ... */
		printk(KERN_WARNING "%s: truncating oversized transmit packet!\n", sl->dev->name);
		sl->dev->stats.tx_dropped++;
		sl_unlock(sl);
		return;
	}

	p = icp;
#ifdef SL_INCLUDE_CSLIP
	/* VJ-compress the header; slhc may redirect p into sl->cbuff. */
	if (sl->mode & SL_MODE_CSLIP)
		len = slhc_compress(sl->slcomp, p, len, sl->cbuff, &p, 1);
#endif
#ifdef CONFIG_SLIP_MODE_SLIP6
	if (sl->mode & SL_MODE_SLIP6)
		count = slip_esc6(p, sl->xbuff, len);
	else
#endif
		count = slip_esc(p, sl->xbuff, len);

	/* Order of next two lines is *very* important.
	 * When we are sending a little amount of data,
	 * the transfer may be completed inside the ops->write()
	 * routine, because it's running with interrupts enabled.
	 * In this case we *never* got WRITE_WAKEUP event,
	 * if we did not request it before write operation.
	 *       14 Oct 1994  Dmitry Gorodchanin.
	 */
	set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
	actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
#ifdef SL_CHECK_TRANSMIT
	netif_trans_update(sl->dev);
#endif
	/* Whatever the tty did not accept now is drained later by
	 * slip_transmit() on the next write-wakeup. */
	sl->xleft = count - actual;
	sl->xhead = sl->xbuff + actual;
#ifdef CONFIG_SLIP_SMART
	/* VSV */
	clear_bit(SLF_OUTWAIT, &sl->flags);	/* reset outfill flag */
#endif
}
 420
/* Write out any remaining transmit buffer. Scheduled when tty is writable */
static void slip_transmit(struct work_struct *work)
{
	struct slip *sl = container_of(work, struct slip, tx_work);
	int actual;

	spin_lock_bh(&sl->lock);
	/* First make sure we're connected. */
	if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
		spin_unlock_bh(&sl->lock);
		return;
	}

	if (sl->xleft <= 0)  {
		/* Now serial buffer is almost free & we can start
		 * transmission of another packet */
		sl->dev->stats.tx_packets++;
		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
		spin_unlock_bh(&sl->lock);
		sl_unlock(sl);
		return;
	}

	/* Push the remainder; the tty may again take less than asked,
	 * in which case a further write-wakeup reschedules this work. */
	actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
	sl->xleft -= actual;
	sl->xhead += actual;
	spin_unlock_bh(&sl->lock);
}
 449
/*
 * Called by the driver when there's room for more data.
 * Schedule the transmit.
 */
static void slip_write_wakeup(struct tty_struct *tty)
{
	struct slip *sl;

	/* disc_data is cleared under RCU in slip_close(); the read-side
	 * lock keeps sl valid while we queue the work. */
	rcu_read_lock();
	sl = rcu_dereference(tty->disc_data);
	if (sl)
		schedule_work(&sl->tx_work);
	rcu_read_unlock();
}
 464
/* Netdev watchdog callback: the transmit queue has been stopped for
 * longer than dev->watchdog_timeo. */
static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct slip *sl = netdev_priv(dev);

	spin_lock(&sl->lock);

	if (netif_queue_stopped(dev)) {
		if (!netif_running(dev) || !sl->tty)
			goto out;

		/* May be we must check transmitter timeout here ?
		 *      14 Oct 1994 Dmitry Gorodchanin.
		 */
#ifdef SL_CHECK_TRANSMIT
		if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ))  {
			/* 20 sec timeout not reached */
			goto out;
		}
		printk(KERN_WARNING "%s: transmit timed out, %s?\n",
			dev->name,
			(tty_chars_in_buffer(sl->tty) || sl->xleft) ?
				"bad line quality" : "driver error");
		/* Drop the stuck frame and let the queue run again. */
		sl->xleft = 0;
		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
		sl_unlock(sl);
#endif
	}
out:
	spin_unlock(&sl->lock);
}
 495
 496
/* Encapsulate an IP datagram and kick it into a TTY queue. */
static netdev_tx_t
sl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct slip *sl = netdev_priv(dev);

	spin_lock(&sl->lock);
	if (!netif_running(dev)) {
		spin_unlock(&sl->lock);
		printk(KERN_WARNING "%s: xmit call when iface is down\n", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	if (sl->tty == NULL) {
		spin_unlock(&sl->lock);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Stop the queue; slip_transmit() restarts it once the whole
	 * frame has been handed to the tty. */
	sl_lock(sl);
	dev->stats.tx_bytes += skb->len;
	sl_encaps(sl, skb->data, skb->len);
	spin_unlock(&sl->lock);

	/* The data was copied into sl->xbuff; the skb is ours to free. */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
 524
 525
 526/******************************************
 527 *   Routines looking at netdevice side.
 528 ******************************************/
 529
/* Netdevice UP -> DOWN routine */

static int
sl_close(struct net_device *dev)
{
	struct slip *sl = netdev_priv(dev);

	spin_lock_bh(&sl->lock);
	if (sl->tty)
		/* TTY discipline is running. */
		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
	netif_stop_queue(dev);
	/* Discard any partially received/sent frame state. */
	sl->rcount   = 0;
	sl->xleft    = 0;
	spin_unlock_bh(&sl->lock);

	return 0;
}
 548
/* Netdevice DOWN -> UP routine */

static int sl_open(struct net_device *dev)
{
	struct slip *sl = netdev_priv(dev);

	if (sl->tty == NULL)
		return -ENODEV;

	/* Keep only the INUSE bit: wipes stale ERROR/ESCAPE/... state
	 * left over from a previous session. */
	sl->flags &= (1 << SLF_INUSE);
	netif_start_queue(dev);
	return 0;
}
 562
 563/* Netdevice change MTU request */
 564
 565static int sl_change_mtu(struct net_device *dev, int new_mtu)
 566{
 567	struct slip *sl = netdev_priv(dev);
 568
 569	return sl_realloc_bufs(sl, new_mtu);
 570}
 571
/* Netdevice get statistics request */

static void
sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct net_device_stats *devstats = &dev->stats;
#ifdef SL_INCLUDE_CSLIP
	struct slip *sl = netdev_priv(dev);
	struct slcompress *comp = sl->slcomp;
#endif
	/* Plain copy of the software counters maintained by the driver. */
	stats->rx_packets     = devstats->rx_packets;
	stats->tx_packets     = devstats->tx_packets;
	stats->rx_bytes       = devstats->rx_bytes;
	stats->tx_bytes       = devstats->tx_bytes;
	stats->rx_dropped     = devstats->rx_dropped;
	stats->tx_dropped     = devstats->tx_dropped;
	stats->tx_errors      = devstats->tx_errors;
	stats->rx_errors      = devstats->rx_errors;
	stats->rx_over_errors = devstats->rx_over_errors;

#ifdef SL_INCLUDE_CSLIP
	if (comp) {
		/* Generic compressed statistics */
		stats->rx_compressed   = comp->sls_i_compressed;
		stats->tx_compressed   = comp->sls_o_compressed;

		/* Do we really still need this?  (Historic overloading of
		 * the fifo/collision counters with CSLIP statistics.) */
		stats->rx_fifo_errors += comp->sls_i_compressed;
		stats->rx_dropped     += comp->sls_i_tossed;
		stats->tx_fifo_errors += comp->sls_o_compressed;
		stats->collisions     += comp->sls_o_misses;
	}
#endif
}
 606
/* Netdevice register callback */

static int sl_init(struct net_device *dev)
{
	struct slip *sl = netdev_priv(dev);

	/*
	 *	Finish setting up the DEVICE info.
	 */

	dev->mtu		= sl->mtu;
	/* The encapsulation mode is encoded into the ARP hardware type. */
	dev->type		= ARPHRD_SLIP + sl->mode;
#ifdef SL_CHECK_TRANSMIT
	/* Matches the 20*HZ stall check in sl_tx_timeout(). */
	dev->watchdog_timeo	= 20*HZ;
#endif
	return 0;
}
 624
 625
/* Netdevice unregister callback: release the frame buffers. */
static void sl_uninit(struct net_device *dev)
{
	struct slip *sl = netdev_priv(dev);

	sl_free_bufs(sl);
}
 632
/* Hook the destructor so we can free slip devices at the right point in time */
static void sl_free_netdev(struct net_device *dev)
{
	int i = dev->base_addr;	/* slot index stashed there by sl_alloc() */

	/* Only clear the table slot; the netdev itself is freed by the
	 * core since dev->needs_free_netdev is set in sl_setup(). */
	slip_devs[i] = NULL;
}
 640
/* Netdevice operations for a SLIP channel. */
static const struct net_device_ops sl_netdev_ops = {
	.ndo_init		= sl_init,
	.ndo_uninit	  	= sl_uninit,
	.ndo_open		= sl_open,
	.ndo_stop		= sl_close,
	.ndo_start_xmit		= sl_xmit,
	.ndo_get_stats64        = sl_get_stats64,
	.ndo_change_mtu		= sl_change_mtu,
	.ndo_tx_timeout		= sl_tx_timeout,
#ifdef CONFIG_SLIP_SMART
	.ndo_siocdevprivate	= sl_siocdevprivate,
#endif
};
 654
 655
/* alloc_netdev() setup callback: initialize a fresh SLIP netdevice. */
static void sl_setup(struct net_device *dev)
{
	dev->netdev_ops		= &sl_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->priv_destructor	= sl_free_netdev;

	/* Raw IP link: no link-layer header and no hardware address. */
	dev->hard_header_len	= 0;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 10;

	/* MTU range: 68 - 65534 */
	dev->min_mtu = 68;
	dev->max_mtu = 65534;

	/* New-style flags. */
	dev->flags		= IFF_NOARP|IFF_POINTOPOINT|IFF_MULTICAST;
}
 673
 674/******************************************
 675  Routines looking at TTY side.
 676 ******************************************/
 677
 678
 679/*
 680 * Handle the 'receiver data ready' interrupt.
 681 * This function is called by the 'tty_io' module in the kernel when
 682 * a block of SLIP data has been received, which can now be decapsulated
 683 * and sent on to some IP layer for further processing. This will not
 684 * be re-entered while running but other ldisc functions may be called
 685 * in parallel
 686 */
 687
static void slip_receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp,
			     size_t count)
{
	struct slip *sl = tty->disc_data;

	if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
		return;

	/* Read the characters out of the buffer */
	while (count--) {
		/* A non-zero flag byte means the tty layer reported an
		 * error for this character: count it at most once per
		 * frame (SLF_ERROR latches) and skip the byte. */
		if (fp && *fp++) {
			if (!test_and_set_bit(SLF_ERROR, &sl->flags))
				sl->dev->stats.rx_errors++;
			cp++;
			continue;
		}
#ifdef CONFIG_SLIP_MODE_SLIP6
		if (sl->mode & SL_MODE_SLIP6)
			slip_unesc6(sl, *cp++);
		else
#endif
			slip_unesc(sl, *cp++);
	}
}
 712
 713/************************************
 714 *  slip_open helper routines.
 715 ************************************/
 716
/* Collect hanged up channels */
static void sl_sync(void)
{
	int i;
	struct net_device *dev;
	struct slip	  *sl;

	for (i = 0; i < slip_maxdev; i++) {
		dev = slip_devs[i];
		/* Table is scanned up to the first empty slot. */
		if (dev == NULL)
			break;

		sl = netdev_priv(dev);
		/* Skip channels still bound to a tty or leased out. */
		if (sl->tty || sl->leased)
			continue;
		/* Orphaned by a hangup: take the interface down. */
		if (dev->flags & IFF_UP)
			dev_close(dev);
	}
}
 736
 737
/* Find a free SLIP channel, and link in this `tty' line. */
static struct slip *sl_alloc(void)
{
	int i;
	char name[IFNAMSIZ];
	struct net_device *dev = NULL;
	struct slip       *sl;

	/* Find the first unused slot in the device table. */
	for (i = 0; i < slip_maxdev; i++) {
		dev = slip_devs[i];
		if (dev == NULL)
			break;
	}
	/* Sorry, too many, all slots in use */
	if (i >= slip_maxdev)
		return NULL;

	sprintf(name, "sl%d", i);
	dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, sl_setup);
	if (!dev)
		return NULL;

	/* Remember the slot index so sl_free_netdev() can clear it. */
	dev->base_addr  = i;
	sl = netdev_priv(dev);

	/* Initialize channel control data */
	sl->magic       = SLIP_MAGIC;
	sl->dev	      	= dev;
	spin_lock_init(&sl->lock);
	INIT_WORK(&sl->tx_work, slip_transmit);
	sl->mode        = SL_MODE_DEFAULT;
#ifdef CONFIG_SLIP_SMART
	/* initialize timer_list struct */
	timer_setup(&sl->keepalive_timer, sl_keepalive, 0);
	timer_setup(&sl->outfill_timer, sl_outfill, 0);
#endif
	slip_devs[i] = dev;
	return sl;
}
 777
 778/*
 779 * Open the high-level part of the SLIP channel.
 780 * This function is called by the TTY module when the
 781 * SLIP line discipline is called for.  Because we are
 782 * sure the tty line exists, we only have to link it to
 783 * a free SLIP channel...
 784 *
 785 * Called in process context serialized from other ldisc calls.
 786 */
 787
static int slip_open(struct tty_struct *tty)
{
	struct slip *sl;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* A write method is mandatory: frames go out via ops->write(). */
	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;

	/* RTnetlink lock is misused here to serialize concurrent
	   opens of slip channels. There are better ways, but it is
	   the simplest one.
	 */
	rtnl_lock();

	/* Collect hanged up channels. */
	sl_sync();

	sl = tty->disc_data;

	err = -EEXIST;
	/* First make sure we're not already connected. */
	if (sl && sl->magic == SLIP_MAGIC)
		goto err_exit;

	/* OK.  Find a free SLIP channel to use. */
	err = -ENFILE;
	sl = sl_alloc();
	if (sl == NULL)
		goto err_exit;

	/* Bind the channel to this tty. */
	sl->tty = tty;
	tty->disc_data = sl;
	sl->pid = current->pid;

	if (!test_bit(SLF_INUSE, &sl->flags)) {
		/* Perform the low-level SLIP initialization. */
		err = sl_alloc_bufs(sl, SL_MTU);
		if (err)
			goto err_free_chan;

		set_bit(SLF_INUSE, &sl->flags);

		err = register_netdevice(sl->dev);
		if (err)
			goto err_free_bufs;
	}

#ifdef CONFIG_SLIP_SMART
	/* Re-arm any keepalive/outfill settings carried on the channel. */
	if (sl->keepalive) {
		sl->keepalive_timer.expires = jiffies + sl->keepalive * HZ;
		add_timer(&sl->keepalive_timer);
	}
	if (sl->outfill) {
		sl->outfill_timer.expires = jiffies + sl->outfill * HZ;
		add_timer(&sl->outfill_timer);
	}
#endif

	/* Done.  We have linked the TTY line to a channel. */
	rtnl_unlock();
	tty->receive_room = 65536;	/* We don't flow control */

	/* TTY layer expects 0 on success */
	return 0;

err_free_bufs:
	sl_free_bufs(sl);

err_free_chan:
	/* Unwind the partial tty binding before freeing the netdev. */
	sl->tty = NULL;
	tty->disc_data = NULL;
	clear_bit(SLF_INUSE, &sl->flags);
	sl_free_netdev(sl->dev);
	/* do not call free_netdev before rtnl_unlock */
	rtnl_unlock();
	free_netdev(sl->dev);
	return err;

err_exit:
	rtnl_unlock();

	/* Count references from TTY module */
	return err;
}
 875
/*
 * Close down a SLIP channel.
 * This means flushing out any pending queues, and then returning. This
 * call is serialized against other ldisc functions.
 *
 * We also use this method for a hangup event
 */

static void slip_close(struct tty_struct *tty)
{
	struct slip *sl = tty->disc_data;

	/* First make sure we're connected. */
	if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
		return;

	/* Unpublish disc_data so slip_write_wakeup() (an RCU reader)
	 * can no longer find us, then detach the tty. */
	spin_lock_bh(&sl->lock);
	rcu_assign_pointer(tty->disc_data, NULL);
	sl->tty = NULL;
	spin_unlock_bh(&sl->lock);

	/* Wait out in-flight RCU readers and any queued transmit work
	 * before tearing anything else down. */
	synchronize_rcu();
	flush_work(&sl->tx_work);

	/* VSV = very important to remove timers */
#ifdef CONFIG_SLIP_SMART
	del_timer_sync(&sl->keepalive_timer);
	del_timer_sync(&sl->outfill_timer);
#endif
	/* Flush network side */
	unregister_netdev(sl->dev);
	/* This will complete via sl_free_netdev */
}
 909
static void slip_hangup(struct tty_struct *tty)
{
	/* A tty hangup is handled exactly like an explicit ldisc close. */
	slip_close(tty);
}
 914 /************************************************************************
 915  *			STANDARD SLIP ENCAPSULATION		  	 *
 916  ************************************************************************/
 917
 918static int slip_esc(unsigned char *s, unsigned char *d, int len)
 919{
 920	unsigned char *ptr = d;
 921	unsigned char c;
 922
 923	/*
 924	 * Send an initial END character to flush out any
 925	 * data that may have accumulated in the receiver
 926	 * due to line noise.
 927	 */
 928
 929	*ptr++ = END;
 930
 931	/*
 932	 * For each byte in the packet, send the appropriate
 933	 * character sequence, according to the SLIP protocol.
 934	 */
 935
 936	while (len-- > 0) {
 937		switch (c = *s++) {
 938		case END:
 939			*ptr++ = ESC;
 940			*ptr++ = ESC_END;
 941			break;
 942		case ESC:
 943			*ptr++ = ESC;
 944			*ptr++ = ESC_ESC;
 945			break;
 946		default:
 947			*ptr++ = c;
 948			break;
 949		}
 950	}
 951	*ptr++ = END;
 952	return ptr - d;
 953}
 954
/* Feed one received byte into the SLIP de-escaping state machine.
 * A complete, clean frame (delimited by END) is delivered upward via
 * sl_bump().  The escape state lives in the SLF_ESCAPE flag bit. */
static void slip_unesc(struct slip *sl, unsigned char s)
{

	switch (s) {
	case END:
#ifdef CONFIG_SLIP_SMART
		/* drop keeptest bit = VSV */
		if (test_bit(SLF_KEEPTEST, &sl->flags))
			clear_bit(SLF_KEEPTEST, &sl->flags);
#endif

		/* Deliver only if received without errors and longer
		 * than a bare delimiter exchange. */
		if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
		    (sl->rcount > 2))
			sl_bump(sl);
		clear_bit(SLF_ESCAPE, &sl->flags);
		sl->rcount = 0;
		return;

	case ESC:
		set_bit(SLF_ESCAPE, &sl->flags);
		return;
	case ESC_ESC:
		/* Only substitutes when directly preceded by ESC. */
		if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
			s = ESC;
		break;
	case ESC_END:
		if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
			s = END;
		break;
	}
	/* Store the (possibly unescaped) byte unless this frame is
	 * already marked bad; overflow marks it bad. */
	if (!test_bit(SLF_ERROR, &sl->flags))  {
		if (sl->rcount < sl->buffsize)  {
			sl->rbuff[sl->rcount++] = s;
			return;
		}
		sl->dev->stats.rx_over_errors++;
		set_bit(SLF_ERROR, &sl->flags);
	}
}
 994
 995
 996#ifdef CONFIG_SLIP_MODE_SLIP6
 997/************************************************************************
 998 *			 6 BIT SLIP ENCAPSULATION			*
 999 ************************************************************************/
1000
/* SLIP6 ("printable SLIP") framing: encode @len bytes from @s into @d
 * as printable ASCII, six payload bits per output byte (0x30 + bits),
 * delimited by 0x70 on both sides.  Returns the bytes written to @d. */
static int slip_esc6(unsigned char *s, unsigned char *d, int len)
{
	unsigned char *out = d;
	unsigned short acc = 0;	/* bit accumulator */
	short nbits = 0;	/* number of bits currently held in acc */
	int i;

	/* Opening delimiter also flushes receiver-side line noise. */
	*out++ = 0x70;

	/* Shift each input byte into the accumulator and drain it in
	 * 6-bit groups, most significant bits first. */
	for (i = 0; i < len; i++) {
		acc = (acc << 8) | s[i];
		nbits += 8;
		while (nbits >= 6) {
			nbits -= 6;
			*out++ = 0x30 + ((acc >> nbits) & 0x3F);
		}
	}

	/* Left-align and flush any leftover bits (< 6 of them). */
	if (nbits)
		*out++ = 0x30 + ((acc << (6 - nbits)) & 0x3F);

	/* Closing delimiter. */
	*out++ = 0x70;
	return out - d;
}
1037
/* Feed one received byte into the 6-bit (printable SLIP) decoder.
 * Mirrors slip_esc6(): 0x70 delimits frames, bytes 0x30..0x6F each
 * carry six payload bits; anything else is silently ignored. */
static void slip_unesc6(struct slip *sl, unsigned char s)
{
	unsigned char c;

	if (s == 0x70) {
		/* Frame delimiter: deliver and reset decoder state. */
#ifdef CONFIG_SLIP_SMART
		/* drop keeptest bit = VSV */
		if (test_bit(SLF_KEEPTEST, &sl->flags))
			clear_bit(SLF_KEEPTEST, &sl->flags);
#endif

		if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
		    (sl->rcount > 2))
			sl_bump(sl);
		sl->rcount = 0;
		sl->xbits = 0;
		sl->xdata = 0;
	} else if (s >= 0x30 && s < 0x70) {
		/* Accumulate 6 bits; emit a byte whenever 8+ are held. */
		sl->xdata = (sl->xdata << 6) | ((s - 0x30) & 0x3F);
		sl->xbits += 6;
		if (sl->xbits >= 8) {
			sl->xbits -= 8;
			c = (unsigned char)(sl->xdata >> sl->xbits);
			if (!test_bit(SLF_ERROR, &sl->flags))  {
				if (sl->rcount < sl->buffsize)  {
					sl->rbuff[sl->rcount++] = c;
					return;
				}
				sl->dev->stats.rx_over_errors++;
				set_bit(SLF_ERROR, &sl->flags);
			}
		}
	}
}
1072#endif /* CONFIG_SLIP_MODE_SLIP6 */
1073
/* Perform I/O control on an active SLIP channel (tty ldisc ioctl). */
static int slip_ioctl(struct tty_struct *tty, unsigned int cmd,
		unsigned long arg)
{
	struct slip *sl = tty->disc_data;
	unsigned int tmp;
	int __user *p = (int __user *)arg;

	/* First make sure we're connected. */
	if (!sl || sl->magic != SLIP_MAGIC)
		return -EINVAL;

	switch (cmd) {
	case SIOCGIFNAME:
		/* Copy the interface name, including the trailing NUL. */
		tmp = strlen(sl->dev->name) + 1;
		if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
			return -EFAULT;
		return 0;

	case SIOCGIFENCAP:
		if (put_user(sl->mode, p))
			return -EFAULT;
		return 0;

	case SIOCSIFENCAP:
		if (get_user(tmp, p))
			return -EFAULT;
#ifndef SL_INCLUDE_CSLIP
		/* CSLIP support not compiled in. */
		if (tmp & (SL_MODE_CSLIP|SL_MODE_ADAPTIVE))
			return -EINVAL;
#else
		/* ADAPTIVE and CSLIP together: drop the ADAPTIVE bit. */
		if ((tmp & (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) ==
		    (SL_MODE_ADAPTIVE | SL_MODE_CSLIP))
			/* return -EINVAL; */
			tmp &= ~SL_MODE_ADAPTIVE;
#endif
#ifndef CONFIG_SLIP_MODE_SLIP6
		if (tmp & SL_MODE_SLIP6)
			return -EINVAL;
#endif
		sl->mode = tmp;
		/* Keep the ARP hardware type in sync with the mode. */
		sl->dev->type = ARPHRD_SLIP + sl->mode;
		return 0;

	case SIOCSIFHWADDR:
		return -EINVAL;

#ifdef CONFIG_SLIP_SMART
	/* VSV changes start here */
	case SIOCSKEEPALIVE:
		if (get_user(tmp, p))
			return -EFAULT;
		if (tmp > 255) /* max for unchar */
			return -EINVAL;

		spin_lock_bh(&sl->lock);
		/* The tty may have been detached in the meantime. */
		if (!sl->tty) {
			spin_unlock_bh(&sl->lock);
			return -ENODEV;
		}
		sl->keepalive = (u8)tmp;
		if (sl->keepalive != 0) {
			mod_timer(&sl->keepalive_timer,
					jiffies + sl->keepalive * HZ);
			set_bit(SLF_KEEPTEST, &sl->flags);
		} else
			del_timer(&sl->keepalive_timer);
		spin_unlock_bh(&sl->lock);
		return 0;

	case SIOCGKEEPALIVE:
		if (put_user(sl->keepalive, p))
			return -EFAULT;
		return 0;

	case SIOCSOUTFILL:
		if (get_user(tmp, p))
			return -EFAULT;
		if (tmp > 255) /* max for unchar */
			return -EINVAL;
		spin_lock_bh(&sl->lock);
		if (!sl->tty) {
			spin_unlock_bh(&sl->lock);
			return -ENODEV;
		}
		sl->outfill = (u8)tmp;
		if (sl->outfill != 0) {
			mod_timer(&sl->outfill_timer,
						jiffies + sl->outfill * HZ);
			set_bit(SLF_OUTWAIT, &sl->flags);
		} else
			del_timer(&sl->outfill_timer);
		spin_unlock_bh(&sl->lock);
		return 0;

	case SIOCGOUTFILL:
		if (put_user(sl->outfill, p))
			return -EFAULT;
		return 0;
	/* VSV changes end */
#endif
	default:
		/* Everything else is a generic tty mode ioctl. */
		return tty_mode_ioctl(tty, cmd, arg);
	}
}
1179
1180/* VSV changes start here */
1181#ifdef CONFIG_SLIP_SMART
1182/* function sl_siocdevprivate called from net/core/dev.c
1183   to allow get/set outfill/keepalive parameter
1184   by ifconfig                                 */
1185
1186static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq,
1187			     void __user *data, int cmd)
1188{
1189	struct slip *sl = netdev_priv(dev);
1190	unsigned long *p = (unsigned long *)&rq->ifr_ifru;
1191
1192	if (sl == NULL)		/* Allocation failed ?? */
1193		return -ENODEV;
1194
1195	if (in_compat_syscall())
1196		return -EOPNOTSUPP;
1197
1198	spin_lock_bh(&sl->lock);
1199
1200	if (!sl->tty) {
1201		spin_unlock_bh(&sl->lock);
1202		return -ENODEV;
1203	}
1204
1205	switch (cmd) {
1206	case SIOCSKEEPALIVE:
1207		/* max for unchar */
1208		if ((unsigned)*p > 255) {
1209			spin_unlock_bh(&sl->lock);
1210			return -EINVAL;
1211		}
1212		sl->keepalive = (u8)*p;
1213		if (sl->keepalive != 0) {
1214			sl->keepalive_timer.expires =
1215						jiffies + sl->keepalive * HZ;
1216			mod_timer(&sl->keepalive_timer,
1217						jiffies + sl->keepalive * HZ);
1218			set_bit(SLF_KEEPTEST, &sl->flags);
1219		} else
1220			del_timer(&sl->keepalive_timer);
1221		break;
1222
1223	case SIOCGKEEPALIVE:
1224		*p = sl->keepalive;
1225		break;
1226
1227	case SIOCSOUTFILL:
1228		if ((unsigned)*p > 255) { /* max for unchar */
1229			spin_unlock_bh(&sl->lock);
1230			return -EINVAL;
1231		}
1232		sl->outfill = (u8)*p;
1233		if (sl->outfill != 0) {
1234			mod_timer(&sl->outfill_timer,
1235						jiffies + sl->outfill * HZ);
1236			set_bit(SLF_OUTWAIT, &sl->flags);
1237		} else
1238			del_timer(&sl->outfill_timer);
1239		break;
1240
1241	case SIOCGOUTFILL:
1242		*p = sl->outfill;
1243		break;
1244
1245	case SIOCSLEASE:
1246		/* Resolve race condition, when ioctl'ing hanged up
1247		   and opened by another process device.
1248		 */
1249		if (sl->tty != current->signal->tty &&
1250						sl->pid != current->pid) {
1251			spin_unlock_bh(&sl->lock);
1252			return -EPERM;
1253		}
1254		sl->leased = 0;
1255		if (*p)
1256			sl->leased = 1;
1257		break;
1258
1259	case SIOCGLEASE:
1260		*p = sl->leased;
1261	}
1262	spin_unlock_bh(&sl->lock);
1263	return 0;
1264}
1265#endif
1266/* VSV changes end */
1267
/* N_SLIP line-discipline entry points.  The tty core invokes these on
 * attach/detach/hangup, ioctl, received data and write-space wakeups. */
static struct tty_ldisc_ops sl_ldisc = {
	.owner 		= THIS_MODULE,
	.num		= N_SLIP,
	.name 		= "slip",
	.open 		= slip_open,
	.close	 	= slip_close,
	.hangup	 	= slip_hangup,
	.ioctl		= slip_ioctl,
	.receive_buf	= slip_receive_buf,
	.write_wakeup	= slip_write_wakeup,
};
1279
1280static int __init slip_init(void)
1281{
1282	int status;
1283
1284	if (slip_maxdev < 4)
1285		slip_maxdev = 4; /* Sanity */
1286
1287	printk(KERN_INFO "SLIP: version %s (dynamic channels, max=%d)"
1288#ifdef CONFIG_SLIP_MODE_SLIP6
1289	       " (6 bit encapsulation enabled)"
1290#endif
1291	       ".\n",
1292	       SLIP_VERSION, slip_maxdev);
1293#if defined(SL_INCLUDE_CSLIP)
1294	printk(KERN_INFO "CSLIP: code copyright 1989 Regents of the University of California.\n");
1295#endif
1296#ifdef CONFIG_SLIP_SMART
1297	printk(KERN_INFO "SLIP linefill/keepalive option.\n");
1298#endif
1299
1300	slip_devs = kcalloc(slip_maxdev, sizeof(struct net_device *),
1301								GFP_KERNEL);
1302	if (!slip_devs)
1303		return -ENOMEM;
1304
1305	/* Fill in our line protocol discipline, and register it */
1306	status = tty_register_ldisc(&sl_ldisc);
1307	if (status != 0) {
1308		printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status);
1309		kfree(slip_devs);
1310	}
1311	return status;
1312}
1313
/* Module exit: hang up any still-attached ttys (retrying for ~1s,
 * since hangup completion is asynchronous), then unregister every
 * device, free the slot table and drop the line discipline.
 */
static void __exit slip_exit(void)
{
	int i;
	struct net_device *dev;
	struct slip *sl;
	unsigned long timeout = jiffies + HZ;
	int busy = 0;

	if (slip_devs == NULL)
		return;

	/* First of all: check for active disciplines and hangup them.
	 */
	do {
		if (busy)
			msleep_interruptible(100);

		busy = 0;
		for (i = 0; i < slip_maxdev; i++) {
			dev = slip_devs[i];
			if (!dev)
				continue;
			sl = netdev_priv(dev);
			spin_lock_bh(&sl->lock);
			if (sl->tty) {
				busy++;
				tty_hangup(sl->tty);
			}
			spin_unlock_bh(&sl->lock);
		}
	} while (busy && time_before(jiffies, timeout));

	/* FIXME: hangup is async so we should wait when doing this second
	   phase */

	for (i = 0; i < slip_maxdev; i++) {
		dev = slip_devs[i];
		if (!dev)
			continue;
		/* Clear the slot before unregistering the device. */
		slip_devs[i] = NULL;

		sl = netdev_priv(dev);
		if (sl->tty) {
			printk(KERN_ERR "%s: tty discipline still running\n",
			       dev->name);
		}

		unregister_netdev(dev);
	}

	kfree(slip_devs);
	slip_devs = NULL;

	tty_unregister_ldisc(&sl_ldisc);
}
1369
/* Wire the init/exit handlers into the module loader. */
module_init(slip_init);
module_exit(slip_exit);
1372
1373#ifdef CONFIG_SLIP_SMART
1374/*
1375 * This is start of the code for multislip style line checking
1376 * added by Stanislav Voronyi. All changes before marked VSV
1377 */
1378
/* Outfill timer callback (softirq context, takes sl->lock).
 *
 * If no frame went out during the last interval (SLF_OUTWAIT is still
 * set - sl_encaps() clears it on every transmit), push a single frame
 * delimiter down the tty to keep the line active; otherwise arm
 * SLF_OUTWAIT and check again next interval.
 */
static void sl_outfill(struct timer_list *t)
{
	struct slip *sl = from_timer(sl, t, outfill_timer);

	spin_lock(&sl->lock);

	if (sl->tty == NULL)
		goto out;

	if (sl->outfill) {
		if (test_bit(SLF_OUTWAIT, &sl->flags)) {
			/* no packets were transmitted, do outfill */
#ifdef CONFIG_SLIP_MODE_SLIP6
			unsigned char s = (sl->mode & SL_MODE_SLIP6)?0x70:END;
#else
			unsigned char s = END;
#endif
			/* put END into tty queue. Is it right ??? */
			if (!netif_queue_stopped(sl->dev)) {
				/* if device busy no outfill */
				sl->tty->ops->write(sl->tty, &s, 1);
			}
		} else
			set_bit(SLF_OUTWAIT, &sl->flags);

		/* Re-arm for the next outfill interval. */
		mod_timer(&sl->outfill_timer, jiffies+sl->outfill*HZ);
	}
out:
	spin_unlock(&sl->lock);
}
1409
/* Keepalive timer callback (softirq context, takes sl->lock).
 *
 * If SLF_KEEPTEST is still set from the previous interval (presumably
 * it is cleared by the receive path, which is outside this view -
 * verify in slip_unesc()), the line is considered dead and the tty is
 * hung up; otherwise arm SLF_KEEPTEST and re-check next interval.
 */
static void sl_keepalive(struct timer_list *t)
{
	struct slip *sl = from_timer(sl, t, keepalive_timer);

	spin_lock(&sl->lock);

	if (sl->tty == NULL)
		goto out;

	if (sl->keepalive) {
		if (test_bit(SLF_KEEPTEST, &sl->flags)) {
			/* keepalive still high :(, we must hangup */
			if (sl->outfill)
				/* outfill timer must be deleted too */
				(void)del_timer(&sl->outfill_timer);
			printk(KERN_DEBUG "%s: no packets received during keepalive timeout, hangup.\n", sl->dev->name);
			/* this must hangup tty & close slip */
			tty_hangup(sl->tty);
			/* I think we need not something else */
			goto out;
		} else
			set_bit(SLF_KEEPTEST, &sl->flags);

		mod_timer(&sl->keepalive_timer, jiffies+sl->keepalive*HZ);
	}
out:
	spin_unlock(&sl->lock);
}
1438
1439#endif
MODULE_DESCRIPTION("SLIP (serial line) protocol module");
MODULE_LICENSE("GPL");
/* Autoload this module when line discipline N_SLIP is requested. */
MODULE_ALIAS_LDISC(N_SLIP);
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * slip.c	This module implements the SLIP protocol for kernel-based
   4 *		devices like TTY.  It interfaces between a raw TTY, and the
   5 *		kernel's INET protocol layers.
   6 *
   7 * Version:	@(#)slip.c	0.8.3	12/24/94
   8 *
   9 * Authors:	Laurence Culhane, <loz@holmes.demon.co.uk>
  10 *		Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
  11 *
  12 * Fixes:
  13 *		Alan Cox	: 	Sanity checks and avoid tx overruns.
  14 *					Has a new sl->mtu field.
  15 *		Alan Cox	: 	Found cause of overrun. ifconfig sl0
  16 *					mtu upwards. Driver now spots this
  17 *					and grows/shrinks its buffers(hack!).
  18 *					Memory leak if you run out of memory
  19 *					setting up a slip driver fixed.
  20 *		Matt Dillon	:	Printable slip (borrowed from NET2E)
  21 *	Pauline Middelink	:	Slip driver fixes.
  22 *		Alan Cox	:	Honours the old SL_COMPRESSED flag
  23 *		Alan Cox	:	KISS AX.25 and AXUI IP support
  24 *		Michael Riepe	:	Automatic CSLIP recognition added
  25 *		Charles Hedrick :	CSLIP header length problem fix.
  26 *		Alan Cox	:	Corrected non-IP cases of the above.
  27 *		Alan Cox	:	Now uses hardware type as per FvK.
  28 *		Alan Cox	:	Default to 192.168.0.0 (RFC 1597)
  29 *		A.N.Kuznetsov	:	dev_tint() recursion fix.
  30 *	Dmitry Gorodchanin	:	SLIP memory leaks
  31 *      Dmitry Gorodchanin      :       Code cleanup. Reduce tty driver
  32 *                                      buffering from 4096 to 256 bytes.
  33 *                                      Improving SLIP response time.
  34 *                                      CONFIG_SLIP_MODE_SLIP6.
  35 *                                      ifconfig sl? up & down now works
  36 *					correctly.
  37 *					Modularization.
  38 *              Alan Cox        :       Oops - fix AX.25 buffer lengths
  39 *      Dmitry Gorodchanin      :       Even more cleanups. Preserve CSLIP
  40 *                                      statistics. Include CSLIP code only
  41 *                                      if it really needed.
  42 *		Alan Cox	:	Free slhc buffers in the right place.
  43 *		Alan Cox	:	Allow for digipeated IP over AX.25
  44 *		Matti Aarnio	:	Dynamic SLIP devices, with ideas taken
  45 *					from Jim Freeman's <jfree@caldera.com>
  46 *					dynamic PPP devices.  We do NOT kfree()
  47 *					device entries, just reg./unreg. them
  48 *					as they are needed.  We kfree() them
  49 *					at module cleanup.
  50 *					With MODULE-loading ``insmod'', user
  51 *					can issue parameter:  slip_maxdev=1024
  52 *					(Or how much he/she wants.. Default
  53 *					is 256)
  54 *	Stanislav Voronyi	:	Slip line checking, with ideas taken
  55 *					from multislip BSDI driver which was
  56 *					written by Igor Chechik, RELCOM Corp.
  57 *					Only algorithms have been ported to
  58 *					Linux SLIP driver.
  59 *	Vitaly E. Lavrov	:	Sane behaviour on tty hangup.
  60 *	Alexey Kuznetsov	:	Cleanup interfaces to tty & netdevice
  61 *					modules.
  62 */
  63
  64#define SL_CHECK_TRANSMIT
  65#include <linux/compat.h>
  66#include <linux/module.h>
  67#include <linux/moduleparam.h>
  68
  69#include <linux/uaccess.h>
  70#include <linux/bitops.h>
  71#include <linux/sched/signal.h>
  72#include <linux/string.h>
  73#include <linux/mm.h>
  74#include <linux/interrupt.h>
  75#include <linux/in.h>
  76#include <linux/tty.h>
  77#include <linux/errno.h>
  78#include <linux/netdevice.h>
  79#include <linux/etherdevice.h>
  80#include <linux/skbuff.h>
  81#include <linux/rtnetlink.h>
  82#include <linux/if_arp.h>
  83#include <linux/if_slip.h>
  84#include <linux/delay.h>
  85#include <linux/init.h>
  86#include <linux/slab.h>
  87#include <linux/workqueue.h>
  88#include "slip.h"
  89#ifdef CONFIG_INET
  90#include <linux/ip.h>
  91#include <linux/tcp.h>
  92#include <net/slhc_vj.h>
  93#endif
  94
#define SLIP_VERSION	"0.8.4-NET3.019-NEWTTY"

/* Table of allocated channel devices, indexed by unit number. */
static struct net_device **slip_devs;

/* Upper bound on simultaneous SLIP channels (module parameter). */
static int slip_maxdev = SL_NRUNIT;
module_param(slip_maxdev, int, 0);
MODULE_PARM_DESC(slip_maxdev, "Maximum number of slip devices");

/* Forward declarations: framing helpers and smart-line callbacks. */
static int slip_esc(unsigned char *p, unsigned char *d, int len);
static void slip_unesc(struct slip *sl, unsigned char c);
#ifdef CONFIG_SLIP_MODE_SLIP6
static int slip_esc6(unsigned char *p, unsigned char *d, int len);
static void slip_unesc6(struct slip *sl, unsigned char c);
#endif
#ifdef CONFIG_SLIP_SMART
static void sl_keepalive(struct timer_list *t);
static void sl_outfill(struct timer_list *t);
static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd);
#endif
 114
 115/********************************
 116*  Buffer administration routines:
 117*	sl_alloc_bufs()
 118*	sl_free_bufs()
 119*	sl_realloc_bufs()
 120*
 121* NOTE: sl_realloc_bufs != sl_free_bufs + sl_alloc_bufs, because
 122*	sl_realloc_bufs provides strong atomicity and reallocation
 123*	on actively running device.
 124*********************************/
 125
 126/*
 127   Allocate channel buffers.
 128 */
 129
 130static int sl_alloc_bufs(struct slip *sl, int mtu)
 131{
 132	int err = -ENOBUFS;
 133	unsigned long len;
 134	char *rbuff = NULL;
 135	char *xbuff = NULL;
 136#ifdef SL_INCLUDE_CSLIP
 137	char *cbuff = NULL;
 138	struct slcompress *slcomp = NULL;
 139#endif
 140
 141	/*
 142	 * Allocate the SLIP frame buffers:
 143	 *
 144	 * rbuff	Receive buffer.
 145	 * xbuff	Transmit buffer.
 146	 * cbuff        Temporary compression buffer.
 147	 */
 148	len = mtu * 2;
 149
 150	/*
 151	 * allow for arrival of larger UDP packets, even if we say not to
 152	 * also fixes a bug in which SunOS sends 512-byte packets even with
 153	 * an MSS of 128
 154	 */
 155	if (len < 576 * 2)
 156		len = 576 * 2;
 157	rbuff = kmalloc(len + 4, GFP_KERNEL);
 158	if (rbuff == NULL)
 159		goto err_exit;
 160	xbuff = kmalloc(len + 4, GFP_KERNEL);
 161	if (xbuff == NULL)
 162		goto err_exit;
 163#ifdef SL_INCLUDE_CSLIP
 164	cbuff = kmalloc(len + 4, GFP_KERNEL);
 165	if (cbuff == NULL)
 166		goto err_exit;
 167	slcomp = slhc_init(16, 16);
 168	if (IS_ERR(slcomp))
 169		goto err_exit;
 170#endif
 171	spin_lock_bh(&sl->lock);
 172	if (sl->tty == NULL) {
 173		spin_unlock_bh(&sl->lock);
 174		err = -ENODEV;
 175		goto err_exit;
 176	}
 177	sl->mtu	     = mtu;
 178	sl->buffsize = len;
 179	sl->rcount   = 0;
 180	sl->xleft    = 0;
 181	rbuff = xchg(&sl->rbuff, rbuff);
 182	xbuff = xchg(&sl->xbuff, xbuff);
 183#ifdef SL_INCLUDE_CSLIP
 184	cbuff = xchg(&sl->cbuff, cbuff);
 185	slcomp = xchg(&sl->slcomp, slcomp);
 186#endif
 187#ifdef CONFIG_SLIP_MODE_SLIP6
 188	sl->xdata    = 0;
 189	sl->xbits    = 0;
 190#endif
 191	spin_unlock_bh(&sl->lock);
 192	err = 0;
 193
 194	/* Cleanup */
 195err_exit:
 196#ifdef SL_INCLUDE_CSLIP
 197	kfree(cbuff);
 198	slhc_free(slcomp);
 199#endif
 200	kfree(xbuff);
 201	kfree(rbuff);
 202	return err;
 203}
 204
 205/* Free a SLIP channel buffers. */
 206static void sl_free_bufs(struct slip *sl)
 207{
 208	/* Free all SLIP frame buffers. */
 209	kfree(xchg(&sl->rbuff, NULL));
 210	kfree(xchg(&sl->xbuff, NULL));
 211#ifdef SL_INCLUDE_CSLIP
 212	kfree(xchg(&sl->cbuff, NULL));
 213	slhc_free(xchg(&sl->slcomp, NULL));
 214#endif
 215}
 216
 217/*
 218   Reallocate slip channel buffers.
 219 */
 220
 221static int sl_realloc_bufs(struct slip *sl, int mtu)
 222{
 223	int err = 0;
 224	struct net_device *dev = sl->dev;
 225	unsigned char *xbuff, *rbuff;
 226#ifdef SL_INCLUDE_CSLIP
 227	unsigned char *cbuff;
 228#endif
 229	int len = mtu * 2;
 230
 231/*
 232 * allow for arrival of larger UDP packets, even if we say not to
 233 * also fixes a bug in which SunOS sends 512-byte packets even with
 234 * an MSS of 128
 235 */
 236	if (len < 576 * 2)
 237		len = 576 * 2;
 238
 239	xbuff = kmalloc(len + 4, GFP_ATOMIC);
 240	rbuff = kmalloc(len + 4, GFP_ATOMIC);
 241#ifdef SL_INCLUDE_CSLIP
 242	cbuff = kmalloc(len + 4, GFP_ATOMIC);
 243#endif
 244
 245
 246#ifdef SL_INCLUDE_CSLIP
 247	if (xbuff == NULL || rbuff == NULL || cbuff == NULL)  {
 248#else
 249	if (xbuff == NULL || rbuff == NULL)  {
 250#endif
 251		if (mtu > sl->mtu) {
 252			printk(KERN_WARNING "%s: unable to grow slip buffers, MTU change cancelled.\n",
 253			       dev->name);
 254			err = -ENOBUFS;
 255		}
 256		goto done;
 257	}
 258	spin_lock_bh(&sl->lock);
 259
 260	err = -ENODEV;
 261	if (sl->tty == NULL)
 262		goto done_on_bh;
 263
 264	xbuff    = xchg(&sl->xbuff, xbuff);
 265	rbuff    = xchg(&sl->rbuff, rbuff);
 266#ifdef SL_INCLUDE_CSLIP
 267	cbuff    = xchg(&sl->cbuff, cbuff);
 268#endif
 269	if (sl->xleft)  {
 270		if (sl->xleft <= len)  {
 271			memcpy(sl->xbuff, sl->xhead, sl->xleft);
 272		} else  {
 273			sl->xleft = 0;
 274			dev->stats.tx_dropped++;
 275		}
 276	}
 277	sl->xhead = sl->xbuff;
 278
 279	if (sl->rcount)  {
 280		if (sl->rcount <= len) {
 281			memcpy(sl->rbuff, rbuff, sl->rcount);
 282		} else  {
 283			sl->rcount = 0;
 284			dev->stats.rx_over_errors++;
 285			set_bit(SLF_ERROR, &sl->flags);
 286		}
 287	}
 288	sl->mtu      = mtu;
 289	dev->mtu      = mtu;
 290	sl->buffsize = len;
 291	err = 0;
 292
 293done_on_bh:
 294	spin_unlock_bh(&sl->lock);
 295
 296done:
 297	kfree(xbuff);
 298	kfree(rbuff);
 299#ifdef SL_INCLUDE_CSLIP
 300	kfree(cbuff);
 301#endif
 302	return err;
 303}
 304
 305
/* Set the "sending" flag by stopping the netdev queue.  (The original
 * "atomic set_bit" wording is historical; the queue state machinery
 * provides the atomicity now.) */
static inline void sl_lock(struct slip *sl)
{
	netif_stop_queue(sl->dev);
}
 311
 312
/* Clear the "sending" flag by waking the netdev queue.  (The "hence
 * the ASM" wording is historical; netif_wake_queue() handles it.) */
static inline void sl_unlock(struct slip *sl)
{
	netif_wake_queue(sl->dev);
}
 318
/* Send one completely decapsulated IP datagram to the IP layer.
 *
 * Handles CSLIP when built in: VJ-compressed TCP frames are expanded
 * in place in sl->rbuff, and in ADAPTIVE mode an uncompressed-TCP
 * frame switches header compression on.  The finished datagram is
 * copied into a fresh skb and handed to netif_rx().
 */
static void sl_bump(struct slip *sl)
{
	struct net_device *dev = sl->dev;
	struct sk_buff *skb;
	int count;

	count = sl->rcount;
#ifdef SL_INCLUDE_CSLIP
	if (sl->mode & (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) {
		unsigned char c = sl->rbuff[0];
		if (c & SL_TYPE_COMPRESSED_TCP) {
			/* ignore compressed packets when CSLIP is off */
			if (!(sl->mode & SL_MODE_CSLIP)) {
				printk(KERN_WARNING "%s: compressed packet ignored\n", dev->name);
				return;
			}
			/* make sure we've reserved enough space for uncompress
			   to use */
			if (count + 80 > sl->buffsize) {
				dev->stats.rx_over_errors++;
				return;
			}
			count = slhc_uncompress(sl->slcomp, sl->rbuff, count);
			if (count <= 0)
				return;
		} else if (c >= SL_TYPE_UNCOMPRESSED_TCP) {
			if (!(sl->mode & SL_MODE_ADAPTIVE)) {
				/* turn on header compression */
				sl->mode |= SL_MODE_CSLIP;
				sl->mode &= ~SL_MODE_ADAPTIVE;
				printk(KERN_INFO "%s: header compression turned on\n", dev->name);
			}
			/* Mask off the uncompressed-TCP marker bits before
			 * letting slhc record the connection state. */
			sl->rbuff[0] &= 0x4f;
			if (slhc_remember(sl->slcomp, sl->rbuff, count) <= 0)
				return;
		}
	}
#endif  /* SL_INCLUDE_CSLIP */

	dev->stats.rx_bytes += count;

	skb = dev_alloc_skb(count);
	if (skb == NULL) {
		printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
		dev->stats.rx_dropped++;
		return;
	}
	skb->dev = dev;
	skb_put_data(skb, sl->rbuff, count);
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IP);
	netif_rx(skb);
	dev->stats.rx_packets++;
}
 374
/* Encapsulate one IP datagram and stuff into a TTY queue.
 *
 * Optionally VJ-compresses the header (CSLIP), applies SLIP or SLIP6
 * byte escaping into sl->xbuff, then writes as much as the tty will
 * take; the remainder is drained later by slip_transmit() via the
 * write-wakeup path.  Called under sl->lock from sl_xmit() with the
 * queue stopped (sl_lock()).
 */
static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
{
	unsigned char *p;
	int actual, count;

	if (len > sl->mtu) {		/* Sigh, shouldn't occur BUT ... */
		printk(KERN_WARNING "%s: truncating oversized transmit packet!\n", sl->dev->name);
		sl->dev->stats.tx_dropped++;
		sl_unlock(sl);
		return;
	}

	p = icp;
#ifdef SL_INCLUDE_CSLIP
	if (sl->mode & SL_MODE_CSLIP)
		len = slhc_compress(sl->slcomp, p, len, sl->cbuff, &p, 1);
#endif
#ifdef CONFIG_SLIP_MODE_SLIP6
	if (sl->mode & SL_MODE_SLIP6)
		count = slip_esc6(p, sl->xbuff, len);
	else
#endif
		count = slip_esc(p, sl->xbuff, len);

	/* Order of next two lines is *very* important.
	 * When we are sending a little amount of data,
	 * the transfer may be completed inside the ops->write()
	 * routine, because it's running with interrupts enabled.
	 * In this case we *never* got WRITE_WAKEUP event,
	 * if we did not request it before write operation.
	 *       14 Oct 1994  Dmitry Gorodchanin.
	 */
	set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
	actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
#ifdef SL_CHECK_TRANSMIT
	netif_trans_update(sl->dev);
#endif
	/* Whatever the tty did not accept is left for slip_transmit(). */
	sl->xleft = count - actual;
	sl->xhead = sl->xbuff + actual;
#ifdef CONFIG_SLIP_SMART
	/* VSV */
	clear_bit(SLF_OUTWAIT, &sl->flags);	/* reset outfill flag */
#endif
}
 420
/* Write out any remaining transmit buffer. Scheduled when tty is writable
 * (see slip_write_wakeup()).  Runs in workqueue context under sl->lock.
 */
static void slip_transmit(struct work_struct *work)
{
	struct slip *sl = container_of(work, struct slip, tx_work);
	int actual;

	spin_lock_bh(&sl->lock);
	/* First make sure we're connected. */
	if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
		spin_unlock_bh(&sl->lock);
		return;
	}

	if (sl->xleft <= 0)  {
		/* Now serial buffer is almost free & we can start
		 * transmission of another packet */
		sl->dev->stats.tx_packets++;
		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
		spin_unlock_bh(&sl->lock);
		sl_unlock(sl);
		return;
	}

	/* Push the rest of the frame; any residue waits for the next
	 * write-wakeup. */
	actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
	sl->xleft -= actual;
	sl->xhead += actual;
	spin_unlock_bh(&sl->lock);
}
 449
 450/*
 451 * Called by the driver when there's room for more data.
 452 * Schedule the transmit.
 453 */
 454static void slip_write_wakeup(struct tty_struct *tty)
 455{
 456	struct slip *sl;
 457
 458	rcu_read_lock();
 459	sl = rcu_dereference(tty->disc_data);
 460	if (sl)
 461		schedule_work(&sl->tx_work);
 462	rcu_read_unlock();
 463}
 464
/* Watchdog callback: invoked when the transmit queue has been stopped
 * longer than dev->watchdog_timeo (20s, see sl_init()).  Discards the
 * stuck partial frame and restarts the queue.
 */
static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct slip *sl = netdev_priv(dev);

	spin_lock(&sl->lock);

	if (netif_queue_stopped(dev)) {
		if (!netif_running(dev) || !sl->tty)
			goto out;

		/* May be we must check transmitter timeout here ?
		 *      14 Oct 1994 Dmitry Gorodchanin.
		 */
#ifdef SL_CHECK_TRANSMIT
		if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ))  {
			/* 20 sec timeout not reached */
			goto out;
		}
		printk(KERN_WARNING "%s: transmit timed out, %s?\n",
			dev->name,
			(tty_chars_in_buffer(sl->tty) || sl->xleft) ?
				"bad line quality" : "driver error");
		/* Drop the pending residue and let traffic flow again. */
		sl->xleft = 0;
		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
		sl_unlock(sl);
#endif
	}
out:
	spin_unlock(&sl->lock);
}
 495
 496
 497/* Encapsulate an IP datagram and kick it into a TTY queue. */
 498static netdev_tx_t
 499sl_xmit(struct sk_buff *skb, struct net_device *dev)
 500{
 501	struct slip *sl = netdev_priv(dev);
 502
 503	spin_lock(&sl->lock);
 504	if (!netif_running(dev)) {
 505		spin_unlock(&sl->lock);
 506		printk(KERN_WARNING "%s: xmit call when iface is down\n", dev->name);
 507		dev_kfree_skb(skb);
 508		return NETDEV_TX_OK;
 509	}
 510	if (sl->tty == NULL) {
 511		spin_unlock(&sl->lock);
 512		dev_kfree_skb(skb);
 513		return NETDEV_TX_OK;
 514	}
 515
 516	sl_lock(sl);
 517	dev->stats.tx_bytes += skb->len;
 518	sl_encaps(sl, skb->data, skb->len);
 519	spin_unlock(&sl->lock);
 520
 521	dev_kfree_skb(skb);
 522	return NETDEV_TX_OK;
 523}
 524
 525
 526/******************************************
 527 *   Routines looking at netdevice side.
 528 ******************************************/
 529
 530/* Netdevice UP -> DOWN routine */
 531
 532static int
 533sl_close(struct net_device *dev)
 534{
 535	struct slip *sl = netdev_priv(dev);
 536
 537	spin_lock_bh(&sl->lock);
 538	if (sl->tty)
 539		/* TTY discipline is running. */
 540		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
 541	netif_stop_queue(dev);
 542	sl->rcount   = 0;
 543	sl->xleft    = 0;
 544	spin_unlock_bh(&sl->lock);
 545
 546	return 0;
 547}
 548
 549/* Netdevice DOWN -> UP routine */
 550
 551static int sl_open(struct net_device *dev)
 552{
 553	struct slip *sl = netdev_priv(dev);
 554
 555	if (sl->tty == NULL)
 556		return -ENODEV;
 557
 558	sl->flags &= (1 << SLF_INUSE);
 559	netif_start_queue(dev);
 560	return 0;
 561}
 562
 563/* Netdevice change MTU request */
 564
 565static int sl_change_mtu(struct net_device *dev, int new_mtu)
 566{
 567	struct slip *sl = netdev_priv(dev);
 568
 569	return sl_realloc_bufs(sl, new_mtu);
 570}
 571
 572/* Netdevice get statistics request */
 573
 574static void
 575sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 576{
 577	struct net_device_stats *devstats = &dev->stats;
 578#ifdef SL_INCLUDE_CSLIP
 579	struct slip *sl = netdev_priv(dev);
 580	struct slcompress *comp = sl->slcomp;
 581#endif
 582	stats->rx_packets     = devstats->rx_packets;
 583	stats->tx_packets     = devstats->tx_packets;
 584	stats->rx_bytes       = devstats->rx_bytes;
 585	stats->tx_bytes       = devstats->tx_bytes;
 586	stats->rx_dropped     = devstats->rx_dropped;
 587	stats->tx_dropped     = devstats->tx_dropped;
 588	stats->tx_errors      = devstats->tx_errors;
 589	stats->rx_errors      = devstats->rx_errors;
 590	stats->rx_over_errors = devstats->rx_over_errors;
 591
 592#ifdef SL_INCLUDE_CSLIP
 593	if (comp) {
 594		/* Generic compressed statistics */
 595		stats->rx_compressed   = comp->sls_i_compressed;
 596		stats->tx_compressed   = comp->sls_o_compressed;
 597
 598		/* Are we really still needs this? */
 599		stats->rx_fifo_errors += comp->sls_i_compressed;
 600		stats->rx_dropped     += comp->sls_i_tossed;
 601		stats->tx_fifo_errors += comp->sls_o_compressed;
 602		stats->collisions     += comp->sls_o_misses;
 603	}
 604#endif
 605}
 606
 607/* Netdevice register callback */
 608
 609static int sl_init(struct net_device *dev)
 610{
 611	struct slip *sl = netdev_priv(dev);
 612
 613	/*
 614	 *	Finish setting up the DEVICE info.
 615	 */
 616
 617	dev->mtu		= sl->mtu;
 618	dev->type		= ARPHRD_SLIP + sl->mode;
 619#ifdef SL_CHECK_TRANSMIT
 620	dev->watchdog_timeo	= 20*HZ;
 621#endif
 622	return 0;
 623}
 624
 625
 626static void sl_uninit(struct net_device *dev)
 627{
 628	struct slip *sl = netdev_priv(dev);
 629
 630	sl_free_bufs(sl);
 631}
 632
 633/* Hook the destructor so we can free slip devices at the right point in time */
 634static void sl_free_netdev(struct net_device *dev)
 635{
 636	int i = dev->base_addr;
 637
 638	slip_devs[i] = NULL;
 639}
 640
/* netdev callbacks for a SLIP channel device. */
static const struct net_device_ops sl_netdev_ops = {
	.ndo_init		= sl_init,
	.ndo_uninit	  	= sl_uninit,
	.ndo_open		= sl_open,
	.ndo_stop		= sl_close,
	.ndo_start_xmit		= sl_xmit,
	.ndo_get_stats64        = sl_get_stats64,
	.ndo_change_mtu		= sl_change_mtu,
	.ndo_tx_timeout		= sl_tx_timeout,
#ifdef CONFIG_SLIP_SMART
	.ndo_siocdevprivate	= sl_siocdevprivate,
#endif
};
 654
 655
 656static void sl_setup(struct net_device *dev)
 657{
 658	dev->netdev_ops		= &sl_netdev_ops;
 659	dev->needs_free_netdev	= true;
 660	dev->priv_destructor	= sl_free_netdev;
 661
 662	dev->hard_header_len	= 0;
 663	dev->addr_len		= 0;
 664	dev->tx_queue_len	= 10;
 665
 666	/* MTU range: 68 - 65534 */
 667	dev->min_mtu = 68;
 668	dev->max_mtu = 65534;
 669
 670	/* New-style flags. */
 671	dev->flags		= IFF_NOARP|IFF_POINTOPOINT|IFF_MULTICAST;
 672}
 673
 674/******************************************
 675  Routines looking at TTY side.
 676 ******************************************/
 677
 678
 679/*
 680 * Handle the 'receiver data ready' interrupt.
 681 * This function is called by the 'tty_io' module in the kernel when
 682 * a block of SLIP data has been received, which can now be decapsulated
 683 * and sent on to some IP layer for further processing. This will not
 684 * be re-entered while running but other ldisc functions may be called
 685 * in parallel
 686 */
 687
static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
		const char *fp, int count)
{
	struct slip *sl = tty->disc_data;

	if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
		return;

	/* Read the characters out of the buffer */
	while (count--) {
		/* fp, when present, carries a per-character error flag
		 * from the tty driver. */
		if (fp && *fp++) {
			/* Count only the first error while SLF_ERROR is
			 * set, avoiding per-character double counting. */
			if (!test_and_set_bit(SLF_ERROR, &sl->flags))
				sl->dev->stats.rx_errors++;
			cp++;
			continue;
		}
#ifdef CONFIG_SLIP_MODE_SLIP6
		if (sl->mode & SL_MODE_SLIP6)
			slip_unesc6(sl, *cp++);
		else
#endif
			slip_unesc(sl, *cp++);
	}
}
 712
 713/************************************
 714 *  slip_open helper routines.
 715 ************************************/
 716
 717/* Collect hanged up channels */
 718static void sl_sync(void)
 719{
 720	int i;
 721	struct net_device *dev;
 722	struct slip	  *sl;
 723
 724	for (i = 0; i < slip_maxdev; i++) {
 725		dev = slip_devs[i];
 726		if (dev == NULL)
 727			break;
 728
 729		sl = netdev_priv(dev);
 730		if (sl->tty || sl->leased)
 731			continue;
 732		if (dev->flags & IFF_UP)
 733			dev_close(dev);
 734	}
 735}
 736
 737
 738/* Find a free SLIP channel, and link in this `tty' line. */
 739static struct slip *sl_alloc(void)
 740{
 741	int i;
 742	char name[IFNAMSIZ];
 743	struct net_device *dev = NULL;
 744	struct slip       *sl;
 745
 746	for (i = 0; i < slip_maxdev; i++) {
 747		dev = slip_devs[i];
 748		if (dev == NULL)
 749			break;
 750	}
 751	/* Sorry, too many, all slots in use */
 752	if (i >= slip_maxdev)
 753		return NULL;
 754
 755	sprintf(name, "sl%d", i);
 756	dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, sl_setup);
 757	if (!dev)
 758		return NULL;
 759
 760	dev->base_addr  = i;
 761	sl = netdev_priv(dev);
 762
 763	/* Initialize channel control data */
 764	sl->magic       = SLIP_MAGIC;
 765	sl->dev	      	= dev;
 766	spin_lock_init(&sl->lock);
 767	INIT_WORK(&sl->tx_work, slip_transmit);
 768	sl->mode        = SL_MODE_DEFAULT;
 769#ifdef CONFIG_SLIP_SMART
 770	/* initialize timer_list struct */
 771	timer_setup(&sl->keepalive_timer, sl_keepalive, 0);
 772	timer_setup(&sl->outfill_timer, sl_outfill, 0);
 773#endif
 774	slip_devs[i] = dev;
 775	return sl;
 776}
 777
 778/*
 779 * Open the high-level part of the SLIP channel.
 780 * This function is called by the TTY module when the
 781 * SLIP line discipline is called for.  Because we are
 782 * sure the tty line exists, we only have to link it to
 783 * a free SLIP channel...
 784 *
 785 * Called in process context serialized from other ldisc calls.
 786 */
 787
 788static int slip_open(struct tty_struct *tty)
 789{
 790	struct slip *sl;
 791	int err;
 792
 793	if (!capable(CAP_NET_ADMIN))
 794		return -EPERM;
 795
 796	if (tty->ops->write == NULL)
 797		return -EOPNOTSUPP;
 798
 799	/* RTnetlink lock is misused here to serialize concurrent
 800	   opens of slip channels. There are better ways, but it is
 801	   the simplest one.
 802	 */
 803	rtnl_lock();
 804
 805	/* Collect hanged up channels. */
 806	sl_sync();
 807
 808	sl = tty->disc_data;
 809
 810	err = -EEXIST;
 811	/* First make sure we're not already connected. */
 812	if (sl && sl->magic == SLIP_MAGIC)
 813		goto err_exit;
 814
 815	/* OK.  Find a free SLIP channel to use. */
 816	err = -ENFILE;
 817	sl = sl_alloc();
 818	if (sl == NULL)
 819		goto err_exit;
 820
 821	sl->tty = tty;
 822	tty->disc_data = sl;
 823	sl->pid = current->pid;
 824
 825	if (!test_bit(SLF_INUSE, &sl->flags)) {
 826		/* Perform the low-level SLIP initialization. */
 827		err = sl_alloc_bufs(sl, SL_MTU);
 828		if (err)
 829			goto err_free_chan;
 830
 831		set_bit(SLF_INUSE, &sl->flags);
 832
 833		err = register_netdevice(sl->dev);
 834		if (err)
 835			goto err_free_bufs;
 836	}
 837
 838#ifdef CONFIG_SLIP_SMART
 839	if (sl->keepalive) {
 840		sl->keepalive_timer.expires = jiffies + sl->keepalive * HZ;
 841		add_timer(&sl->keepalive_timer);
 842	}
 843	if (sl->outfill) {
 844		sl->outfill_timer.expires = jiffies + sl->outfill * HZ;
 845		add_timer(&sl->outfill_timer);
 846	}
 847#endif
 848
 849	/* Done.  We have linked the TTY line to a channel. */
 850	rtnl_unlock();
 851	tty->receive_room = 65536;	/* We don't flow control */
 852
 853	/* TTY layer expects 0 on success */
 854	return 0;
 855
 856err_free_bufs:
 857	sl_free_bufs(sl);
 858
 859err_free_chan:
 860	sl->tty = NULL;
 861	tty->disc_data = NULL;
 862	clear_bit(SLF_INUSE, &sl->flags);
 863	sl_free_netdev(sl->dev);
 864	/* do not call free_netdev before rtnl_unlock */
 865	rtnl_unlock();
 866	free_netdev(sl->dev);
 867	return err;
 868
 869err_exit:
 870	rtnl_unlock();
 871
 872	/* Count references from TTY module */
 873	return err;
 874}
 875
 876/*
 877 * Close down a SLIP channel.
 878 * This means flushing out any pending queues, and then returning. This
 879 * call is serialized against other ldisc functions.
 880 *
 * We also use this method for a hangup event
 882 */
 883
static void slip_close(struct tty_struct *tty)
{
	struct slip *sl = tty->disc_data;

	/* First make sure we're connected. */
	if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
		return;

	/* Unhook the channel from the tty under the channel lock, and
	 * publish the NULL disc_data with RCU semantics so concurrent
	 * RCU readers of tty->disc_data stop finding us.
	 */
	spin_lock_bh(&sl->lock);
	rcu_assign_pointer(tty->disc_data, NULL);
	sl->tty = NULL;
	spin_unlock_bh(&sl->lock);

	/* Wait for in-flight RCU readers, then for any queued transmit
	 * work, before we start tearing things down.
	 */
	synchronize_rcu();
	flush_work(&sl->tx_work);

	/* VSV = very important to remove timers */
#ifdef CONFIG_SLIP_SMART
	del_timer_sync(&sl->keepalive_timer);
	del_timer_sync(&sl->outfill_timer);
#endif
	/* Flush network side */
	unregister_netdev(sl->dev);
	/* This will complete via sl_free_netdev */
}
 909
/* A tty hangup is handled exactly like an explicit ldisc close. */
static void slip_hangup(struct tty_struct *tty)
{
	slip_close(tty);
}
 914 /************************************************************************
 915  *			STANDARD SLIP ENCAPSULATION		  	 *
 916  ************************************************************************/
 917
 918static int slip_esc(unsigned char *s, unsigned char *d, int len)
 919{
 920	unsigned char *ptr = d;
 921	unsigned char c;
 922
 923	/*
 924	 * Send an initial END character to flush out any
 925	 * data that may have accumulated in the receiver
 926	 * due to line noise.
 927	 */
 928
 929	*ptr++ = END;
 930
 931	/*
 932	 * For each byte in the packet, send the appropriate
 933	 * character sequence, according to the SLIP protocol.
 934	 */
 935
 936	while (len-- > 0) {
 937		switch (c = *s++) {
 938		case END:
 939			*ptr++ = ESC;
 940			*ptr++ = ESC_END;
 941			break;
 942		case ESC:
 943			*ptr++ = ESC;
 944			*ptr++ = ESC_ESC;
 945			break;
 946		default:
 947			*ptr++ = c;
 948			break;
 949		}
 950	}
 951	*ptr++ = END;
 952	return ptr - d;
 953}
 954
 955static void slip_unesc(struct slip *sl, unsigned char s)
 956{
 957
 958	switch (s) {
 959	case END:
 960#ifdef CONFIG_SLIP_SMART
 961		/* drop keeptest bit = VSV */
 962		if (test_bit(SLF_KEEPTEST, &sl->flags))
 963			clear_bit(SLF_KEEPTEST, &sl->flags);
 964#endif
 965
 966		if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
 967		    (sl->rcount > 2))
 968			sl_bump(sl);
 969		clear_bit(SLF_ESCAPE, &sl->flags);
 970		sl->rcount = 0;
 971		return;
 972
 973	case ESC:
 974		set_bit(SLF_ESCAPE, &sl->flags);
 975		return;
 976	case ESC_ESC:
 977		if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
 978			s = ESC;
 979		break;
 980	case ESC_END:
 981		if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
 982			s = END;
 983		break;
 984	}
 985	if (!test_bit(SLF_ERROR, &sl->flags))  {
 986		if (sl->rcount < sl->buffsize)  {
 987			sl->rbuff[sl->rcount++] = s;
 988			return;
 989		}
 990		sl->dev->stats.rx_over_errors++;
 991		set_bit(SLF_ERROR, &sl->flags);
 992	}
 993}
 994
 995
 996#ifdef CONFIG_SLIP_MODE_SLIP6
 997/************************************************************************
 998 *			 6 BIT SLIP ENCAPSULATION			*
 999 ************************************************************************/
1000
/*
 * Encode one packet with 6-bit (printable) SLIP framing: each group of
 * 6 payload bits becomes one byte in the range 0x30..0x6F, a final
 * partial group is left-justified, and the frame is bracketed by 0x70
 * delimiters.  Returns the number of bytes written to d.
 */
static int slip_esc6(unsigned char *s, unsigned char *d, int len)
{
	unsigned char *out = d;
	unsigned int acc = 0;	/* bit accumulator; only the low bits live */
	int nbits = 0;		/* number of pending bits in acc */
	int i;

	/*
	 * Leading delimiter flushes any line noise that may have
	 * accumulated in the receiver before this frame.
	 */
	*out++ = 0x70;

	/* Shift payload bytes in, emitting one char per 6 bits. */
	for (i = 0; i < len; i++) {
		acc = (acc << 8) | s[i];
		nbits += 8;
		while (nbits >= 6) {
			nbits -= 6;
			*out++ = (unsigned char)(0x30 + ((acc >> nbits) & 0x3F));
		}
	}

	/* Flush any leftover bits, left-justified in a final sextet. */
	if (nbits)
		*out++ = (unsigned char)(0x30 + ((acc << (6 - nbits)) & 0x3F));

	*out++ = 0x70;		/* closing delimiter */
	return out - d;
}
1037
/* Feed one received byte into the 6-bit SLIP decoder, reassembling
 * octets from 6-bit groups (state kept in sl->xdata/sl->xbits) and
 * accumulating the frame in sl->rbuff.
 */
static void slip_unesc6(struct slip *sl, unsigned char s)
{
	unsigned char c;

	if (s == 0x70) {
		/* 0x70 is the frame delimiter: deliver the completed
		 * frame (unless marked bad or too short) and reset all
		 * decoder state.
		 */
#ifdef CONFIG_SLIP_SMART
		/* drop keeptest bit = VSV */
		if (test_bit(SLF_KEEPTEST, &sl->flags))
			clear_bit(SLF_KEEPTEST, &sl->flags);
#endif

		if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
		    (sl->rcount > 2))
			sl_bump(sl);
		sl->rcount = 0;
		sl->xbits = 0;
		sl->xdata = 0;
	} else if (s >= 0x30 && s < 0x70) {
		/* Valid data byte: shift in 6 payload bits; emit one
		 * octet whenever 8 or more bits are pending.  Bytes
		 * outside 0x30..0x6F are silently ignored.
		 */
		sl->xdata = (sl->xdata << 6) | ((s - 0x30) & 0x3F);
		sl->xbits += 6;
		if (sl->xbits >= 8) {
			sl->xbits -= 8;
			c = (unsigned char)(sl->xdata >> sl->xbits);
			if (!test_bit(SLF_ERROR, &sl->flags))  {
				if (sl->rcount < sl->buffsize)  {
					sl->rbuff[sl->rcount++] = c;
					return;
				}
				/* Overrun: poison the rest of this frame. */
				sl->dev->stats.rx_over_errors++;
				set_bit(SLF_ERROR, &sl->flags);
			}
		}
	}
}
1072#endif /* CONFIG_SLIP_MODE_SLIP6 */
1073
1074/* Perform I/O control on an active SLIP channel. */
static int slip_ioctl(struct tty_struct *tty, unsigned int cmd,
		unsigned long arg)
{
	struct slip *sl = tty->disc_data;
	unsigned int tmp;
	int __user *p = (int __user *)arg;

	/* First make sure we're connected. */
	if (!sl || sl->magic != SLIP_MAGIC)
		return -EINVAL;

	switch (cmd) {
	case SIOCGIFNAME:
		/* Copy the attached netdev's name, including the NUL. */
		tmp = strlen(sl->dev->name) + 1;
		if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
			return -EFAULT;
		return 0;

	case SIOCGIFENCAP:
		/* Report the current SL_MODE_* encapsulation flags. */
		if (put_user(sl->mode, p))
			return -EFAULT;
		return 0;

	case SIOCSIFENCAP:
		if (get_user(tmp, p))
			return -EFAULT;
		/* Reject (or quietly fix up) mode combinations that this
		 * build cannot support.
		 */
#ifndef SL_INCLUDE_CSLIP
		if (tmp & (SL_MODE_CSLIP|SL_MODE_ADAPTIVE))
			return -EINVAL;
#else
		if ((tmp & (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) ==
		    (SL_MODE_ADAPTIVE | SL_MODE_CSLIP))
			/* return -EINVAL; */
			tmp &= ~SL_MODE_ADAPTIVE;
#endif
#ifndef CONFIG_SLIP_MODE_SLIP6
		if (tmp & SL_MODE_SLIP6)
			return -EINVAL;
#endif
		sl->mode = tmp;
		/* The ARP hardware type encodes the mode on top of
		 * ARPHRD_SLIP.
		 */
		sl->dev->type = ARPHRD_SLIP + sl->mode;
		return 0;

	case SIOCSIFHWADDR:
		return -EINVAL;

#ifdef CONFIG_SLIP_SMART
	/* VSV changes start here */
	case SIOCSKEEPALIVE:
		if (get_user(tmp, p))
			return -EFAULT;
		if (tmp > 255) /* max for unchar */
			return -EINVAL;

		/* Take the channel lock so the tty cannot be detached
		 * while we (re)arm the timer.
		 */
		spin_lock_bh(&sl->lock);
		if (!sl->tty) {
			spin_unlock_bh(&sl->lock);
			return -ENODEV;
		}
		sl->keepalive = (u8)tmp;
		if (sl->keepalive != 0) {
			mod_timer(&sl->keepalive_timer,
					jiffies + sl->keepalive * HZ);
			set_bit(SLF_KEEPTEST, &sl->flags);
		} else
			del_timer(&sl->keepalive_timer);
		spin_unlock_bh(&sl->lock);
		return 0;

	case SIOCGKEEPALIVE:
		if (put_user(sl->keepalive, p))
			return -EFAULT;
		return 0;

	case SIOCSOUTFILL:
		if (get_user(tmp, p))
			return -EFAULT;
		if (tmp > 255) /* max for unchar */
			return -EINVAL;
		spin_lock_bh(&sl->lock);
		if (!sl->tty) {
			spin_unlock_bh(&sl->lock);
			return -ENODEV;
		}
		sl->outfill = (u8)tmp;
		if (sl->outfill != 0) {
			mod_timer(&sl->outfill_timer,
						jiffies + sl->outfill * HZ);
			set_bit(SLF_OUTWAIT, &sl->flags);
		} else
			del_timer(&sl->outfill_timer);
		spin_unlock_bh(&sl->lock);
		return 0;

	case SIOCGOUTFILL:
		if (put_user(sl->outfill, p))
			return -EFAULT;
		return 0;
	/* VSV changes end */
#endif
	default:
		/* Anything else is a generic tty mode ioctl. */
		return tty_mode_ioctl(tty, cmd, arg);
	}
}
1179
1180/* VSV changes start here */
1181#ifdef CONFIG_SLIP_SMART
1182/* function sl_siocdevprivate called from net/core/dev.c
1183   to allow get/set outfill/keepalive parameter
1184   by ifconfig                                 */
1185
1186static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq,
1187			     void __user *data, int cmd)
1188{
1189	struct slip *sl = netdev_priv(dev);
1190	unsigned long *p = (unsigned long *)&rq->ifr_ifru;
1191
1192	if (sl == NULL)		/* Allocation failed ?? */
1193		return -ENODEV;
1194
1195	if (in_compat_syscall())
1196		return -EOPNOTSUPP;
1197
1198	spin_lock_bh(&sl->lock);
1199
1200	if (!sl->tty) {
1201		spin_unlock_bh(&sl->lock);
1202		return -ENODEV;
1203	}
1204
1205	switch (cmd) {
1206	case SIOCSKEEPALIVE:
1207		/* max for unchar */
1208		if ((unsigned)*p > 255) {
1209			spin_unlock_bh(&sl->lock);
1210			return -EINVAL;
1211		}
1212		sl->keepalive = (u8)*p;
1213		if (sl->keepalive != 0) {
1214			sl->keepalive_timer.expires =
1215						jiffies + sl->keepalive * HZ;
1216			mod_timer(&sl->keepalive_timer,
1217						jiffies + sl->keepalive * HZ);
1218			set_bit(SLF_KEEPTEST, &sl->flags);
1219		} else
1220			del_timer(&sl->keepalive_timer);
1221		break;
1222
1223	case SIOCGKEEPALIVE:
1224		*p = sl->keepalive;
1225		break;
1226
1227	case SIOCSOUTFILL:
1228		if ((unsigned)*p > 255) { /* max for unchar */
1229			spin_unlock_bh(&sl->lock);
1230			return -EINVAL;
1231		}
1232		sl->outfill = (u8)*p;
1233		if (sl->outfill != 0) {
1234			mod_timer(&sl->outfill_timer,
1235						jiffies + sl->outfill * HZ);
1236			set_bit(SLF_OUTWAIT, &sl->flags);
1237		} else
1238			del_timer(&sl->outfill_timer);
1239		break;
1240
1241	case SIOCGOUTFILL:
1242		*p = sl->outfill;
1243		break;
1244
1245	case SIOCSLEASE:
1246		/* Resolve race condition, when ioctl'ing hanged up
1247		   and opened by another process device.
1248		 */
1249		if (sl->tty != current->signal->tty &&
1250						sl->pid != current->pid) {
1251			spin_unlock_bh(&sl->lock);
1252			return -EPERM;
1253		}
1254		sl->leased = 0;
1255		if (*p)
1256			sl->leased = 1;
1257		break;
1258
1259	case SIOCGLEASE:
1260		*p = sl->leased;
1261	}
1262	spin_unlock_bh(&sl->lock);
1263	return 0;
1264}
1265#endif
1266/* VSV changes end */
1267
/* SLIP line discipline operations, registered with the tty layer at
 * module init as discipline number N_SLIP.
 */
static struct tty_ldisc_ops sl_ldisc = {
	.owner 		= THIS_MODULE,
	.num		= N_SLIP,
	.name 		= "slip",
	.open 		= slip_open,
	.close	 	= slip_close,
	.hangup	 	= slip_hangup,
	.ioctl		= slip_ioctl,
	.receive_buf	= slip_receive_buf,
	.write_wakeup	= slip_write_wakeup,
};
1279
1280static int __init slip_init(void)
1281{
1282	int status;
1283
1284	if (slip_maxdev < 4)
1285		slip_maxdev = 4; /* Sanity */
1286
1287	printk(KERN_INFO "SLIP: version %s (dynamic channels, max=%d)"
1288#ifdef CONFIG_SLIP_MODE_SLIP6
1289	       " (6 bit encapsulation enabled)"
1290#endif
1291	       ".\n",
1292	       SLIP_VERSION, slip_maxdev);
1293#if defined(SL_INCLUDE_CSLIP)
1294	printk(KERN_INFO "CSLIP: code copyright 1989 Regents of the University of California.\n");
1295#endif
1296#ifdef CONFIG_SLIP_SMART
1297	printk(KERN_INFO "SLIP linefill/keepalive option.\n");
1298#endif
1299
1300	slip_devs = kcalloc(slip_maxdev, sizeof(struct net_device *),
1301								GFP_KERNEL);
1302	if (!slip_devs)
1303		return -ENOMEM;
1304
1305	/* Fill in our line protocol discipline, and register it */
1306	status = tty_register_ldisc(&sl_ldisc);
1307	if (status != 0) {
1308		printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status);
1309		kfree(slip_devs);
1310	}
1311	return status;
1312}
1313
/* Module exit: hang up any ttys still attached, then unregister every
 * netdev and the line discipline.
 */
static void __exit slip_exit(void)
{
	int i;
	struct net_device *dev;
	struct slip *sl;
	unsigned long timeout = jiffies + HZ;	/* allow up to ~1s for hangups */
	int busy = 0;

	if (slip_devs == NULL)
		return;

	/* First of all: check for active disciplines and hangup them.
	 */
	do {
		/* Back off between scans while channels are still busy. */
		if (busy)
			msleep_interruptible(100);

		busy = 0;
		for (i = 0; i < slip_maxdev; i++) {
			dev = slip_devs[i];
			if (!dev)
				continue;
			sl = netdev_priv(dev);
			spin_lock_bh(&sl->lock);
			if (sl->tty) {
				busy++;
				tty_hangup(sl->tty);
			}
			spin_unlock_bh(&sl->lock);
		}
	} while (busy && time_before(jiffies, timeout));

	/* FIXME: hangup is async so we should wait when doing this second
	   phase */

	for (i = 0; i < slip_maxdev; i++) {
		dev = slip_devs[i];
		if (!dev)
			continue;
		slip_devs[i] = NULL;

		sl = netdev_priv(dev);
		if (sl->tty) {
			/* Hangup did not complete in time; complain but
			 * proceed with teardown anyway.
			 */
			printk(KERN_ERR "%s: tty discipline still running\n",
			       dev->name);
		}

		unregister_netdev(dev);
	}

	kfree(slip_devs);
	slip_devs = NULL;

	tty_unregister_ldisc(&sl_ldisc);
}
1369
1370module_init(slip_init);
1371module_exit(slip_exit);
1372
1373#ifdef CONFIG_SLIP_SMART
1374/*
1375 * This is start of the code for multislip style line checking
1376 * added by Stanislav Voronyi. All changes before marked VSV
1377 */
1378
/* Outfill timer callback: if nothing was sent during the last interval
 * (SLF_OUTWAIT still set), push a single frame delimiter to keep the
 * line alive, then re-arm the timer.
 */
static void sl_outfill(struct timer_list *t)
{
	struct slip *sl = from_timer(sl, t, outfill_timer);

	spin_lock(&sl->lock);

	/* Channel may have been detached from its tty meanwhile. */
	if (sl->tty == NULL)
		goto out;

	if (sl->outfill) {
		if (test_bit(SLF_OUTWAIT, &sl->flags)) {
			/* no packets were transmitted, do outfill */
#ifdef CONFIG_SLIP_MODE_SLIP6
			unsigned char s = (sl->mode & SL_MODE_SLIP6)?0x70:END;
#else
			unsigned char s = END;
#endif
			/* put END into tty queue. Is it right ??? */
			if (!netif_queue_stopped(sl->dev)) {
				/* if device busy no outfill */
				sl->tty->ops->write(sl->tty, &s, 1);
			}
		} else
			set_bit(SLF_OUTWAIT, &sl->flags);

		/* Re-arm for the next outfill interval (seconds). */
		mod_timer(&sl->outfill_timer, jiffies+sl->outfill*HZ);
	}
out:
	spin_unlock(&sl->lock);
}
1409
/* Keepalive timer callback: if SLF_KEEPTEST is still set (i.e. no
 * frame arrived since the last interval) the line is considered dead
 * and the tty is hung up; otherwise set the test bit and re-arm.
 */
static void sl_keepalive(struct timer_list *t)
{
	struct slip *sl = from_timer(sl, t, keepalive_timer);

	spin_lock(&sl->lock);

	/* Channel may have been detached from its tty meanwhile. */
	if (sl->tty == NULL)
		goto out;

	if (sl->keepalive) {
		if (test_bit(SLF_KEEPTEST, &sl->flags)) {
			/* keepalive still high :(, we must hangup */
			if (sl->outfill)
				/* outfill timer must be deleted too */
				(void)del_timer(&sl->outfill_timer);
			printk(KERN_DEBUG "%s: no packets received during keepalive timeout, hangup.\n", sl->dev->name);
			/* this must hangup tty & close slip */
			tty_hangup(sl->tty);
			/* I think we need not something else */
			goto out;
		} else
			set_bit(SLF_KEEPTEST, &sl->flags);

		/* Re-arm for the next keepalive interval (seconds). */
		mod_timer(&sl->keepalive_timer, jiffies+sl->keepalive*HZ);
	}
out:
	spin_unlock(&sl->lock);
}
1438
1439#endif
 
1440MODULE_LICENSE("GPL");
1441MODULE_ALIAS_LDISC(N_SLIP);