/*
 * PPP synchronous tty channel driver for Linux.
 *
 * This is a ppp channel driver that can be used with tty device drivers
 * that are frame oriented, such as synchronous HDLC devices.
 *
 * Complete PPP frames without encoding/decoding are exchanged between
 * the channel driver and the device driver.
 *
 * The async map IOCTL codes are implemented to keep the user mode
 * applications happy if they call them. Synchronous PPP does not use
 * the async maps.
 *
 * Copyright 1999 Paul Mackerras.
 *
 * Also touched by the grubby hands of Paul Fulghum paulkf@microgate.com
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 * This driver provides the encapsulation and framing for sending
 * and receiving PPP frames over sync serial lines.  It relies on
 * the generic PPP layer to give it frames to send and to process
 * received frames.  It implements the PPP line discipline.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20040616==
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <asm/unaligned.h>
#include <linux/uaccess.h>

#define PPP_VERSION	"2.4.2"

/* Structure for storing local state. */
struct syncppp {
	struct tty_struct *tty;
	unsigned int	flags;
	unsigned int	rbits;
	int		mru;
	spinlock_t	xmit_lock;
	spinlock_t	recv_lock;
	unsigned long	xmit_flags;
	u32		xaccm[8];
	u32		raccm;
	unsigned int	bytes_sent;
	unsigned int	bytes_rcvd;

	struct sk_buff	*tpkt;
	unsigned long	last_xmit;

	struct sk_buff_head rqueue;

	struct tasklet_struct tsk;

	refcount_t	refcnt;
	struct completion dead_cmp;
	struct ppp_channel chan;	/* interface to generic ppp layer */
};

/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP	0
#define XMIT_FULL	1

/* Bits in rbits */
#define SC_RCV_BITS	(SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)

#define PPPSYNC_MAX_RQLEN	32	/* arbitrary */

/*
 * Prototypes.
 */
static struct sk_buff* ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *);
static int ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd,
			  unsigned long arg);
static void ppp_sync_process(unsigned long arg);
static int ppp_sync_push(struct syncppp *ap);
static void ppp_sync_flush_output(struct syncppp *ap);
static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
			   char *flags, int count);

static const struct ppp_channel_ops sync_ops = {
	.start_xmit = ppp_sync_send,
	.ioctl      = ppp_sync_ioctl,
};

/*
 * Utility procedure to print a buffer in hex/ascii
 */
static void
ppp_print_buffer (const char *name, const __u8 *buf, int count)
{
	if (name != NULL)
		printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count);

	print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, count);
}


/*
 * Routines implementing the synchronous PPP line discipline.
 */

/*
 * We have a potential race on dereferencing tty->disc_data,
 * because the tty layer provides no locking at all - thus one
 * cpu could be running ppp_synctty_receive while another
 * calls ppp_synctty_close, which zeroes tty->disc_data and
 * frees the memory that ppp_synctty_receive is using.  The best
 * way to fix this is to use a rwlock in the tty struct, but for now
 * we use a single global rwlock for all ttys in ppp line discipline.
 *
 * FIXME: Fixed in tty_io nowadays.
 */
static DEFINE_RWLOCK(disc_data_lock);

static struct syncppp *sp_get(struct tty_struct *tty)
{
	struct syncppp *ap;

	read_lock(&disc_data_lock);
	ap = tty->disc_data;
	if (ap != NULL)
		refcount_inc(&ap->refcnt);
	read_unlock(&disc_data_lock);
	return ap;
}

static void sp_put(struct syncppp *ap)
{
	if (refcount_dec_and_test(&ap->refcnt))
		complete(&ap->dead_cmp);
}
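
/*
 * Reference counting pattern used by the ldisc hooks below
 * (illustrative summary only, no extra functionality):
 *
 *	struct syncppp *ap = sp_get(tty);
 *	if (!ap)
 *		return;
 *	... use ap safely ...
 *	sp_put(ap);
 *
 * ppp_sync_close() drops the initial reference taken in ppp_sync_open()
 * and, if other users still hold references, sleeps on dead_cmp until
 * the final sp_put() completes it.
 */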

/*
 * Called when a tty is put into sync-PPP line discipline.
 */
static int
ppp_sync_open(struct tty_struct *tty)
{
	struct syncppp *ap;
	int err;
	int speed;

	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	err = -ENOMEM;
	if (!ap)
		goto out;

	/* initialize the syncppp structure */
	ap->tty = tty;
	ap->mru = PPP_MRU;
	spin_lock_init(&ap->xmit_lock);
	spin_lock_init(&ap->recv_lock);
	ap->xaccm[0] = ~0U;
	ap->xaccm[3] = 0x60000000U;
	ap->raccm = ~0U;

	skb_queue_head_init(&ap->rqueue);
	tasklet_init(&ap->tsk, ppp_sync_process, (unsigned long) ap);

	refcount_set(&ap->refcnt, 1);
	init_completion(&ap->dead_cmp);

	ap->chan.private = ap;
	ap->chan.ops = &sync_ops;
	ap->chan.mtu = PPP_MRU;
	ap->chan.hdrlen = 2;	/* for A/C bytes */
	speed = tty_get_baud_rate(tty);
	ap->chan.speed = speed;
	err = ppp_register_channel(&ap->chan);
	if (err)
		goto out_free;

	tty->disc_data = ap;
	tty->receive_room = 65536;
	return 0;

 out_free:
	kfree(ap);
 out:
	return err;
}

/*
 * Called when the tty is put into another line discipline
 * or it hangs up.  We have to wait for any cpu currently
 * executing in any of the other ppp_synctty_* routines to
 * finish before we can call ppp_unregister_channel and free
 * the syncppp struct.  This routine must be called from
 * process context, not interrupt or softirq context.
 */
static void
ppp_sync_close(struct tty_struct *tty)
{
	struct syncppp *ap;

	write_lock_irq(&disc_data_lock);
	ap = tty->disc_data;
	tty->disc_data = NULL;
	write_unlock_irq(&disc_data_lock);
	if (!ap)
		return;

	/*
	 * We have now ensured that nobody can start using ap from now
	 * on, but we have to wait for all existing users to finish.
	 * Note that ppp_unregister_channel ensures that no calls to
	 * our channel ops (i.e. ppp_sync_send/ioctl) are in progress
	 * by the time it returns.
	 */
	if (!refcount_dec_and_test(&ap->refcnt))
		wait_for_completion(&ap->dead_cmp);
	tasklet_kill(&ap->tsk);

	ppp_unregister_channel(&ap->chan);
	skb_queue_purge(&ap->rqueue);
	kfree_skb(ap->tpkt);
	kfree(ap);
}

/*
 * Called on tty hangup in process context.
 *
 * Wait for I/O to driver to complete and unregister PPP channel.
 * This is already done by the close routine, so just call that.
 */
static int ppp_sync_hangup(struct tty_struct *tty)
{
	ppp_sync_close(tty);
	return 0;
}

/*
 * Read does nothing - no data is ever available this way.
 * Pppd reads and writes packets via /dev/ppp instead.
 */
static ssize_t
ppp_sync_read(struct tty_struct *tty, struct file *file,
	       unsigned char __user *buf, size_t count)
{
	return -EAGAIN;
}

/*
 * Write on the tty does nothing, the packets all come in
 * from the ppp generic stuff.
 */
static ssize_t
ppp_sync_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t count)
{
	return -EAGAIN;
}

static int
ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
		  unsigned int cmd, unsigned long arg)
{
	struct syncppp *ap = sp_get(tty);
	int __user *p = (int __user *)arg;
	int err, val;

	if (!ap)
		return -ENXIO;
	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGCHAN:
		err = -EFAULT;
		if (put_user(ppp_channel_index(&ap->chan), p))
			break;
		err = 0;
		break;

	case PPPIOCGUNIT:
		err = -EFAULT;
		if (put_user(ppp_unit_number(&ap->chan), p))
			break;
		err = 0;
		break;

	case TCFLSH:
		/* flush our buffers and the serial port's buffer */
		if (arg == TCIOFLUSH || arg == TCOFLUSH)
			ppp_sync_flush_output(ap);
		err = n_tty_ioctl_helper(tty, file, cmd, arg);
		break;

	case FIONREAD:
		val = 0;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	default:
		err = tty_mode_ioctl(tty, file, cmd, arg);
		break;
	}

	sp_put(ap);
	return err;
}

/* No kernel lock - fine */
static __poll_t
ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
	return 0;
}

/* May sleep, don't call from interrupt level or with interrupts disabled */
static void
ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
		  char *cflags, int count)
{
	struct syncppp *ap = sp_get(tty);
	unsigned long flags;

	if (!ap)
		return;
	spin_lock_irqsave(&ap->recv_lock, flags);
	ppp_sync_input(ap, buf, cflags, count);
	spin_unlock_irqrestore(&ap->recv_lock, flags);
	if (!skb_queue_empty(&ap->rqueue))
		tasklet_schedule(&ap->tsk);
	sp_put(ap);
	tty_unthrottle(tty);
}

static void
ppp_sync_wakeup(struct tty_struct *tty)
{
	struct syncppp *ap = sp_get(tty);

	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	if (!ap)
		return;
	set_bit(XMIT_WAKEUP, &ap->xmit_flags);
	tasklet_schedule(&ap->tsk);
	sp_put(ap);
}


static struct tty_ldisc_ops ppp_sync_ldisc = {
	.owner	= THIS_MODULE,
	.magic	= TTY_LDISC_MAGIC,
	.name	= "pppsync",
	.open	= ppp_sync_open,
	.close	= ppp_sync_close,
	.hangup	= ppp_sync_hangup,
	.read	= ppp_sync_read,
	.write	= ppp_sync_write,
	.ioctl	= ppp_synctty_ioctl,
	.poll	= ppp_sync_poll,
	.receive_buf = ppp_sync_receive,
	.write_wakeup = ppp_sync_wakeup,
};

static int __init
ppp_sync_init(void)
{
	int err;

	err = tty_register_ldisc(N_SYNC_PPP, &ppp_sync_ldisc);
	if (err != 0)
		printk(KERN_ERR "PPP_sync: error %d registering line disc.\n",
		       err);
	return err;
}
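
/*
 * Illustrative sketch (not part of this driver): userspace selects this
 * line discipline on an already-open synchronous tty with TIOCSETD,
 * roughly
 *
 *	int ldisc = N_SYNC_PPP;
 *	ioctl(tty_fd, TIOCSETD, &ldisc);
 *
 * after which frames flow through the PPP channel registered in
 * ppp_sync_open() rather than through read()/write() on the tty.
 */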

/*
 * The following routines provide the PPP channel interface.
 */
static int
ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
	struct syncppp *ap = chan->private;
	int err, val;
	u32 accm[8];
	void __user *argp = (void __user *)arg;
	u32 __user *p = argp;

	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGFLAGS:
		val = ap->flags | ap->rbits;
		if (put_user(val, (int __user *) argp))
			break;
		err = 0;
		break;
	case PPPIOCSFLAGS:
		if (get_user(val, (int __user *) argp))
			break;
		ap->flags = val & ~SC_RCV_BITS;
		spin_lock_irq(&ap->recv_lock);
		ap->rbits = val & SC_RCV_BITS;
		spin_unlock_irq(&ap->recv_lock);
		err = 0;
		break;

	case PPPIOCGASYNCMAP:
		if (put_user(ap->xaccm[0], p))
			break;
		err = 0;
		break;
	case PPPIOCSASYNCMAP:
		if (get_user(ap->xaccm[0], p))
			break;
		err = 0;
		break;

	case PPPIOCGRASYNCMAP:
		if (put_user(ap->raccm, p))
			break;
		err = 0;
		break;
	case PPPIOCSRASYNCMAP:
		if (get_user(ap->raccm, p))
			break;
		err = 0;
		break;

	case PPPIOCGXASYNCMAP:
		if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
			break;
		err = 0;
		break;
	case PPPIOCSXASYNCMAP:
		if (copy_from_user(accm, argp, sizeof(accm)))
			break;
		accm[2] &= ~0x40000000U;	/* can't escape 0x5e */
		accm[3] |= 0x60000000U;		/* must escape 0x7d, 0x7e */
		memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
		err = 0;
		break;

	case PPPIOCGMRU:
		if (put_user(ap->mru, (int __user *) argp))
			break;
		err = 0;
		break;
	case PPPIOCSMRU:
		if (get_user(val, (int __user *) argp))
			break;
		if (val < PPP_MRU)
			val = PPP_MRU;
		ap->mru = val;
		err = 0;
		break;

	default:
		err = -ENOTTY;
	}
	return err;
}

/*
 * This is called at softirq level to deliver received packets
 * to the ppp_generic code, and to tell the ppp_generic code
 * if we can accept more output now.
 */
static void ppp_sync_process(unsigned long arg)
{
	struct syncppp *ap = (struct syncppp *) arg;
	struct sk_buff *skb;

	/* process received packets */
	while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
		if (skb->len == 0) {
			/* zero length buffers indicate error */
			ppp_input_error(&ap->chan, 0);
			kfree_skb(skb);
		}
		else
			ppp_input(&ap->chan, skb);
	}

	/* try to push more stuff out */
	if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_sync_push(ap))
		ppp_output_wakeup(&ap->chan);
}
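
/*
 * Note: ppp_sync_process() is the body of ap->tsk, scheduled from
 * ppp_sync_receive() and ppp_sync_wakeup().  It drains rqueue into the
 * generic layer (ppp_input / ppp_input_error) and, once a pending
 * transmit has been pushed out, calls ppp_output_wakeup() so that
 * ppp_generic hands us the next frame.
 */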

/*
 * Procedures for encapsulation and framing.
 */

static struct sk_buff*
ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
{
	int proto;
	unsigned char *data;
	int islcp;

	data  = skb->data;
	proto = get_unaligned_be16(data);

	/* LCP packets with codes between 1 (configure-request)
	 * and 7 (code-reject) must be sent as though no options
	 * have been negotiated.
	 */
	islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;

	/* compress protocol field if option enabled */
	if (data[0] == 0 && (ap->flags & SC_COMP_PROT) && !islcp)
		skb_pull(skb,1);

	/* prepend address/control fields if necessary */
	if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
		if (skb_headroom(skb) < 2) {
			struct sk_buff *npkt = dev_alloc_skb(skb->len + 2);
			if (npkt == NULL) {
				kfree_skb(skb);
				return NULL;
			}
			skb_reserve(npkt,2);
			skb_copy_from_linear_data(skb,
				      skb_put(npkt, skb->len), skb->len);
			consume_skb(skb);
			skb = npkt;
		}
		skb_push(skb,2);
		skb->data[0] = PPP_ALLSTATIONS;
		skb->data[1] = PPP_UI;
	}

	ap->last_xmit = jiffies;

	if (skb && ap->flags & SC_LOG_OUTPKT)
		ppp_print_buffer ("send buffer", skb->data, skb->len);

	return skb;
}
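
/*
 * Frame layout produced above and handed to the tty driver
 * (illustrative):
 *
 *	FF 03 <proto-hi> <proto-lo> <information ...>
 *
 * The FF 03 address/control pair is omitted when SC_COMP_AC has been
 * negotiated, and the leading zero protocol byte when SC_COMP_PROT has,
 * except for LCP packets with codes 1..7, which are always sent
 * uncompressed.
 */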

/*
 * Transmit-side routines.
 */

/*
 * Send a packet to the peer over a sync tty line.
 * Returns 1 iff the packet was accepted.
 * If the packet was not accepted, we will call ppp_output_wakeup
 * at some later time.
 */
static int
ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct syncppp *ap = chan->private;

	ppp_sync_push(ap);

	if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
		return 0;	/* already full */
	skb = ppp_sync_txmunge(ap, skb);
	if (skb != NULL)
		ap->tpkt = skb;
	else
		clear_bit(XMIT_FULL, &ap->xmit_flags);

	ppp_sync_push(ap);
	return 1;
}

/*
 * Push as much data as possible out to the tty.
 */
static int
ppp_sync_push(struct syncppp *ap)
{
	int sent, done = 0;
	struct tty_struct *tty = ap->tty;
	int tty_stuffed = 0;

	if (!spin_trylock_bh(&ap->xmit_lock))
		return 0;
	for (;;) {
		if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
			tty_stuffed = 0;
		if (!tty_stuffed && ap->tpkt) {
			set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
			sent = tty->ops->write(tty, ap->tpkt->data, ap->tpkt->len);
			if (sent < 0)
				goto flush;	/* error, e.g. loss of CD */
			if (sent < ap->tpkt->len) {
				tty_stuffed = 1;
			} else {
				consume_skb(ap->tpkt);
				ap->tpkt = NULL;
				clear_bit(XMIT_FULL, &ap->xmit_flags);
				done = 1;
			}
			continue;
		}
		/* haven't made any progress */
		spin_unlock_bh(&ap->xmit_lock);
		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
		      (!tty_stuffed && ap->tpkt)))
			break;
		if (!spin_trylock_bh(&ap->xmit_lock))
			break;
	}
	return done;

flush:
	if (ap->tpkt) {
		kfree_skb(ap->tpkt);
		ap->tpkt = NULL;
		clear_bit(XMIT_FULL, &ap->xmit_flags);
		done = 1;
	}
	spin_unlock_bh(&ap->xmit_lock);
	return done;
}
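
/*
 * Locking note for ppp_sync_push(): xmit_lock is only ever taken with
 * spin_trylock_bh() so that the tasklet and ppp_sync_send() can both
 * call this without spinning against each other; after dropping the
 * lock, XMIT_WAKEUP and the pending tpkt are re-checked so that a
 * wakeup racing with the unlock is not lost.
 */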

/*
 * Flush output from our internal buffers.
 * Called for the TCFLSH ioctl.
 */
static void
ppp_sync_flush_output(struct syncppp *ap)
{
	int done = 0;

	spin_lock_bh(&ap->xmit_lock);
	if (ap->tpkt != NULL) {
		kfree_skb(ap->tpkt);
		ap->tpkt = NULL;
		clear_bit(XMIT_FULL, &ap->xmit_flags);
		done = 1;
	}
	spin_unlock_bh(&ap->xmit_lock);
	if (done)
		ppp_output_wakeup(&ap->chan);
}

/*
 * Receive-side routines.
 */

/* called when the tty driver has data for us.
 *
 * Data is frame oriented: each call to ppp_sync_input is considered
 * a whole frame. If the 1st flag byte is non-zero then the whole
 * frame is considered to be in error and is tossed.
 */
static void
ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
		char *flags, int count)
{
	struct sk_buff *skb;
	unsigned char *p;

	if (count == 0)
		return;

	if (ap->flags & SC_LOG_INPKT)
		ppp_print_buffer ("receive buffer", buf, count);

	/* stuff the chars in the skb */
	skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
	if (!skb) {
		printk(KERN_ERR "PPPsync: no memory (input pkt)\n");
		goto err;
	}
	/* Try to get the payload 4-byte aligned */
	if (buf[0] != PPP_ALLSTATIONS)
		skb_reserve(skb, 2 + (buf[0] & 1));

	if (flags && *flags) {
		/* error flag set, ignore frame */
		goto err;
	} else if (count > skb_tailroom(skb)) {
		/* packet overflowed MRU */
		goto err;
	}

	skb_put_data(skb, buf, count);

	/* strip address/control field if present */
	p = skb->data;
	if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
		/* chop off address/control */
		if (skb->len < 3)
			goto err;
		p = skb_pull(skb, 2);
	}

	/* decompress protocol field if compressed */
	if (p[0] & 1) {
		/* protocol is compressed */
		*(u8 *)skb_push(skb, 1) = 0;
	} else if (skb->len < 2)
		goto err;

	/* queue the frame to be processed */
	skb_queue_tail(&ap->rqueue, skb);
	return;

err:
	/* queue zero length packet as error indication */
	if (skb || (skb = dev_alloc_skb(0))) {
		skb_trim(skb, 0);
		skb_queue_tail(&ap->rqueue, skb);
	}
}

static void __exit
ppp_sync_cleanup(void)
{
	if (tty_unregister_ldisc(N_SYNC_PPP) != 0)
		printk(KERN_ERR "failed to unregister Sync PPP line discipline\n");
}

module_init(ppp_sync_init);
module_exit(ppp_sync_cleanup);
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_SYNC_PPP);