/*
 * SCLP VT220 terminal driver.
 *
 * Copyright IBM Corp. 2003, 2009
 *
 * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/major.h>
#include <linux/console.h>
#include <linux/kdev_t.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include "sclp.h"

#define SCLP_VT220_MAJOR		TTY_MAJOR
#define SCLP_VT220_MINOR		65
#define SCLP_VT220_DRIVER_NAME		"sclp_vt220"
#define SCLP_VT220_DEVICE_NAME		"ttysclp"
#define SCLP_VT220_CONSOLE_NAME		"ttyS"
#define SCLP_VT220_CONSOLE_INDEX	1	/* console=ttyS1 */

/* Representation of a single write request */
struct sclp_vt220_request {
	struct list_head list;
	struct sclp_req sclp_req;
	int retry_count;
};

/* VT220 SCCB */
struct sclp_vt220_sccb {
	struct sccb_header header;
	struct evbuf_header evbuf;
};

#define SCLP_VT220_MAX_CHARS_PER_BUFFER	(PAGE_SIZE - \
					 sizeof(struct sclp_vt220_request) - \
					 sizeof(struct sclp_vt220_sccb))
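
/*
 * Each write request buffer occupies one page: the SCCB header and event
 * buffer header sit at the start of the page, the character data follows
 * them, and the struct sclp_vt220_request bookkeeping structure is placed
 * at the end of the page (see sclp_vt220_initialize_page()). The space in
 * between is what SCLP_VT220_MAX_CHARS_PER_BUFFER describes.
 */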

/* Structures and data needed to register tty driver */
static struct tty_driver *sclp_vt220_driver;

static struct tty_port sclp_vt220_port;

/* Lock to protect internal data from concurrent access */
static spinlock_t sclp_vt220_lock;

/* List of empty pages to be used as write request buffers */
static struct list_head sclp_vt220_empty;

/* List of pending requests */
static struct list_head sclp_vt220_outqueue;

/* Suspend mode flag */
static int sclp_vt220_suspended;

/* Flag that output queue is currently running */
static int sclp_vt220_queue_running;

/* Timer used for delaying write requests to merge subsequent messages into
 * a single buffer */
static struct timer_list sclp_vt220_timer;

/* Pointer to current request buffer which has been partially filled but not
 * yet sent */
static struct sclp_vt220_request *sclp_vt220_current_request;

/* Number of characters in current request buffer */
static int sclp_vt220_buffered_chars;

/* Counter controlling core driver initialization. */
static int __initdata sclp_vt220_init_count;

/* Flag indicating that sclp_vt220_current_request should really
 * have been already queued but wasn't because the SCLP was processing
 * another buffer */
static int sclp_vt220_flush_later;

static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
				   enum sclp_pm_event sclp_pm_event);
static int __sclp_vt220_emit(struct sclp_vt220_request *request);
static void sclp_vt220_emit_current(void);

/* Registration structure for our interest in SCLP event buffers */
static struct sclp_register sclp_vt220_register = {
	.send_mask		= EVTYP_VT220MSG_MASK,
	.receive_mask		= EVTYP_VT220MSG_MASK,
	.state_change_fn	= NULL,
	.receiver_fn		= sclp_vt220_receiver_fn,
	.pm_event_fn		= sclp_vt220_pm_event_fn,
};

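/*
 * Outbound data flow: characters are collected in sclp_vt220_current_request
 * until the buffer is full or explicitly flushed (or the delay timer
 * expires). The request is then moved to sclp_vt220_outqueue and handed to
 * the SCLP core via __sclp_vt220_emit(). Completion is reported through
 * sclp_vt220_callback(), which returns the page to sclp_vt220_empty and
 * starts the next pending request.
 */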

/*
 * Return the provided request buffer to the list of empty buffers and emit
 * pending buffers if necessary.
 */
static void
sclp_vt220_process_queue(struct sclp_vt220_request *request)
{
	struct tty_struct *tty;
	unsigned long flags;
	void *page;

	do {
		/* Put buffer back to list of empty buffers */
		page = request->sclp_req.sccb;
		spin_lock_irqsave(&sclp_vt220_lock, flags);
		/* Move request from outqueue to empty queue */
		list_del(&request->list);
		list_add_tail((struct list_head *) page, &sclp_vt220_empty);
		/* Check if there is a pending buffer on the out queue. */
		request = NULL;
		if (!list_empty(&sclp_vt220_outqueue))
			request = list_entry(sclp_vt220_outqueue.next,
					     struct sclp_vt220_request, list);
		if (!request || sclp_vt220_suspended) {
			sclp_vt220_queue_running = 0;
			spin_unlock_irqrestore(&sclp_vt220_lock, flags);
			break;
		}
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	} while (__sclp_vt220_emit(request));
	if (request == NULL && sclp_vt220_flush_later)
		sclp_vt220_emit_current();
	/* Check if the tty needs a wake up call */
	tty = tty_port_tty_get(&sclp_vt220_port);
	if (tty) {
		tty_wakeup(tty);
		tty_kref_put(tty);
	}
}

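/* Give up on a write request after this many retries */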
#define SCLP_BUFFER_MAX_RETRY		1

/*
 * Callback through which the result of a write request is reported by the
 * SCLP.
 */
static void
sclp_vt220_callback(struct sclp_req *request, void *data)
{
	struct sclp_vt220_request *vt220_request;
	struct sclp_vt220_sccb *sccb;

	vt220_request = (struct sclp_vt220_request *) data;
	if (request->status == SCLP_REQ_FAILED) {
		sclp_vt220_process_queue(vt220_request);
		return;
	}
	sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;

	/* Check SCLP response code and choose suitable action	*/
	switch (sccb->header.response_code) {
	case 0x0020 :
		break;

	case 0x05f0: /* Target resource in improper state */
		break;

	case 0x0340: /* Contained SCLP equipment check */
		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
			break;
		/* Remove processed buffers and requeue rest */
		if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
			/* Not all buffers were processed */
			sccb->header.response_code = 0x0000;
			vt220_request->sclp_req.status = SCLP_REQ_FILLED;
			if (sclp_add_request(request) == 0)
				return;
		}
		break;

	case 0x0040: /* SCLP equipment check */
		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
			break;
		sccb->header.response_code = 0x0000;
		vt220_request->sclp_req.status = SCLP_REQ_FILLED;
		if (sclp_add_request(request) == 0)
			return;
		break;

	default:
		break;
	}
	sclp_vt220_process_queue(vt220_request);
}

/*
 * Emit vt220 request buffer to SCLP. Return zero on success, non-zero
 * otherwise.
 */
static int
__sclp_vt220_emit(struct sclp_vt220_request *request)
{
	if (!(sclp_vt220_register.sclp_receive_mask & EVTYP_VT220MSG_MASK)) {
		request->sclp_req.status = SCLP_REQ_FAILED;
		return -EIO;
	}
	request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
	request->sclp_req.status = SCLP_REQ_FILLED;
	request->sclp_req.callback = sclp_vt220_callback;
	request->sclp_req.callback_data = (void *) request;

	return sclp_add_request(&request->sclp_req);
}

/*
 * Queue and emit current request.
 */
static void
sclp_vt220_emit_current(void)
{
	unsigned long flags;
	struct sclp_vt220_request *request;
	struct sclp_vt220_sccb *sccb;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	if (sclp_vt220_current_request) {
		sccb = (struct sclp_vt220_sccb *)
				sclp_vt220_current_request->sclp_req.sccb;
		/* Only emit buffers with content */
		if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
			list_add_tail(&sclp_vt220_current_request->list,
				      &sclp_vt220_outqueue);
			sclp_vt220_current_request = NULL;
			if (timer_pending(&sclp_vt220_timer))
				del_timer(&sclp_vt220_timer);
		}
		sclp_vt220_flush_later = 0;
	}
	if (sclp_vt220_queue_running || sclp_vt220_suspended)
		goto out_unlock;
	if (list_empty(&sclp_vt220_outqueue))
		goto out_unlock;
	request = list_first_entry(&sclp_vt220_outqueue,
				   struct sclp_vt220_request, list);
	sclp_vt220_queue_running = 1;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);

	if (__sclp_vt220_emit(request))
		sclp_vt220_process_queue(request);
	return;
out_unlock:
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}

#define SCLP_NORMAL_WRITE	0x00

/*
 * Helper function to initialize a page with the sclp request structure.
 */
static struct sclp_vt220_request *
sclp_vt220_initialize_page(void *page)
{
	struct sclp_vt220_request *request;
	struct sclp_vt220_sccb *sccb;

	/* Place request structure at end of page */
	request = ((struct sclp_vt220_request *)
			((addr_t) page + PAGE_SIZE)) - 1;
	request->retry_count = 0;
	request->sclp_req.sccb = page;
	/* SCCB goes at start of page */
	sccb = (struct sclp_vt220_sccb *) page;
	memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
	sccb->header.length = sizeof(struct sclp_vt220_sccb);
	sccb->header.function_code = SCLP_NORMAL_WRITE;
	sccb->header.response_code = 0x0000;
	sccb->evbuf.type = EVTYP_VT220MSG;
	sccb->evbuf.length = sizeof(struct evbuf_header);

	return request;
}

static inline unsigned int
sclp_vt220_space_left(struct sclp_vt220_request *request)
{
	struct sclp_vt220_sccb *sccb;
	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
	return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
	       sccb->header.length;
}

static inline unsigned int
sclp_vt220_chars_stored(struct sclp_vt220_request *request)
{
	struct sclp_vt220_sccb *sccb;
	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
	return sccb->evbuf.length - sizeof(struct evbuf_header);
}

/*
 * Add msg to buffer associated with request. Return the number of characters
 * added.
 */
static int
sclp_vt220_add_msg(struct sclp_vt220_request *request,
		   const unsigned char *msg, int count, int convertlf)
{
	struct sclp_vt220_sccb *sccb;
	void *buffer;
	unsigned char c;
	int from;
	int to;

	if (count > sclp_vt220_space_left(request))
		count = sclp_vt220_space_left(request);
	if (count <= 0)
		return 0;

	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
	buffer = (void *) ((addr_t) sccb + sccb->header.length);

	if (convertlf) {
		/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d) */
		for (from = 0, to = 0;
		     (from < count) && (to < sclp_vt220_space_left(request));
		     from++) {
			/* Retrieve character */
			c = msg[from];
			/* Perform conversion */
			if (c == 0x0a) {
				if (to + 1 < sclp_vt220_space_left(request)) {
					((unsigned char *) buffer)[to++] = c;
					((unsigned char *) buffer)[to++] = 0x0d;
				} else
					break;

			} else
				((unsigned char *) buffer)[to++] = c;
		}
		sccb->header.length += to;
		sccb->evbuf.length += to;
		return from;
	} else {
		memcpy(buffer, (const void *) msg, count);
		sccb->header.length += count;
		sccb->evbuf.length += count;
		return count;
	}
}

/*
 * Emit buffer after having waited long enough for more data to arrive.
 */
static void
sclp_vt220_timeout(unsigned long data)
{
	sclp_vt220_emit_current();
}

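/* Delay output of a partially filled buffer by at most HZ/20 jiffies (50 ms)
 * so that subsequent writes can be merged into a single request. */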
#define BUFFER_MAX_DELAY	HZ/20

/*
 * Internal implementation of the write function. Write COUNT bytes of data
 * from memory at BUF to the SCLP interface. In case that the data does not
 * fit into the current write buffer, emit the current one and allocate a
 * new one. If there are no more empty buffers available, wait until one
 * gets emptied (or, if MAY_FAIL is non-zero, return with the number of
 * bytes written so far). If DO_SCHEDULE is non-zero, the buffer will be
 * scheduled for emitting after a timeout - otherwise the user has to
 * explicitly call the flush function.
 * A non-zero CONVERTLF parameter indicates that 0x0a characters in the
 * message buffer should be converted to 0x0a 0x0d. After completion, return
 * the number of bytes written.
 */
static int
__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
		   int convertlf, int may_fail)
{
	unsigned long flags;
	void *page;
	int written;
	int overall_written;

	if (count <= 0)
		return 0;
	overall_written = 0;
	spin_lock_irqsave(&sclp_vt220_lock, flags);
	do {
		/* Create an sclp output buffer if none exists yet */
		if (sclp_vt220_current_request == NULL) {
			while (list_empty(&sclp_vt220_empty)) {
				spin_unlock_irqrestore(&sclp_vt220_lock, flags);
				if (may_fail || sclp_vt220_suspended)
					goto out;
				else
					sclp_sync_wait();
				spin_lock_irqsave(&sclp_vt220_lock, flags);
			}
			page = (void *) sclp_vt220_empty.next;
			list_del((struct list_head *) page);
			sclp_vt220_current_request =
				sclp_vt220_initialize_page(page);
		}
		/* Try to write the string to the current request buffer */
		written = sclp_vt220_add_msg(sclp_vt220_current_request,
					     buf, count, convertlf);
		overall_written += written;
		if (written == count)
			break;
		/*
		 * Not all characters could be written to the current
		 * output buffer. Emit the buffer, create a new buffer
		 * and then output the rest of the string.
		 */
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
		sclp_vt220_emit_current();
		spin_lock_irqsave(&sclp_vt220_lock, flags);
		buf += written;
		count -= written;
	} while (count > 0);
	/* Setup timer to output current console buffer after some time */
	if (sclp_vt220_current_request != NULL &&
	    !timer_pending(&sclp_vt220_timer) && do_schedule) {
		sclp_vt220_timer.function = sclp_vt220_timeout;
		sclp_vt220_timer.data = 0UL;
		sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
		add_timer(&sclp_vt220_timer);
	}
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
out:
	return overall_written;
}

/*
 * This routine is called by the kernel to write a series of
 * characters to the tty device.  The characters may come from
 * user space or kernel space.  This routine will return the
 * number of characters actually accepted for writing.
 */
static int
sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	return __sclp_vt220_write(buf, count, 1, 0, 1);
}

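/* The first data byte of an incoming VT220 event buffer indicates the
 * session state; only SESSION_DATA buffers carry actual terminal input. */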
#define SCLP_VT220_SESSION_ENDED	0x01
#define SCLP_VT220_SESSION_STARTED	0x80
#define SCLP_VT220_SESSION_DATA		0x00

/*
 * Called by the SCLP to report incoming event buffers.
 */
static void
sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
{
	struct tty_struct *tty = tty_port_tty_get(&sclp_vt220_port);
	char *buffer;
	unsigned int count;

	/* Ignore input if device is not open */
	if (tty == NULL)
		return;

	buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
	count = evbuf->length - sizeof(struct evbuf_header);

	switch (*buffer) {
	case SCLP_VT220_SESSION_ENDED:
	case SCLP_VT220_SESSION_STARTED:
		break;
	case SCLP_VT220_SESSION_DATA:
		/* Send input to line discipline */
		buffer++;
		count--;
		tty_insert_flip_string(tty, buffer, count);
		tty_flip_buffer_push(tty);
		break;
	}
	tty_kref_put(tty);
}

/*
 * This routine is called when a particular tty device is opened.
 */
static int
sclp_vt220_open(struct tty_struct *tty, struct file *filp)
{
	if (tty->count == 1) {
		tty_port_tty_set(&sclp_vt220_port, tty);
		tty->low_latency = 0;
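		/* Default to a 24x80 window if no size has been set yet */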
		if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
			tty->winsize.ws_row = 24;
			tty->winsize.ws_col = 80;
		}
	}
	return 0;
}

/*
 * This routine is called when a particular tty device is closed.
 */
static void
sclp_vt220_close(struct tty_struct *tty, struct file *filp)
{
	if (tty->count == 1)
		tty_port_tty_set(&sclp_vt220_port, NULL);
}

/*
 * This routine is called by the kernel to write a single
 * character to the tty device.  If the kernel uses this routine,
 * it must call the flush_chars() routine (if defined) when it is
 * done stuffing characters into the driver.
 */
static int
sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
{
	return __sclp_vt220_write(&ch, 1, 0, 0, 1);
}

/*
 * This routine is called by the kernel after it has written a
 * series of characters to the tty device using put_char().
 */
static void
sclp_vt220_flush_chars(struct tty_struct *tty)
{
	if (!sclp_vt220_queue_running)
		sclp_vt220_emit_current();
	else
		sclp_vt220_flush_later = 1;
}

/*
 * This routine returns the number of characters the tty driver
 * will accept for queuing to be written.  This number is subject
 * to change as output buffers get emptied, or if output flow
 * control is asserted.
 */
static int
sclp_vt220_write_room(struct tty_struct *tty)
{
	unsigned long flags;
	struct list_head *l;
	int count;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	count = 0;
	if (sclp_vt220_current_request != NULL)
		count = sclp_vt220_space_left(sclp_vt220_current_request);
	list_for_each(l, &sclp_vt220_empty)
		count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	return count;
}

/*
 * Return number of buffered chars.
 */
static int
sclp_vt220_chars_in_buffer(struct tty_struct *tty)
{
	unsigned long flags;
	struct list_head *l;
	struct sclp_vt220_request *r;
	int count;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	count = 0;
	if (sclp_vt220_current_request != NULL)
		count = sclp_vt220_chars_stored(sclp_vt220_current_request);
	list_for_each(l, &sclp_vt220_outqueue) {
		r = list_entry(l, struct sclp_vt220_request, list);
		count += sclp_vt220_chars_stored(r);
	}
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	return count;
}

/*
 * Pass on all buffered data to the hardware by emitting the current buffer.
 */
static void
sclp_vt220_flush_buffer(struct tty_struct *tty)
{
	sclp_vt220_emit_current();
}

/* Release allocated pages. */
static void __init __sclp_vt220_free_pages(void)
{
	struct list_head *page, *p;

	list_for_each_safe(page, p, &sclp_vt220_empty) {
		list_del(page);
		free_page((unsigned long) page);
	}
}

/* Release memory and unregister from sclp core. Controlled by init counting -
 * only the last invoker will actually perform these actions. */
static void __init __sclp_vt220_cleanup(void)
{
	sclp_vt220_init_count--;
	if (sclp_vt220_init_count != 0)
		return;
	sclp_unregister(&sclp_vt220_register);
	__sclp_vt220_free_pages();
}

/* Allocate buffer pages and register with sclp core. Controlled by init
 * counting - only the first invoker will actually perform these actions. */
static int __init __sclp_vt220_init(int num_pages)
{
	void *page;
	int i;
	int rc;

	sclp_vt220_init_count++;
	if (sclp_vt220_init_count != 1)
		return 0;
	spin_lock_init(&sclp_vt220_lock);
	INIT_LIST_HEAD(&sclp_vt220_empty);
	INIT_LIST_HEAD(&sclp_vt220_outqueue);
	init_timer(&sclp_vt220_timer);
	tty_port_init(&sclp_vt220_port);
	sclp_vt220_current_request = NULL;
	sclp_vt220_buffered_chars = 0;
	sclp_vt220_flush_later = 0;

	/* Allocate pages for output buffering */
	rc = -ENOMEM;
	for (i = 0; i < num_pages; i++) {
		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!page)
			goto out;
		list_add_tail(page, &sclp_vt220_empty);
	}
	rc = sclp_register(&sclp_vt220_register);
out:
	if (rc) {
		__sclp_vt220_free_pages();
		sclp_vt220_init_count--;
	}
	return rc;
}

static const struct tty_operations sclp_vt220_ops = {
	.open = sclp_vt220_open,
	.close = sclp_vt220_close,
	.write = sclp_vt220_write,
	.put_char = sclp_vt220_put_char,
	.flush_chars = sclp_vt220_flush_chars,
	.write_room = sclp_vt220_write_room,
	.chars_in_buffer = sclp_vt220_chars_in_buffer,
	.flush_buffer = sclp_vt220_flush_buffer,
};

/*
 * Register driver with SCLP and Linux and initialize internal tty structures.
 */
static int __init sclp_vt220_tty_init(void)
{
	struct tty_driver *driver;
	int rc;

	/* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
	 * symmetry between VM and LPAR systems regarding ttyS1. */
	driver = alloc_tty_driver(1);
	if (!driver)
		return -ENOMEM;
	rc = __sclp_vt220_init(MAX_KMEM_PAGES);
	if (rc)
		goto out_driver;

	driver->driver_name = SCLP_VT220_DRIVER_NAME;
	driver->name = SCLP_VT220_DEVICE_NAME;
	driver->major = SCLP_VT220_MAJOR;
	driver->minor_start = SCLP_VT220_MINOR;
	driver->type = TTY_DRIVER_TYPE_SYSTEM;
	driver->subtype = SYSTEM_TYPE_TTY;
	driver->init_termios = tty_std_termios;
	driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(driver, &sclp_vt220_ops);

	rc = tty_register_driver(driver);
	if (rc)
		goto out_init;
	sclp_vt220_driver = driver;
	return 0;

out_init:
	__sclp_vt220_cleanup();
out_driver:
	put_tty_driver(driver);
	return rc;
}
__initcall(sclp_vt220_tty_init);

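/*
 * Emit the current buffer, cancel the delay timer and wait until all
 * queued requests have been processed by the SCLP.
 */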
static void __sclp_vt220_flush_buffer(void)
{
	unsigned long flags;

	sclp_vt220_emit_current();
	spin_lock_irqsave(&sclp_vt220_lock, flags);
	if (timer_pending(&sclp_vt220_timer))
		del_timer(&sclp_vt220_timer);
	while (sclp_vt220_queue_running) {
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
		sclp_sync_wait();
		spin_lock_irqsave(&sclp_vt220_lock, flags);
	}
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}

/*
 * Resume console: If there are cached messages, emit them.
 */
static void sclp_vt220_resume(void)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	sclp_vt220_suspended = 0;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	sclp_vt220_emit_current();
}

/*
 * Suspend console: Set suspend flag and flush console
 */
static void sclp_vt220_suspend(void)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	sclp_vt220_suspended = 1;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	__sclp_vt220_flush_buffer();
}

static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
				   enum sclp_pm_event sclp_pm_event)
{
	switch (sclp_pm_event) {
	case SCLP_PM_EVENT_FREEZE:
		sclp_vt220_suspend();
		break;
	case SCLP_PM_EVENT_RESTORE:
	case SCLP_PM_EVENT_THAW:
		sclp_vt220_resume();
		break;
	}
}

#ifdef CONFIG_SCLP_VT220_CONSOLE

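/*
 * Console support: when the SCLP VT220 terminal is used as the Linux console
 * (console=ttyS1), printk output is routed through __sclp_vt220_write() with
 * linefeed conversion enabled and may_fail=0, i.e. the console write waits
 * for a free buffer instead of dropping output.
 */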
static void
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
{
	__sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
}

static struct tty_driver *
sclp_vt220_con_device(struct console *c, int *index)
{
	*index = 0;
	return sclp_vt220_driver;
}

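/*
 * Flush buffered console output on panic or reboot so that pending messages
 * are not lost.
 */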
static int
sclp_vt220_notify(struct notifier_block *self,
			  unsigned long event, void *data)
{
	__sclp_vt220_flush_buffer();
	return NOTIFY_OK;
}

static struct notifier_block on_panic_nb = {
	.notifier_call = sclp_vt220_notify,
	.priority = 1,
};

static struct notifier_block on_reboot_nb = {
	.notifier_call = sclp_vt220_notify,
	.priority = 1,
};

/* Structure needed to register with printk */
static struct console sclp_vt220_console =
{
	.name = SCLP_VT220_CONSOLE_NAME,
	.write = sclp_vt220_con_write,
	.device = sclp_vt220_con_device,
	.flags = CON_PRINTBUFFER,
	.index = SCLP_VT220_CONSOLE_INDEX
};

static int __init
sclp_vt220_con_init(void)
{
	int rc;

	if (!CONSOLE_IS_SCLP)
		return 0;
	rc = __sclp_vt220_init(MAX_CONSOLE_PAGES);
	if (rc)
		return rc;
	/* Attach linux console */
	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
	register_reboot_notifier(&on_reboot_nb);
	register_console(&sclp_vt220_console);
	return 0;
}

console_initcall(sclp_vt220_con_init);
#endif /* CONFIG_SCLP_VT220_CONSOLE */