hvc_iucv.c (Linux v3.1)
   1/*
   2 * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
   3 *
   4 * This HVC device driver provides terminal access using
   5 * z/VM IUCV communication paths.
   6 *
   7 * Copyright IBM Corp. 2008, 2009
   8 *
   9 * Author(s):	Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  10 */
  11#define KMSG_COMPONENT		"hvc_iucv"
  12#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt
  13
  14#include <linux/types.h>
  15#include <linux/slab.h>
  16#include <asm/ebcdic.h>
  17#include <linux/ctype.h>
  18#include <linux/delay.h>
  19#include <linux/device.h>
  20#include <linux/init.h>
  21#include <linux/mempool.h>
  22#include <linux/moduleparam.h>
  23#include <linux/tty.h>
  24#include <linux/wait.h>
  25#include <net/iucv/iucv.h>
  26
  27#include "hvc_console.h"
  28
  29
  30/* General device driver settings */
  31#define HVC_IUCV_MAGIC		0xc9e4c3e5
  32#define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS
  33#define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
  34
  35/* IUCV TTY message  */
  36#define MSG_VERSION		0x02	/* Message version */
  37#define MSG_TYPE_ERROR		0x01	/* Error message */
  38#define MSG_TYPE_TERMENV	0x02	/* Terminal environment variable */
  39#define MSG_TYPE_TERMIOS	0x04	/* Terminal IO struct update */
  40#define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */
  41#define MSG_TYPE_DATA		0x10	/* Terminal data */
  42
  43struct iucv_tty_msg {
  44	u8	version;		/* Message version */
  45	u8	type;			/* Message type */
  46#define MSG_MAX_DATALEN		((u16)(~0))
  47	u16	datalen;		/* Payload length */
  48	u8	data[];			/* Payload buffer */
  49} __attribute__((packed));
  50#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))
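/* Worked example (for illustration only): struct iucv_tty_msg is packed,
 * so version, type and datalen occupy 4 bytes and the payload starts at
 * offset 4.  A message carrying a 10-byte payload therefore has a total
 * length of MSG_SIZE(10) == 10 + offsetof(struct iucv_tty_msg, data) == 14. */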
  51
  52enum iucv_state_t {
  53	IUCV_DISCONN	= 0,
  54	IUCV_CONNECTED	= 1,
  55	IUCV_SEVERED	= 2,
  56};
  57
  58enum tty_state_t {
  59	TTY_CLOSED	= 0,
  60	TTY_OPENED	= 1,
  61};
  62
  63struct hvc_iucv_private {
  64	struct hvc_struct	*hvc;		/* HVC struct reference */
  65	u8			srv_name[8];	/* IUCV service name (ebcdic) */
  66	unsigned char		is_console;	/* Linux console usage flag */
  67	enum iucv_state_t	iucv_state;	/* IUCV connection status */
  68	enum tty_state_t	tty_state;	/* TTY status */
  69	struct iucv_path	*path;		/* IUCV path pointer */
  70	spinlock_t		lock;		/* hvc_iucv_private lock */
  71#define SNDBUF_SIZE		(PAGE_SIZE)	/* must be < MSG_MAX_DATALEN */
  72	void			*sndbuf;	/* send buffer		  */
  73	size_t			sndbuf_len;	/* length of send buffer  */
  74#define QUEUE_SNDBUF_DELAY	(HZ / 25)
  75	struct delayed_work	sndbuf_work;	/* work: send iucv msg(s) */
  76	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */
  77	struct list_head	tty_outqueue;	/* outgoing IUCV messages */
  78	struct list_head	tty_inqueue;	/* incoming IUCV messages */
   79	struct device		*dev;		/* device structure */
   80};
  81
  82struct iucv_tty_buffer {
  83	struct list_head	list;	/* list pointer */
  84	struct iucv_message	msg;	/* store an IUCV message */
  85	size_t			offset;	/* data buffer offset */
  86	struct iucv_tty_msg	*mbuf;	/* buffer to store input/output data */
  87};
  88
  89/* IUCV callback handler */
  90static	int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
  91static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
  92static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
  93static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
  94
  95
  96/* Kernel module parameter: use one terminal device as default */
  97static unsigned long hvc_iucv_devices = 1;
  98
  99/* Array of allocated hvc iucv tty lines... */
 100static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
 101#define IUCV_HVC_CON_IDX	(0)
 102/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
 103#define MAX_VMID_FILTER		(500)
 104static size_t hvc_iucv_filter_size;
 105static void *hvc_iucv_filter;
 106static const char *hvc_iucv_filter_string;
 107static DEFINE_RWLOCK(hvc_iucv_filter_lock);
 108
 109/* Kmem cache and mempool for iucv_tty_buffer elements */
 110static struct kmem_cache *hvc_iucv_buffer_cache;
 111static mempool_t *hvc_iucv_mempool;
 112
 113/* IUCV handler callback functions */
 114static struct iucv_handler hvc_iucv_handler = {
 115	.path_pending  = hvc_iucv_path_pending,
 116	.path_severed  = hvc_iucv_path_severed,
 117	.message_complete = hvc_iucv_msg_complete,
 118	.message_pending  = hvc_iucv_msg_pending,
 119};
 120
 121
 122/**
 123 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
 124 * @num:	The HVC virtual terminal number (vtermno)
 125 *
 126 * This function returns the struct hvc_iucv_private instance that corresponds
 127 * to the HVC virtual terminal number specified as parameter @num.
 128 */
 129struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
 130{
 131	if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
 132		return NULL;
 133	return hvc_iucv_table[num - HVC_IUCV_MAGIC];
 134}
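/* Usage sketch (illustrative values): with the default hvc_iucv_devices=1,
 * hvc_iucv_get_private(HVC_IUCV_MAGIC) returns hvc_iucv_table[0], the line
 * at IUCV_HVC_CON_IDX that is also registered as Linux console; any other
 * vtermno yields NULL because no further table entries are allocated. */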
 135
 136/**
 137 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
 138 * @size:	Size of the internal buffer used to store data.
 139 * @flags:	Memory allocation flags passed to mempool.
 140 *
 141 * This function allocates a new struct iucv_tty_buffer element and, optionally,
 142 * allocates an internal data buffer with the specified size @size.
 143 * The internal data buffer is always allocated with GFP_DMA which is
 144 * required for receiving and sending data with IUCV.
 145 * Note: The total message size arises from the internal buffer size and the
 146 *	 members of the iucv_tty_msg structure.
 147 * The function returns NULL if memory allocation has failed.
 148 */
 149static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
 150{
 151	struct iucv_tty_buffer *bufp;
 152
 153	bufp = mempool_alloc(hvc_iucv_mempool, flags);
 154	if (!bufp)
 155		return NULL;
 156	memset(bufp, 0, sizeof(*bufp));
 157
 158	if (size > 0) {
 159		bufp->msg.length = MSG_SIZE(size);
 160		bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
 161		if (!bufp->mbuf) {
 162			mempool_free(bufp, hvc_iucv_mempool);
 163			return NULL;
 164		}
 165		bufp->mbuf->version = MSG_VERSION;
 166		bufp->mbuf->type    = MSG_TYPE_DATA;
 167		bufp->mbuf->datalen = (u16) size;
 168	}
 169	return bufp;
 170}
 171
 172/**
 173 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
 174 * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
 175 */
 176static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
 177{
 178	kfree(bufp->mbuf);
 179	mempool_free(bufp, hvc_iucv_mempool);
 180}
 181
 182/**
 183 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
 184 * @list:	List containing struct iucv_tty_buffer elements.
 185 */
 186static void destroy_tty_buffer_list(struct list_head *list)
 187{
 188	struct iucv_tty_buffer *ent, *next;
 189
 190	list_for_each_entry_safe(ent, next, list, list) {
 191		list_del(&ent->list);
 192		destroy_tty_buffer(ent);
 193	}
 194}
 195
 196/**
 197 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 198 * @priv:		Pointer to struct hvc_iucv_private
 199 * @buf:		HVC buffer for writing received terminal data.
 200 * @count:		HVC buffer size.
 201 * @has_more_data:	Pointer to an int variable.
 202 *
 203 * The function picks up pending messages from the input queue and receives
 204 * the message data that is then written to the specified buffer @buf.
 205 * If the buffer size @count is less than the data message size, the
 206 * message is kept on the input queue and @has_more_data is set to 1.
 207 * If all message data has been written, the message is removed from
 208 * the input queue.
 209 *
 210 * The function returns the number of bytes written to the terminal, zero if
 211 * there are no pending data messages available or if there is no established
 212 * IUCV path.
 213 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 214 * hang up (that is issued by the HVC layer).
 215 */
 216static int hvc_iucv_write(struct hvc_iucv_private *priv,
 217			  char *buf, int count, int *has_more_data)
 218{
 219	struct iucv_tty_buffer *rb;
 220	int written;
 221	int rc;
 222
 223	/* immediately return if there is no IUCV connection */
 224	if (priv->iucv_state == IUCV_DISCONN)
 225		return 0;
 226
 227	/* if the IUCV path has been severed, return -EPIPE to inform the
 228	 * HVC layer to hang up the tty device. */
 229	if (priv->iucv_state == IUCV_SEVERED)
 230		return -EPIPE;
 231
 232	/* check if there are pending messages */
 233	if (list_empty(&priv->tty_inqueue))
 234		return 0;
 235
 236	/* receive an iucv message and flip data to the tty (ldisc) */
 237	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
 238
 239	written = 0;
 240	if (!rb->mbuf) { /* message not yet received ... */
 241		/* allocate mem to store msg data; if no memory is available
 242		 * then leave the buffer on the list and re-try later */
 243		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
 244		if (!rb->mbuf)
 245			return -ENOMEM;
 246
 247		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
 248					    rb->mbuf, rb->msg.length, NULL);
 249		switch (rc) {
 250		case 0: /* Successful	    */
 251			break;
 252		case 2:	/* No message found */
 253		case 9: /* Message purged   */
 254			break;
 255		default:
 256			written = -EIO;
 257		}
 258		/* remove buffer if an error has occurred or received data
 259		 * is not correct */
 260		if (rc || (rb->mbuf->version != MSG_VERSION) ||
 261			  (rb->msg.length    != MSG_SIZE(rb->mbuf->datalen)))
 262			goto out_remove_buffer;
 263	}
 264
 265	switch (rb->mbuf->type) {
 266	case MSG_TYPE_DATA:
 267		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
 268		memcpy(buf, rb->mbuf->data + rb->offset, written);
 269		if (written < (rb->mbuf->datalen - rb->offset)) {
 270			rb->offset += written;
 271			*has_more_data = 1;
 272			goto out_written;
 273		}
 274		break;
 275
 276	case MSG_TYPE_WINSIZE:
 277		if (rb->mbuf->datalen != sizeof(struct winsize))
 278			break;
 279		/* The caller must ensure that the hvc is locked, which
 280		 * is the case when called from hvc_iucv_get_chars() */
 281		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
 282		break;
 283
 284	case MSG_TYPE_ERROR:	/* ignored ... */
 285	case MSG_TYPE_TERMENV:	/* ignored ... */
 286	case MSG_TYPE_TERMIOS:	/* ignored ... */
 287		break;
 288	}
 289
 290out_remove_buffer:
 291	list_del(&rb->list);
 292	destroy_tty_buffer(rb);
 293	*has_more_data = !list_empty(&priv->tty_inqueue);
 294
 295out_written:
 296	return written;
 297}
 298
 299/**
 300 * hvc_iucv_get_chars() - HVC get_chars operation.
 301 * @vtermno:	HVC virtual terminal number.
 302 * @buf:	Pointer to a buffer to store data
 303 * @count:	Size of buffer available for writing
 304 *
 305 * The HVC thread calls this method to read characters from the back-end.
 306 * If an IUCV communication path has been established, pending IUCV messages
 307 * are received and data is copied into buffer @buf up to @count bytes.
 308 *
 309 * Locking:	The routine gets called under an irqsave() spinlock; and
 310 *		the routine locks the struct hvc_iucv_private->lock to call
 311 *		helper functions.
 312 */
 313static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
 314{
 315	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
 316	int written;
 317	int has_more_data;
 318
 319	if (count <= 0)
 320		return 0;
 321
 322	if (!priv)
 323		return -ENODEV;
 324
 325	spin_lock(&priv->lock);
 326	has_more_data = 0;
 327	written = hvc_iucv_write(priv, buf, count, &has_more_data);
 328	spin_unlock(&priv->lock);
 329
 330	/* if there are still messages on the queue... schedule another run */
 331	if (has_more_data)
 332		hvc_kick();
 333
 334	return written;
 335}
 336
 337/**
 338 * hvc_iucv_queue() - Buffer terminal data for sending.
 339 * @priv:	Pointer to struct hvc_iucv_private instance.
 340 * @buf:	Buffer containing data to send.
 341 * @count:	Size of buffer and amount of data to send.
 342 *
 343 * The function queues data for sending. To actually send the buffered data,
 344 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
  345 * The function returns the number of data bytes that have been buffered.
 346 *
 347 * If the device is not connected, data is ignored and the function returns
 348 * @count.
 349 * If the buffer is full, the function returns 0.
  350 * If an existing IUCV communication path has been severed, -EPIPE is returned
 351 * (that can be passed to HVC layer to cause a tty hangup).
 352 */
 353static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
 354			  int count)
 355{
 356	size_t len;
 357
 358	if (priv->iucv_state == IUCV_DISCONN)
 359		return count;			/* ignore data */
 360
 361	if (priv->iucv_state == IUCV_SEVERED)
 362		return -EPIPE;
 363
 364	len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
 365	if (!len)
 366		return 0;
 367
 368	memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
 369	priv->sndbuf_len += len;
 370
 371	if (priv->iucv_state == IUCV_CONNECTED)
 372		schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
 373
 374	return len;
 375}
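/* Timing note (sketch): QUEUE_SNDBUF_DELAY is HZ/25 jiffies, i.e. roughly
 * 40 milliseconds, so consecutive put_chars() calls are coalesced in the
 * page-sized send buffer before hvc_iucv_sndbuf_work() flushes it as a
 * single IUCV message. */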
 376
 377/**
 378 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 379 * @priv:	Pointer to struct hvc_iucv_private instance.
 380 *
 381 * If an IUCV communication path has been established, the buffered output data
 382 * is sent via an IUCV message and the number of bytes sent is returned.
 383 * Returns 0 if there is no established IUCV communication path or
  384 * -EPIPE if an existing IUCV communication path has been severed.
 385 */
 386static int hvc_iucv_send(struct hvc_iucv_private *priv)
 387{
 388	struct iucv_tty_buffer *sb;
 389	int rc, len;
 390
 391	if (priv->iucv_state == IUCV_SEVERED)
 392		return -EPIPE;
 393
 394	if (priv->iucv_state == IUCV_DISCONN)
 395		return -EIO;
 396
 397	if (!priv->sndbuf_len)
 398		return 0;
 399
 400	/* allocate internal buffer to store msg data and also compute total
 401	 * message length */
 402	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
 403	if (!sb)
 404		return -ENOMEM;
 405
 406	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
 407	sb->mbuf->datalen = (u16) priv->sndbuf_len;
 408	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);
 409
 410	list_add_tail(&sb->list, &priv->tty_outqueue);
 411
 412	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
 413				 (void *) sb->mbuf, sb->msg.length);
 414	if (rc) {
 415		/* drop the message here; however we might want to handle
 416		 * 0x03 (msg limit reached) by trying again... */
 417		list_del(&sb->list);
 418		destroy_tty_buffer(sb);
 419	}
 420	len = priv->sndbuf_len;
 421	priv->sndbuf_len = 0;
 422
 423	return len;
 424}
 425
 426/**
 427 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
 428 * @work:	Work structure.
 429 *
 430 * This work queue function sends buffered output data over IUCV and,
 431 * if not all buffered data could be sent, reschedules itself.
 432 */
 433static void hvc_iucv_sndbuf_work(struct work_struct *work)
 434{
 435	struct hvc_iucv_private *priv;
 436
 437	priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
 438	if (!priv)
 439		return;
 440
 441	spin_lock_bh(&priv->lock);
 442	hvc_iucv_send(priv);
 443	spin_unlock_bh(&priv->lock);
 444}
 445
 446/**
 447 * hvc_iucv_put_chars() - HVC put_chars operation.
 448 * @vtermno:	HVC virtual terminal number.
  449 * @buf:	Pointer to a buffer to read data from
 450 * @count:	Size of buffer available for reading
 451 *
 452 * The HVC thread calls this method to write characters to the back-end.
 453 * The function calls hvc_iucv_queue() to queue terminal data for sending.
 454 *
 455 * Locking:	The method gets called under an irqsave() spinlock; and
 456 *		locks struct hvc_iucv_private->lock.
 457 */
 458static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
 459{
 460	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
 461	int queued;
 462
 463	if (count <= 0)
 464		return 0;
 465
 466	if (!priv)
 467		return -ENODEV;
 468
 469	spin_lock(&priv->lock);
 470	queued = hvc_iucv_queue(priv, buf, count);
 471	spin_unlock(&priv->lock);
 472
 473	return queued;
 474}
 475
 476/**
 477 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
 478 * @hp:	Pointer to the HVC device (struct hvc_struct)
  479 * @id:	Additional data (originally passed to hvc_alloc): the index of a struct
 480 *	hvc_iucv_private instance.
 481 *
 482 * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
 483 * instance that is derived from @id. Always returns 0.
 484 *
 485 * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
 486 */
 487static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
 488{
 489	struct hvc_iucv_private *priv;
 490
 491	priv = hvc_iucv_get_private(id);
 492	if (!priv)
 493		return 0;
 494
 495	spin_lock_bh(&priv->lock);
 496	priv->tty_state = TTY_OPENED;
 497	spin_unlock_bh(&priv->lock);
 498
 499	return 0;
 500}
 501
 502/**
 503 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
 504 * @priv:	Pointer to the struct hvc_iucv_private instance.
 505 */
 506static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
 507{
 508	destroy_tty_buffer_list(&priv->tty_outqueue);
 509	destroy_tty_buffer_list(&priv->tty_inqueue);
 510
 511	priv->tty_state = TTY_CLOSED;
 512	priv->iucv_state = IUCV_DISCONN;
 513
 514	priv->sndbuf_len = 0;
 515}
 516
 517/**
 518 * tty_outqueue_empty() - Test if the tty outq is empty
 519 * @priv:	Pointer to struct hvc_iucv_private instance.
 520 */
 521static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
 522{
 523	int rc;
 524
 525	spin_lock_bh(&priv->lock);
 526	rc = list_empty(&priv->tty_outqueue);
 527	spin_unlock_bh(&priv->lock);
 528
 529	return rc;
 530}
 531
 532/**
 533 * flush_sndbuf_sync() - Flush send buffer and wait for completion
 534 * @priv:	Pointer to struct hvc_iucv_private instance.
 535 *
 536 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
 537 * to flush any buffered terminal output data and waits for completion.
 538 */
 539static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
 540{
 541	int sync_wait;
 542
 543	cancel_delayed_work_sync(&priv->sndbuf_work);
 544
 545	spin_lock_bh(&priv->lock);
 546	hvc_iucv_send(priv);		/* force sending buffered data */
 547	sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
 548	spin_unlock_bh(&priv->lock);
 549
 550	if (sync_wait)
 551		wait_event_timeout(priv->sndbuf_waitq,
 552				   tty_outqueue_empty(priv), HZ/10);
 553}
 554
 555/**
 556 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
 557 * @priv:	Pointer to hvc_iucv_private structure
 558 *
 559 * This routine severs an existing IUCV communication path and hangs
 560 * up the underlying HVC terminal device.
 561 * The hang-up occurs only if an IUCV communication path is established;
 562 * otherwise there is no need to hang up the terminal device.
 563 *
 564 * The IUCV HVC hang-up is separated into two steps:
 565 * 1. After the IUCV path has been severed, the iucv_state is set to
 566 *    IUCV_SEVERED.
 567 * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
 568 *    IUCV_SEVERED state causes the tty hang-up in the HVC layer.
 569 *
 570 * If the tty has not yet been opened, clean up the hvc_iucv_private
 571 * structure to allow re-connects.
 572 * If the tty has been opened, let get_chars() return -EPIPE to signal
 573 * the HVC layer to hang up the tty and, if so, wake up the HVC thread
 574 * to call get_chars()...
 575 *
 576 * Special notes on hanging up a HVC terminal instantiated as console:
 577 * Hang-up:	1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
 578 *		2. do_tty_hangup() calls tty->ops->close() for console_filp
 579 *			=> no hangup notifier is called by HVC (default)
  580 *		3. hvc_close() returns because of tty_hung_up_p(filp)
 581 *			=> no delete notifier is called!
 582 * Finally, the back-end is not being notified, thus, the tty session is
  583 * kept active (TTY_OPENED) to be ready for re-connects.
 584 *
 585 * Locking:	spin_lock(&priv->lock) w/o disabling bh
 586 */
 587static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
 588{
 589	struct iucv_path *path;
 590
 591	path = NULL;
 592	spin_lock(&priv->lock);
 593	if (priv->iucv_state == IUCV_CONNECTED) {
 594		path = priv->path;
 595		priv->path = NULL;
 596		priv->iucv_state = IUCV_SEVERED;
 597		if (priv->tty_state == TTY_CLOSED)
 598			hvc_iucv_cleanup(priv);
 599		else
 600			/* console is special (see above) */
 601			if (priv->is_console) {
 602				hvc_iucv_cleanup(priv);
 603				priv->tty_state = TTY_OPENED;
 604			} else
 605				hvc_kick();
 606	}
 607	spin_unlock(&priv->lock);
 608
 609	/* finally sever path (outside of priv->lock due to lock ordering) */
 610	if (path) {
 611		iucv_path_sever(path, NULL);
 612		iucv_path_free(path);
 613	}
 614}
 615
 616/**
 617 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 618 * @hp:		Pointer to the HVC device (struct hvc_struct)
 619 * @id:		Additional data (originally passed to hvc_alloc):
  620 *		the index of a struct hvc_iucv_private instance.
 621 *
 622 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
 623 * virtual or otherwise) has occurred.
 624 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
 625 * to keep an existing IUCV communication path established.
 626 * (Background: vhangup() is called from user space (by getty or login) to
 627 *		disable writing to the tty by other applications).
 628 * If the tty has been opened and an established IUCV path has been severed
 629 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
 630 *
 631 * Locking:	struct hvc_iucv_private->lock
 632 */
 633static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
 634{
 635	struct hvc_iucv_private *priv;
 636
 637	priv = hvc_iucv_get_private(id);
 638	if (!priv)
 639		return;
 640
 641	flush_sndbuf_sync(priv);
 642
 643	spin_lock_bh(&priv->lock);
 644	/* NOTE: If the hangup was scheduled by ourself (from the iucv
  645 *	 path_severed callback [IUCV_SEVERED]), we have to clean up
 646	 *	 our structure and to set state to TTY_CLOSED.
 647	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
 648	 *	 ignore this hangup and keep an established IUCV path open...
 649	 *	 (...the reason is that we are not able to connect back to the
 650	 *	 client if we disconnect on hang up) */
 651	priv->tty_state = TTY_CLOSED;
 652
 653	if (priv->iucv_state == IUCV_SEVERED)
 654		hvc_iucv_cleanup(priv);
 655	spin_unlock_bh(&priv->lock);
 656}
 657
  658/**
 659 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 660 * @hp:		Pointer to the HVC device (struct hvc_struct)
 661 * @id:		Additional data (originally passed to hvc_alloc):
  662 *		the index of a struct hvc_iucv_private instance.
 663 *
 664 * This routine notifies the HVC back-end that the last tty device fd has been
 665 * closed.  The function calls hvc_iucv_cleanup() to clean up the struct
  666 * hvc_iucv_private instance.
 667 *
 668 * Locking:	struct hvc_iucv_private->lock
 669 */
 670static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
 671{
 672	struct hvc_iucv_private *priv;
 673	struct iucv_path	*path;
 674
 675	priv = hvc_iucv_get_private(id);
 676	if (!priv)
 677		return;
 678
 679	flush_sndbuf_sync(priv);
 680
 681	spin_lock_bh(&priv->lock);
 682	path = priv->path;		/* save reference to IUCV path */
 683	priv->path = NULL;
  684	hvc_iucv_cleanup(priv);
 685	spin_unlock_bh(&priv->lock);
 686
 687	/* sever IUCV path outside of priv->lock due to lock ordering of:
 688	 * priv->lock <--> iucv_table_lock */
 689	if (path) {
 690		iucv_path_sever(path, NULL);
 691		iucv_path_free(path);
 692	}
 693}
 694
 695/**
 696 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 697 * @ipvmid:	Originating z/VM user ID (right padded with blanks)
 698 *
  699 * Returns 0 if the z/VM user ID @ipvmid is allowed to connect, otherwise
 700 * non-zero.
 701 */
 702static int hvc_iucv_filter_connreq(u8 ipvmid[8])
 703{
 704	size_t i;
 705
 706	/* Note: default policy is ACCEPT if no filter is set */
 707	if (!hvc_iucv_filter_size)
 708		return 0;
 709
 710	for (i = 0; i < hvc_iucv_filter_size; i++)
 711		if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
 712			return 0;
 713	return 1;
 714}
 715
 716/**
 717 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 718 * @path:	Pending path (struct iucv_path)
 719 * @ipvmid:	z/VM system identifier of originator
 720 * @ipuser:	User specified data for this path
 721 *		(AF_IUCV: port/service name and originator port)
 722 *
 723 * The function uses the @ipuser data to determine if the pending path belongs
 724 * to a terminal managed by this device driver.
 725 * If the path belongs to this driver, ensure that the terminal is not accessed
 726 * multiple times (only one connection to a terminal is allowed).
 727 * If the terminal is not yet connected, the pending path is accepted and is
 728 * associated to the appropriate struct hvc_iucv_private instance.
 729 *
  730 * Returns 0 if @path belongs to a terminal managed by this device driver;
 731 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 732 *
 733 * Locking:	struct hvc_iucv_private->lock
 734 */
 735static	int hvc_iucv_path_pending(struct iucv_path *path,
 736				  u8 ipvmid[8], u8 ipuser[16])
 737{
  738	struct hvc_iucv_private *priv;
 739	u8 nuser_data[16];
 740	u8 vm_user_id[9];
 741	int i, rc;
  742
 743	priv = NULL;
 744	for (i = 0; i < hvc_iucv_devices; i++)
 745		if (hvc_iucv_table[i] &&
 746		    (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
  747			priv = hvc_iucv_table[i];
 748			break;
 749		}
 750	if (!priv)
 751		return -ENODEV;
 752
 753	/* Enforce that ipvmid is allowed to connect to us */
 754	read_lock(&hvc_iucv_filter_lock);
 755	rc = hvc_iucv_filter_connreq(ipvmid);
 756	read_unlock(&hvc_iucv_filter_lock);
 757	if (rc) {
 758		iucv_path_sever(path, ipuser);
 759		iucv_path_free(path);
 760		memcpy(vm_user_id, ipvmid, 8);
 761		vm_user_id[8] = 0;
 762		pr_info("A connection request from z/VM user ID %s "
 763			"was refused\n", vm_user_id);
 764		return 0;
 765	}
 766
 767	spin_lock(&priv->lock);
 768
 769	/* If the terminal is already connected or being severed, then sever
 770	 * this path to enforce that there is only ONE established communication
 771	 * path per terminal. */
 772	if (priv->iucv_state != IUCV_DISCONN) {
 773		iucv_path_sever(path, ipuser);
 774		iucv_path_free(path);
 775		goto out_path_handled;
 776	}
 777
 778	/* accept path */
 779	memcpy(nuser_data, ipuser + 8, 8);  /* remote service (for af_iucv) */
 780	memcpy(nuser_data + 8, ipuser, 8);  /* local service  (for af_iucv) */
 781	path->msglim = 0xffff;		    /* IUCV MSGLIMIT */
 782	path->flags &= ~IUCV_IPRMDATA;	    /* TODO: use IUCV_IPRMDATA */
 783	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
 784	if (rc) {
 785		iucv_path_sever(path, ipuser);
 786		iucv_path_free(path);
 787		goto out_path_handled;
 788	}
 789	priv->path = path;
 790	priv->iucv_state = IUCV_CONNECTED;
  791
 792	/* flush buffered output data... */
 793	schedule_delayed_work(&priv->sndbuf_work, 5);
 794
 795out_path_handled:
 796	spin_unlock(&priv->lock);
 797	return 0;
 798}
 799
 800/**
 801 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 802 * @path:	Pending path (struct iucv_path)
 803 * @ipuser:	User specified data for this path
 804 *		(AF_IUCV: port/service name and originator port)
 805 *
 806 * This function calls the hvc_iucv_hangup() function for the
 807 * respective IUCV HVC terminal.
 808 *
 809 * Locking:	struct hvc_iucv_private->lock
 810 */
 811static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
 812{
 813	struct hvc_iucv_private *priv = path->private;
 814
 815	hvc_iucv_hangup(priv);
 816}
 817
 818/**
 819 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 820 * @path:	Pending path (struct iucv_path)
 821 * @msg:	Pointer to the IUCV message
 822 *
 823 * The function puts an incoming message on the input queue for later
 824 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 825 * If the tty has not yet been opened, the message is rejected.
 826 *
 827 * Locking:	struct hvc_iucv_private->lock
 828 */
 829static void hvc_iucv_msg_pending(struct iucv_path *path,
 830				 struct iucv_message *msg)
 831{
 832	struct hvc_iucv_private *priv = path->private;
 833	struct iucv_tty_buffer *rb;
 834
 835	/* reject messages that exceed max size of iucv_tty_msg->datalen */
 836	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
 837		iucv_message_reject(path, msg);
 838		return;
 839	}
 840
 841	spin_lock(&priv->lock);
 842
 843	/* reject messages if tty has not yet been opened */
 844	if (priv->tty_state == TTY_CLOSED) {
 845		iucv_message_reject(path, msg);
 846		goto unlock_return;
 847	}
 848
 849	/* allocate tty buffer to save iucv msg only */
 850	rb = alloc_tty_buffer(0, GFP_ATOMIC);
 851	if (!rb) {
 852		iucv_message_reject(path, msg);
 853		goto unlock_return;	/* -ENOMEM */
 854	}
 855	rb->msg = *msg;
 856
 857	list_add_tail(&rb->list, &priv->tty_inqueue);
 858
 859	hvc_kick();	/* wake up hvc thread */
 860
 861unlock_return:
 862	spin_unlock(&priv->lock);
 863}
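/* Flow note (sketch): only the IUCV message descriptor is queued above;
 * the actual payload is received later, in hvc_iucv_write(), once the HVC
 * thread woken by hvc_kick() calls hvc_iucv_get_chars() for this terminal. */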
 864
 865/**
 866 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 867 * @path:	Pending path (struct iucv_path)
 868 * @msg:	Pointer to the IUCV message
 869 *
 870 * The function is called upon completion of message delivery to remove the
  871 * message from the outqueue. Additional delivery information can be found in
 872 * msg->audit: rejected messages (0x040000 (IPADRJCT)), and
 873 *	       purged messages	 (0x010000 (IPADPGNR)).
 874 *
 875 * Locking:	struct hvc_iucv_private->lock
 876 */
 877static void hvc_iucv_msg_complete(struct iucv_path *path,
 878				  struct iucv_message *msg)
 879{
 880	struct hvc_iucv_private *priv = path->private;
 881	struct iucv_tty_buffer	*ent, *next;
 882	LIST_HEAD(list_remove);
 883
 884	spin_lock(&priv->lock);
 885	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
 886		if (ent->msg.id == msg->id) {
 887			list_move(&ent->list, &list_remove);
 888			break;
 889		}
 890	wake_up(&priv->sndbuf_waitq);
 891	spin_unlock(&priv->lock);
 892	destroy_tty_buffer_list(&list_remove);
 893}
 894
 895/**
 896 * hvc_iucv_pm_freeze() - Freeze PM callback
  897 * @dev:	IUCV HVC terminal device
 898 *
 899 * Sever an established IUCV communication path and
 900 * trigger a hang-up of the underlying HVC terminal.
 901 */
 902static int hvc_iucv_pm_freeze(struct device *dev)
 903{
 904	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
 905
 906	local_bh_disable();
 907	hvc_iucv_hangup(priv);
 908	local_bh_enable();
 909
 910	return 0;
 911}
 912
 913/**
 914 * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
  915 * @dev:	IUCV HVC terminal device
 916 *
 917 * Wake up the HVC thread to trigger hang-up and respective
 918 * HVC back-end notifier invocations.
 919 */
 920static int hvc_iucv_pm_restore_thaw(struct device *dev)
 921{
 922	hvc_kick();
 923	return 0;
 924}
  925
 926
 927/* HVC operations */
 928static const struct hv_ops hvc_iucv_ops = {
 929	.get_chars = hvc_iucv_get_chars,
 930	.put_chars = hvc_iucv_put_chars,
 931	.notifier_add = hvc_iucv_notifier_add,
 932	.notifier_del = hvc_iucv_notifier_del,
  933	.notifier_hangup = hvc_iucv_notifier_hangup,
 934};
 935
 936/* Suspend / resume device operations */
 937static const struct dev_pm_ops hvc_iucv_pm_ops = {
 938	.freeze	  = hvc_iucv_pm_freeze,
 939	.thaw	  = hvc_iucv_pm_restore_thaw,
 940	.restore  = hvc_iucv_pm_restore_thaw,
 941};
 942
 943/* IUCV HVC device driver */
 944static struct device_driver hvc_iucv_driver = {
 945	.name = KMSG_COMPONENT,
 946	.bus  = &iucv_bus,
 947	.pm   = &hvc_iucv_pm_ops,
 948};
  949
 950/**
 951 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
 952 * @id:			hvc_iucv_table index
 953 * @is_console:		Flag if the instance is used as Linux console
 954 *
 955 * This function allocates a new hvc_iucv_private structure and stores
 956 * the instance in hvc_iucv_table at index @id.
 957 * Returns 0 on success; otherwise non-zero.
 958 */
 959static int __init hvc_iucv_alloc(int id, unsigned int is_console)
 960{
 961	struct hvc_iucv_private *priv;
 962	char name[9];
 963	int rc;
 964
 965	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
 966	if (!priv)
 967		return -ENOMEM;
 968
 969	spin_lock_init(&priv->lock);
 970	INIT_LIST_HEAD(&priv->tty_outqueue);
 971	INIT_LIST_HEAD(&priv->tty_inqueue);
 972	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
 973	init_waitqueue_head(&priv->sndbuf_waitq);
 974
 975	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
 976	if (!priv->sndbuf) {
 977		kfree(priv);
 978		return -ENOMEM;
 979	}
 980
 981	/* set console flag */
 982	priv->is_console = is_console;
 983
 984	/* allocate hvc device */
 985	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /*		  PAGE_SIZE */
 986			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
 987	if (IS_ERR(priv->hvc)) {
 988		rc = PTR_ERR(priv->hvc);
 989		goto out_error_hvc;
 990	}
 991
 992	/* notify HVC thread instead of using polling */
 993	priv->hvc->irq_requested = 1;
 994
 995	/* setup iucv related information */
 996	snprintf(name, 9, "lnxhvc%-2d", id);
 997	memcpy(priv->srv_name, name, 8);
 998	ASCEBC(priv->srv_name, 8);
 999
1000	/* create and setup device */
1001	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
1002	if (!priv->dev) {
1003		rc = -ENOMEM;
1004		goto out_error_dev;
1005	}
1006	dev_set_name(priv->dev, "hvc_iucv%d", id);
1007	dev_set_drvdata(priv->dev, priv);
1008	priv->dev->bus = &iucv_bus;
1009	priv->dev->parent = iucv_root;
 1010	priv->dev->driver = &hvc_iucv_driver;
1011	priv->dev->release = (void (*)(struct device *)) kfree;
1012	rc = device_register(priv->dev);
1013	if (rc) {
1014		put_device(priv->dev);
1015		goto out_error_dev;
1016	}
1017
1018	hvc_iucv_table[id] = priv;
1019	return 0;
1020
1021out_error_dev:
1022	hvc_remove(priv->hvc);
1023out_error_hvc:
1024	free_page((unsigned long) priv->sndbuf);
1025	kfree(priv);
1026
1027	return rc;
1028}
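/* Naming note (sketch): the service name is built from "lnxhvc%-2d", so the
 * terminals register as "lnxhvc0 ", "lnxhvc1 ", ... (blank padded to 8 bytes
 * and converted to EBCDIC by ASCEBC()).  A terminal client connects to that
 * IUCV service name, which hvc_iucv_path_pending() compares against srv_name. */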
1029
1030/**
1031 * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances
1032 */
1033static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
1034{
1035	hvc_remove(priv->hvc);
1036	device_unregister(priv->dev);
1037	free_page((unsigned long) priv->sndbuf);
1038	kfree(priv);
1039}
1040
1041/**
1042 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
1043 * @filter:	String containing a comma-separated list of z/VM user IDs
1044 */
1045static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
1046{
1047	const char *nextdelim, *residual;
1048	size_t len;
1049
1050	nextdelim = strchr(filter, ',');
1051	if (nextdelim) {
1052		len = nextdelim - filter;
1053		residual = nextdelim + 1;
1054	} else {
1055		len = strlen(filter);
1056		residual = filter + len;
1057	}
1058
1059	if (len == 0)
1060		return ERR_PTR(-EINVAL);
1061
1062	/* check for '\n' (if called from sysfs) */
1063	if (filter[len - 1] == '\n')
1064		len--;
1065
1066	if (len > 8)
1067		return ERR_PTR(-EINVAL);
1068
1069	/* pad with blanks and save upper case version of user ID */
1070	memset(dest, ' ', 8);
1071	while (len--)
1072		dest[len] = toupper(filter[len]);
1073	return residual;
1074}
1075
1076/**
1077 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
1078 * @filter:	String consisting of a comma-separated list of z/VM user IDs
1079 *
1080 * The function parses the @filter string and creates an array containing
1081 * the list of z/VM user ID filter entries.
1082 * Return code 0 means success, -EINVAL if the filter is syntactically
1083 * incorrect, -ENOMEM if there was not enough memory to allocate the
1084 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
1085 */
1086static int hvc_iucv_setup_filter(const char *val)
1087{
1088	const char *residual;
1089	int err;
1090	size_t size, count;
1091	void *array, *old_filter;
1092
1093	count = strlen(val);
1094	if (count == 0 || (count == 1 && val[0] == '\n')) {
1095		size  = 0;
1096		array = NULL;
1097		goto out_replace_filter;	/* clear filter */
1098	}
1099
1100	/* count user IDs in order to allocate sufficient memory */
1101	size = 1;
1102	residual = val;
1103	while ((residual = strchr(residual, ',')) != NULL) {
1104		residual++;
1105		size++;
1106	}
1107
1108	/* check if the specified list exceeds the filter limit */
1109	if (size > MAX_VMID_FILTER)
1110		return -ENOSPC;
1111
1112	array = kzalloc(size * 8, GFP_KERNEL);
1113	if (!array)
1114		return -ENOMEM;
1115
1116	count = size;
1117	residual = val;
1118	while (*residual && count) {
1119		residual = hvc_iucv_parse_filter(residual,
1120						 array + ((size - count) * 8));
1121		if (IS_ERR(residual)) {
1122			err = PTR_ERR(residual);
1123			kfree(array);
1124			goto out_err;
1125		}
1126		count--;
1127	}
1128
1129out_replace_filter:
1130	write_lock_bh(&hvc_iucv_filter_lock);
1131	old_filter = hvc_iucv_filter;
1132	hvc_iucv_filter_size = size;
1133	hvc_iucv_filter = array;
1134	write_unlock_bh(&hvc_iucv_filter_lock);
1135	kfree(old_filter);
1136
1137	err = 0;
1138out_err:
1139	return err;
1140}
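/* Example (hypothetical user IDs): the filter string "lxtcp01,lxtcp02" is
 * parsed into two 8-byte entries, "LXTCP01 " and "LXTCP02 ", upper-cased and
 * right-padded with blanks so that hvc_iucv_filter_connreq() can memcmp()
 * them against the blank-padded ipvmid of an incoming connection request. */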
1141
1142/**
1143 * param_set_vmidfilter() - Set z/VM user ID filter parameter
1144 * @val:	String consisting of a comma-separated list of z/VM user IDs
1145 * @kp:		Kernel parameter pointing to hvc_iucv_filter array
1146 *
1147 * The function sets up the z/VM user ID filter specified as comma-separated
1148 * list of user IDs in @val.
1149 * Note: If it is called early in the boot process, @val is stored and
1150 *	 parsed later in hvc_iucv_init().
1151 */
1152static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
1153{
1154	int rc;
1155
1156	if (!MACHINE_IS_VM || !hvc_iucv_devices)
1157		return -ENODEV;
1158
1159	if (!val)
1160		return -EINVAL;
1161
1162	rc = 0;
1163	if (slab_is_available())
1164		rc = hvc_iucv_setup_filter(val);
1165	else
1166		hvc_iucv_filter_string = val;	/* defer... */
1167	return rc;
1168}
1169
1170/**
1171 * param_get_vmidfilter() - Get z/VM user ID filter
1172 * @buffer:	Buffer to store z/VM user ID filter,
 1173 *		(assumed buffer size: PAGE_SIZE)
1174 * @kp:		Kernel parameter pointing to the hvc_iucv_filter array
1175 *
1176 * The function stores the filter as a comma-separated list of z/VM user IDs
1177 * in @buffer. Typically, sysfs routines call this function for attr show.
1178 */
1179static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
1180{
1181	int rc;
1182	size_t index, len;
1183	void *start, *end;
1184
1185	if (!MACHINE_IS_VM || !hvc_iucv_devices)
1186		return -ENODEV;
1187
1188	rc = 0;
1189	read_lock_bh(&hvc_iucv_filter_lock);
1190	for (index = 0; index < hvc_iucv_filter_size; index++) {
1191		start = hvc_iucv_filter + (8 * index);
1192		end   = memchr(start, ' ', 8);
1193		len   = (end) ? end - start : 8;
1194		memcpy(buffer + rc, start, len);
1195		rc += len;
1196		buffer[rc++] = ',';
1197	}
1198	read_unlock_bh(&hvc_iucv_filter_lock);
1199	if (rc)
1200		buffer[--rc] = '\0';	/* replace last comma and update rc */
1201	return rc;
1202}
1203
1204#define param_check_vmidfilter(name, p) __param_check(name, p, void)
1205
1206static struct kernel_param_ops param_ops_vmidfilter = {
1207	.set = param_set_vmidfilter,
1208	.get = param_get_vmidfilter,
1209};
1210
1211/**
1212 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
1213 */
1214static int __init hvc_iucv_init(void)
1215{
1216	int rc;
1217	unsigned int i;
1218
1219	if (!hvc_iucv_devices)
1220		return -ENODEV;
1221
1222	if (!MACHINE_IS_VM) {
1223		pr_notice("The z/VM IUCV HVC device driver cannot "
1224			   "be used without z/VM\n");
1225		rc = -ENODEV;
1226		goto out_error;
1227	}
1228
1229	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
1230		pr_err("%lu is not a valid value for the hvc_iucv= "
1231			"kernel parameter\n", hvc_iucv_devices);
1232		rc = -EINVAL;
1233		goto out_error;
1234	}
1235
1236	/* register IUCV HVC device driver */
1237	rc = driver_register(&hvc_iucv_driver);
1238	if (rc)
1239		goto out_error;
1240
1241	/* parse hvc_iucv_allow string and create z/VM user ID filter list */
1242	if (hvc_iucv_filter_string) {
1243		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
1244		switch (rc) {
1245		case 0:
1246			break;
1247		case -ENOMEM:
1248			pr_err("Allocating memory failed with "
1249				"reason code=%d\n", 3);
1250			goto out_error;
1251		case -EINVAL:
1252			pr_err("hvc_iucv_allow= does not specify a valid "
1253				"z/VM user ID list\n");
1254			goto out_error;
1255		case -ENOSPC:
1256			pr_err("hvc_iucv_allow= specifies too many "
1257				"z/VM user IDs\n");
1258			goto out_error;
1259		default:
1260			goto out_error;
1261		}
1262	}
1263
1264	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
1265					   sizeof(struct iucv_tty_buffer),
1266					   0, 0, NULL);
1267	if (!hvc_iucv_buffer_cache) {
1268		pr_err("Allocating memory failed with reason code=%d\n", 1);
1269		rc = -ENOMEM;
1270		goto out_error;
1271	}
1272
1273	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
1274						    hvc_iucv_buffer_cache);
1275	if (!hvc_iucv_mempool) {
1276		pr_err("Allocating memory failed with reason code=%d\n", 2);
1277		kmem_cache_destroy(hvc_iucv_buffer_cache);
1278		rc = -ENOMEM;
1279		goto out_error;
1280	}
1281
1282	/* register the first terminal device as console
1283	 * (must be done before allocating hvc terminal devices) */
1284	rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
1285	if (rc) {
1286		pr_err("Registering HVC terminal device as "
1287		       "Linux console failed\n");
1288		goto out_error_memory;
1289	}
1290
1291	/* allocate hvc_iucv_private structs */
1292	for (i = 0; i < hvc_iucv_devices; i++) {
1293		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
1294		if (rc) {
1295			pr_err("Creating a new HVC terminal device "
1296				"failed with error code=%d\n", rc);
1297			goto out_error_hvc;
1298		}
1299	}
1300
1301	/* register IUCV callback handler */
1302	rc = iucv_register(&hvc_iucv_handler, 0);
1303	if (rc) {
1304		pr_err("Registering IUCV handlers failed with error code=%d\n",
1305			rc);
1306		goto out_error_hvc;
1307	}
1308
1309	return 0;
1310
1311out_error_hvc:
1312	for (i = 0; i < hvc_iucv_devices; i++)
1313		if (hvc_iucv_table[i])
1314			hvc_iucv_destroy(hvc_iucv_table[i]);
1315out_error_memory:
1316	mempool_destroy(hvc_iucv_mempool);
1317	kmem_cache_destroy(hvc_iucv_buffer_cache);
1318out_error:
1319	if (hvc_iucv_filter)
1320		kfree(hvc_iucv_filter);
1321	hvc_iucv_devices = 0; /* ensure that we do not provide any device */
1322	return rc;
1323}
1324
1325/**
1326 * hvc_iucv_config() - Parsing of hvc_iucv=  kernel command line parameter
1327 * @val:	Parameter value (numeric)
1328 */
1329static	int __init hvc_iucv_config(char *val)
1330{
1331	 return strict_strtoul(val, 10, &hvc_iucv_devices);
1332}
1333
1334
1335device_initcall(hvc_iucv_init);
1336__setup("hvc_iucv=", hvc_iucv_config);
1337core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
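
/* Usage sketch (hypothetical z/VM user IDs): booting a z/VM guest with
 *     hvc_iucv=4 hvc_iucv_allow=lxtcp01,lxtcp02
 * provides four HVC terminal lines (the first one usable as Linux console)
 * and accepts IUCV connections only from the two listed z/VM user IDs.
 * The filter can also be changed later through sysfs, since hvc_iucv_allow
 * is registered writable (mode 0640) via core_param(). */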
v3.15
   1/*
   2 * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
   3 *
   4 * This HVC device driver provides terminal access using
   5 * z/VM IUCV communication paths.
   6 *
   7 * Copyright IBM Corp. 2008, 2009
   8 *
   9 * Author(s):	Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  10 */
  11#define KMSG_COMPONENT		"hvc_iucv"
  12#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt
  13
  14#include <linux/types.h>
  15#include <linux/slab.h>
  16#include <asm/ebcdic.h>
  17#include <linux/ctype.h>
  18#include <linux/delay.h>
  19#include <linux/device.h>
  20#include <linux/init.h>
  21#include <linux/mempool.h>
  22#include <linux/moduleparam.h>
  23#include <linux/tty.h>
  24#include <linux/wait.h>
  25#include <net/iucv/iucv.h>
  26
  27#include "hvc_console.h"
  28
  29
  30/* General device driver settings */
  31#define HVC_IUCV_MAGIC		0xc9e4c3e5
  32#define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS
  33#define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
  34
  35/* IUCV TTY message  */
  36#define MSG_VERSION		0x02	/* Message version */
  37#define MSG_TYPE_ERROR		0x01	/* Error message */
  38#define MSG_TYPE_TERMENV	0x02	/* Terminal environment variable */
  39#define MSG_TYPE_TERMIOS	0x04	/* Terminal IO struct update */
  40#define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */
  41#define MSG_TYPE_DATA		0x10	/* Terminal data */
  42
  43struct iucv_tty_msg {
  44	u8	version;		/* Message version */
  45	u8	type;			/* Message type */
  46#define MSG_MAX_DATALEN		((u16)(~0))
  47	u16	datalen;		/* Payload length */
  48	u8	data[];			/* Payload buffer */
  49} __attribute__((packed));
  50#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))
  51
  52enum iucv_state_t {
  53	IUCV_DISCONN	= 0,
  54	IUCV_CONNECTED	= 1,
  55	IUCV_SEVERED	= 2,
  56};
  57
  58enum tty_state_t {
  59	TTY_CLOSED	= 0,
  60	TTY_OPENED	= 1,
  61};
  62
  63struct hvc_iucv_private {
  64	struct hvc_struct	*hvc;		/* HVC struct reference */
  65	u8			srv_name[8];	/* IUCV service name (ebcdic) */
  66	unsigned char		is_console;	/* Linux console usage flag */
  67	enum iucv_state_t	iucv_state;	/* IUCV connection status */
  68	enum tty_state_t	tty_state;	/* TTY status */
  69	struct iucv_path	*path;		/* IUCV path pointer */
  70	spinlock_t		lock;		/* hvc_iucv_private lock */
  71#define SNDBUF_SIZE		(PAGE_SIZE)	/* must be < MSG_MAX_DATALEN */
  72	void			*sndbuf;	/* send buffer		  */
  73	size_t			sndbuf_len;	/* length of send buffer  */
  74#define QUEUE_SNDBUF_DELAY	(HZ / 25)
  75	struct delayed_work	sndbuf_work;	/* work: send iucv msg(s) */
  76	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */
  77	struct list_head	tty_outqueue;	/* outgoing IUCV messages */
  78	struct list_head	tty_inqueue;	/* incoming IUCV messages */
  79	struct device		*dev;		/* device structure */
  80	u8			info_path[16];	/* IUCV path info (dev attr) */
  81};
  82
  83struct iucv_tty_buffer {
  84	struct list_head	list;	/* list pointer */
  85	struct iucv_message	msg;	/* store an IUCV message */
  86	size_t			offset;	/* data buffer offset */
  87	struct iucv_tty_msg	*mbuf;	/* buffer to store input/output data */
  88};
  89
  90/* IUCV callback handler */
  91static	int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
  92static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
  93static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
  94static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
  95
  96
  97/* Kernel module parameter: use one terminal device as default */
  98static unsigned long hvc_iucv_devices = 1;
  99
 100/* Array of allocated hvc iucv tty lines... */
 101static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
 102#define IUCV_HVC_CON_IDX	(0)
 103/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
 104#define MAX_VMID_FILTER		(500)
 105static size_t hvc_iucv_filter_size;
 106static void *hvc_iucv_filter;
 107static const char *hvc_iucv_filter_string;
 108static DEFINE_RWLOCK(hvc_iucv_filter_lock);
 109
 110/* Kmem cache and mempool for iucv_tty_buffer elements */
 111static struct kmem_cache *hvc_iucv_buffer_cache;
 112static mempool_t *hvc_iucv_mempool;
 113
 114/* IUCV handler callback functions */
 115static struct iucv_handler hvc_iucv_handler = {
 116	.path_pending  = hvc_iucv_path_pending,
 117	.path_severed  = hvc_iucv_path_severed,
 118	.message_complete = hvc_iucv_msg_complete,
 119	.message_pending  = hvc_iucv_msg_pending,
 120};
 121
 122
 123/**
 124 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
 125 * @num:	The HVC virtual terminal number (vtermno)
 126 *
 127 * This function returns the struct hvc_iucv_private instance that corresponds
 128 * to the HVC virtual terminal number specified as parameter @num.
 129 */
 130static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
 131{
 132	if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
 133		return NULL;
 134	return hvc_iucv_table[num - HVC_IUCV_MAGIC];
 135}
 136
 137/**
 138 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
 139 * @size:	Size of the internal buffer used to store data.
 140 * @flags:	Memory allocation flags passed to mempool.
 141 *
 142 * This function allocates a new struct iucv_tty_buffer element and, optionally,
 143 * allocates an internal data buffer with the specified size @size.
 144 * The internal data buffer is always allocated with GFP_DMA which is
 145 * required for receiving and sending data with IUCV.
 146 * Note: The total message size arises from the internal buffer size and the
 147 *	 members of the iucv_tty_msg structure.
 148 * The function returns NULL if memory allocation has failed.
 149 */
 150static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
 151{
 152	struct iucv_tty_buffer *bufp;
 153
 154	bufp = mempool_alloc(hvc_iucv_mempool, flags);
 155	if (!bufp)
 156		return NULL;
 157	memset(bufp, 0, sizeof(*bufp));
 158
 159	if (size > 0) {
 160		bufp->msg.length = MSG_SIZE(size);
 161		bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
 162		if (!bufp->mbuf) {
 163			mempool_free(bufp, hvc_iucv_mempool);
 164			return NULL;
 165		}
 166		bufp->mbuf->version = MSG_VERSION;
 167		bufp->mbuf->type    = MSG_TYPE_DATA;
 168		bufp->mbuf->datalen = (u16) size;
 169	}
 170	return bufp;
 171}
 172
 173/**
 174 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
 175 * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
 176 */
 177static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
 178{
 179	kfree(bufp->mbuf);
 180	mempool_free(bufp, hvc_iucv_mempool);
 181}
 182
 183/**
 184 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
 185 * @list:	List containing struct iucv_tty_buffer elements.
 186 */
 187static void destroy_tty_buffer_list(struct list_head *list)
 188{
 189	struct iucv_tty_buffer *ent, *next;
 190
 191	list_for_each_entry_safe(ent, next, list, list) {
 192		list_del(&ent->list);
 193		destroy_tty_buffer(ent);
 194	}
 195}
 196
 197/**
 198 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 199 * @priv:		Pointer to struct hvc_iucv_private
 200 * @buf:		HVC buffer for writing received terminal data.
 201 * @count:		HVC buffer size.
 202 * @has_more_data:	Pointer to an int variable.
 203 *
 204 * The function picks up pending messages from the input queue and receives
 205 * the message data that is then written to the specified buffer @buf.
 206 * If the buffer size @count is less than the data message size, the
 207 * message is kept on the input queue and @has_more_data is set to 1.
 208 * If all message data has been written, the message is removed from
 209 * the input queue.
 210 *
 211 * The function returns the number of bytes written to the terminal, zero if
 212 * there are no pending data messages available or if there is no established
 213 * IUCV path.
 214 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 215 * hang up (that is issued by the HVC layer).
 216 */
 217static int hvc_iucv_write(struct hvc_iucv_private *priv,
 218			  char *buf, int count, int *has_more_data)
 219{
 220	struct iucv_tty_buffer *rb;
 221	int written;
 222	int rc;
 223
 224	/* immediately return if there is no IUCV connection */
 225	if (priv->iucv_state == IUCV_DISCONN)
 226		return 0;
 227
 228	/* if the IUCV path has been severed, return -EPIPE to inform the
 229	 * HVC layer to hang up the tty device. */
 230	if (priv->iucv_state == IUCV_SEVERED)
 231		return -EPIPE;
 232
 233	/* check if there are pending messages */
 234	if (list_empty(&priv->tty_inqueue))
 235		return 0;
 236
 237	/* receive an iucv message and flip data to the tty (ldisc) */
 238	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
 239
 240	written = 0;
 241	if (!rb->mbuf) { /* message not yet received ... */
 242		/* allocate mem to store msg data; if no memory is available
 243		 * then leave the buffer on the list and re-try later */
 244		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
 245		if (!rb->mbuf)
 246			return -ENOMEM;
 247
 248		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
 249					    rb->mbuf, rb->msg.length, NULL);
 250		switch (rc) {
 251		case 0: /* Successful	    */
 252			break;
 253		case 2:	/* No message found */
 254		case 9: /* Message purged   */
 255			break;
 256		default:
 257			written = -EIO;
 258		}
 259		/* remove buffer if an error has occurred or received data
 260		 * is not correct */
 261		if (rc || (rb->mbuf->version != MSG_VERSION) ||
 262			  (rb->msg.length    != MSG_SIZE(rb->mbuf->datalen)))
 263			goto out_remove_buffer;
 264	}
 265
 266	switch (rb->mbuf->type) {
 267	case MSG_TYPE_DATA:
 268		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
 269		memcpy(buf, rb->mbuf->data + rb->offset, written);
 270		if (written < (rb->mbuf->datalen - rb->offset)) {
 271			rb->offset += written;
 272			*has_more_data = 1;
 273			goto out_written;
 274		}
 275		break;
 276
 277	case MSG_TYPE_WINSIZE:
 278		if (rb->mbuf->datalen != sizeof(struct winsize))
 279			break;
 280		/* The caller must ensure that the hvc is locked, which
 281		 * is the case when called from hvc_iucv_get_chars() */
 282		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
 283		break;
 284
 285	case MSG_TYPE_ERROR:	/* ignored ... */
 286	case MSG_TYPE_TERMENV:	/* ignored ... */
 287	case MSG_TYPE_TERMIOS:	/* ignored ... */
 288		break;
 289	}
 290
 291out_remove_buffer:
 292	list_del(&rb->list);
 293	destroy_tty_buffer(rb);
 294	*has_more_data = !list_empty(&priv->tty_inqueue);
 295
 296out_written:
 297	return written;
 298}
 299
 300/**
 301 * hvc_iucv_get_chars() - HVC get_chars operation.
 302 * @vtermno:	HVC virtual terminal number.
 303 * @buf:	Pointer to a buffer to store data
 304 * @count:	Size of buffer available for writing
 305 *
 306 * The HVC thread calls this method to read characters from the back-end.
 307 * If an IUCV communication path has been established, pending IUCV messages
 308 * are received and data is copied into buffer @buf up to @count bytes.
 309 *
 310 * Locking:	The routine gets called under an irqsave() spinlock; and
 311 *		the routine locks the struct hvc_iucv_private->lock to call
 312 *		helper functions.
 313 */
 314static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
 315{
 316	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
 317	int written;
 318	int has_more_data;
 319
 320	if (count <= 0)
 321		return 0;
 322
 323	if (!priv)
 324		return -ENODEV;
 325
 326	spin_lock(&priv->lock);
 327	has_more_data = 0;
 328	written = hvc_iucv_write(priv, buf, count, &has_more_data);
 329	spin_unlock(&priv->lock);
 330
 331	/* if there are still messages on the queue... schedule another run */
 332	if (has_more_data)
 333		hvc_kick();
 334
 335	return written;
 336}
 337
 338/**
 339 * hvc_iucv_queue() - Buffer terminal data for sending.
 340 * @priv:	Pointer to struct hvc_iucv_private instance.
 341 * @buf:	Buffer containing data to send.
 342 * @count:	Size of buffer and amount of data to send.
 343 *
 344 * The function queues data for sending. To actually send the buffered data,
 345 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
 346 * The function returns the number of data bytes that has been buffered.
 347 *
 348 * If the device is not connected, data is ignored and the function returns
 349 * @count.
 350 * If the buffer is full, the function returns 0.
 351 * If an existing IUCV communicaton path has been severed, -EPIPE is returned
 352 * (that can be passed to HVC layer to cause a tty hangup).
 353 */
 354static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
 355			  int count)
 356{
 357	size_t len;
 358
 359	if (priv->iucv_state == IUCV_DISCONN)
 360		return count;			/* ignore data */
 361
 362	if (priv->iucv_state == IUCV_SEVERED)
 363		return -EPIPE;
 364
 365	len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
 366	if (!len)
 367		return 0;
 368
 369	memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
 370	priv->sndbuf_len += len;
 371
 372	if (priv->iucv_state == IUCV_CONNECTED)
 373		schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
 374
 375	return len;
 376}
 377
 378/**
 379 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 380 * @priv:	Pointer to struct hvc_iucv_private instance.
 381 *
 382 * If an IUCV communication path has been established, the buffered output data
 383 * is sent via an IUCV message and the number of bytes sent is returned.
 384 * Returns 0 if there is no established IUCV communication path or
 385 * -EPIPE if an existing IUCV communication path has been severed.
 386 */
 387static int hvc_iucv_send(struct hvc_iucv_private *priv)
 388{
 389	struct iucv_tty_buffer *sb;
 390	int rc, len;
 391
 392	if (priv->iucv_state == IUCV_SEVERED)
 393		return -EPIPE;
 394
 395	if (priv->iucv_state == IUCV_DISCONN)
 396		return -EIO;
 397
 398	if (!priv->sndbuf_len)
 399		return 0;
 400
 401	/* allocate internal buffer to store msg data and also compute total
 402	 * message length */
 403	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
 404	if (!sb)
 405		return -ENOMEM;
 406
 407	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
 408	sb->mbuf->datalen = (u16) priv->sndbuf_len;
 409	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);
 410
 411	list_add_tail(&sb->list, &priv->tty_outqueue);
 412
 413	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
 414				 (void *) sb->mbuf, sb->msg.length);
 415	if (rc) {
 416		/* drop the message here; however we might want to handle
 417		 * 0x03 (msg limit reached) by trying again... */
 418		list_del(&sb->list);
 419		destroy_tty_buffer(sb);
 420	}
 421	len = priv->sndbuf_len;
 422	priv->sndbuf_len = 0;
 423
 424	return len;
 425}
 426
 427/**
 428 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
 429 * @work:	Work structure.
 430 *
 431 * This work queue function sends buffered output data over IUCV and,
 432 * if not all buffered data could be sent, reschedules itself.
 433 */
 434static void hvc_iucv_sndbuf_work(struct work_struct *work)
 435{
 436	struct hvc_iucv_private *priv;
 437
 438	priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
 439	if (!priv)
 440		return;
 441
 442	spin_lock_bh(&priv->lock);
 443	hvc_iucv_send(priv);
 444	spin_unlock_bh(&priv->lock);
 445}
 446
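/*
 * Data-path sketch (editor's note, not part of the original source),
 * assuming an established IUCV connection:
 *
 *	hvc_iucv_put_chars()
 *	  -> hvc_iucv_queue()		copies data into priv->sndbuf and
 *					schedules sndbuf_work after
 *					QUEUE_SNDBUF_DELAY (HZ / 25 jiffies,
 *					i.e. 40 ms) to batch small writes
 *	hvc_iucv_sndbuf_work()
 *	  -> hvc_iucv_send()		wraps the buffered data in an
 *					iucv_tty_msg and sends it; the buffer
 *					stays on tty_outqueue until
 *					hvc_iucv_msg_complete() runs
 */
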
 447/**
 448 * hvc_iucv_put_chars() - HVC put_chars operation.
 449 * @vtermno:	HVC virtual terminal number.
 450 * @buf:	Pointer to a buffer to read data from
 451 * @count:	Size of buffer available for reading
 452 *
 453 * The HVC thread calls this method to write characters to the back-end.
 454 * The function calls hvc_iucv_queue() to queue terminal data for sending.
 455 *
 456 * Locking:	The method gets called under an irqsave() spinlock; and
 457 *		locks struct hvc_iucv_private->lock.
 458 */
 459static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
 460{
 461	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
 462	int queued;
 463
 464	if (count <= 0)
 465		return 0;
 466
 467	if (!priv)
 468		return -ENODEV;
 469
 470	spin_lock(&priv->lock);
 471	queued = hvc_iucv_queue(priv, buf, count);
 472	spin_unlock(&priv->lock);
 473
 474	return queued;
 475}
 476
 477/**
 478 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
 479 * @hp:	Pointer to the HVC device (struct hvc_struct)
 480 * @id:	Additional data (originally passed to hvc_alloc): the index of a struct
 481 *	hvc_iucv_private instance.
 482 *
 483 * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
 484 * instance that is derived from @id. Always returns 0.
 485 *
 486 * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
 487 */
 488static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
 489{
 490	struct hvc_iucv_private *priv;
 491
 492	priv = hvc_iucv_get_private(id);
 493	if (!priv)
 494		return 0;
 495
 496	spin_lock_bh(&priv->lock);
 497	priv->tty_state = TTY_OPENED;
 498	spin_unlock_bh(&priv->lock);
 499
 500	return 0;
 501}
 502
 503/**
 504 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
 505 * @priv:	Pointer to the struct hvc_iucv_private instance.
 506 */
 507static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
 508{
 509	destroy_tty_buffer_list(&priv->tty_outqueue);
 510	destroy_tty_buffer_list(&priv->tty_inqueue);
 511
 512	priv->tty_state = TTY_CLOSED;
 513	priv->iucv_state = IUCV_DISCONN;
 514
 515	priv->sndbuf_len = 0;
 516}
 517
 518/**
 519 * tty_outqueue_empty() - Test if the tty outq is empty
 520 * @priv:	Pointer to struct hvc_iucv_private instance.
 521 */
 522static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
 523{
 524	int rc;
 525
 526	spin_lock_bh(&priv->lock);
 527	rc = list_empty(&priv->tty_outqueue);
 528	spin_unlock_bh(&priv->lock);
 529
 530	return rc;
 531}
 532
 533/**
 534 * flush_sndbuf_sync() - Flush send buffer and wait for completion
 535 * @priv:	Pointer to struct hvc_iucv_private instance.
 536 *
 537 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
 538 * to flush any buffered terminal output data and waits for completion.
 539 */
 540static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
 541{
 542	int sync_wait;
 543
 544	cancel_delayed_work_sync(&priv->sndbuf_work);
 545
 546	spin_lock_bh(&priv->lock);
 547	hvc_iucv_send(priv);		/* force sending buffered data */
 548	sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
 549	spin_unlock_bh(&priv->lock);
 550
 551	if (sync_wait)
 552		wait_event_timeout(priv->sndbuf_waitq,
 553				   tty_outqueue_empty(priv), HZ/10);
 554}
 555
 556/**
 557 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
 558 * @priv:	Pointer to hvc_iucv_private structure
 559 *
 560 * This routine severs an existing IUCV communication path and hangs
 561 * up the underlying HVC terminal device.
 562 * The hang-up occurs only if an IUCV communication path is established;
 563 * otherwise there is no need to hang up the terminal device.
 564 *
 565 * The IUCV HVC hang-up is separated into two steps:
 566 * 1. After the IUCV path has been severed, the iucv_state is set to
 567 *    IUCV_SEVERED.
 568 * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
 569 *    IUCV_SEVERED state causes the tty hang-up in the HVC layer.
 570 *
 571 * If the tty has not yet been opened, clean up the hvc_iucv_private
 572 * structure to allow re-connects.
 573 * If the tty has been opened, wake up the HVC thread so that its next
 574 * get_chars() call returns -EPIPE, which signals the HVC layer to hang
 575 * up the tty.
 576 *
 577 * Special notes on hanging up a HVC terminal instantiated as console:
 578 * Hang-up:	1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
 579 *		2. do_tty_hangup() calls tty->ops->close() for console_filp
 580 *			=> no hangup notifier is called by HVC (default)
 581 *		3. hvc_close() returns because of tty_hung_up_p(filp)
 582 *			=> no delete notifier is called!
 583 * Finally, the back-end is not notified; the tty session is therefore
 584 * kept active (TTY_OPENED) to be ready for re-connects.
 585 *
 586 * Locking:	spin_lock(&priv->lock) w/o disabling bh
 587 */
 588static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
 589{
 590	struct iucv_path *path;
 591
 592	path = NULL;
 593	spin_lock(&priv->lock);
 594	if (priv->iucv_state == IUCV_CONNECTED) {
 595		path = priv->path;
 596		priv->path = NULL;
 597		priv->iucv_state = IUCV_SEVERED;
 598		if (priv->tty_state == TTY_CLOSED)
 599			hvc_iucv_cleanup(priv);
 600		else
 601			/* console is special (see above) */
 602			if (priv->is_console) {
 603				hvc_iucv_cleanup(priv);
 604				priv->tty_state = TTY_OPENED;
 605			} else
 606				hvc_kick();
 607	}
 608	spin_unlock(&priv->lock);
 609
 610	/* finally sever path (outside of priv->lock due to lock ordering) */
 611	if (path) {
 612		iucv_path_sever(path, NULL);
 613		iucv_path_free(path);
 614	}
 615}
 616
 617/**
 618 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 619 * @hp:		Pointer to the HVC device (struct hvc_struct)
 620 * @id:		Additional data (originally passed to hvc_alloc):
 621 *		the index of a struct hvc_iucv_private instance.
 622 *
 623 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
 624 * virtual or otherwise) has occurred.
 625 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
 626 * to keep an existing IUCV communication path established.
 627 * (Background: vhangup() is called from user space (by getty or login) to
 628 *		disable writing to the tty by other applications).
 629 * If the tty has been opened and an established IUCV path has been severed
 630 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
 631 *
 632 * Locking:	struct hvc_iucv_private->lock
 633 */
 634static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
 635{
 636	struct hvc_iucv_private *priv;
 637
 638	priv = hvc_iucv_get_private(id);
 639	if (!priv)
 640		return;
 641
 642	flush_sndbuf_sync(priv);
 643
 644	spin_lock_bh(&priv->lock);
 645	/* NOTE: If the hangup was scheduled by ourselves (from the iucv
 646	 *	 path_severed callback [IUCV_SEVERED]), we have to clean up
 647	 *	 our structure and set the state to TTY_CLOSED.
 648	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
 649	 *	 ignore this hangup and keep an established IUCV path open...
 650	 *	 (...the reason is that we are not able to connect back to the
 651	 *	 client if we disconnect on hang up) */
 652	priv->tty_state = TTY_CLOSED;
 653
 654	if (priv->iucv_state == IUCV_SEVERED)
 655		hvc_iucv_cleanup(priv);
 656	spin_unlock_bh(&priv->lock);
 657}
 658
 659/**
 660 * hvc_iucv_dtr_rts() - HVC notifier for handling DTR/RTS
 661 * @hp:		Pointer to the HVC device (struct hvc_struct)
 662 * @raise:	Non-zero to raise or zero to lower DTR/RTS lines
 663 *
 664 * This routine notifies the HVC back-end to raise or lower DTR/RTS
 665 * lines.  Raising DTR/RTS is ignored.  Lowering DTR/RTS indicates that
 666 * the IUCV connection is to be dropped (similar to hanging up a modem).
 667 */
 668static void hvc_iucv_dtr_rts(struct hvc_struct *hp, int raise)
 669{
 670	struct hvc_iucv_private *priv;
 671	struct iucv_path        *path;
 672
 673	/* Raising the DTR/RTS lines is ignored as IUCV connections can be
 674	 * established at any time.
 675	 */
 676	if (raise)
 677		return;
 678
 679	priv = hvc_iucv_get_private(hp->vtermno);
 680	if (!priv)
 681		return;
 682
 683	/* Lowering the DTR/RTS lines disconnects an established IUCV
 684	 * connection.
 685	 */
 686	flush_sndbuf_sync(priv);
 687
 688	spin_lock_bh(&priv->lock);
 689	path = priv->path;		/* save reference to IUCV path */
 690	priv->path = NULL;
 691	priv->iucv_state = IUCV_DISCONN;
 692	spin_unlock_bh(&priv->lock);
 693
 694	/* Sever IUCV path outside of priv->lock due to lock ordering of:
 695	 * priv->lock <--> iucv_table_lock */
 696	if (path) {
 697		iucv_path_sever(path, NULL);
 698		iucv_path_free(path);
 699	}
 700}
 701
 702/**
 703 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 704 * @hp:		Pointer to the HVC device (struct hvc_struct)
 705 * @id:		Additional data (originally passed to hvc_alloc):
 706 *		the index of a struct hvc_iucv_private instance.
 707 *
 708 * This routine notifies the HVC back-end that the last tty device fd has been
 709 * closed.  The function cleans up tty resources.  The clean-up of the IUCV
 710 * connection is done in hvc_iucv_dtr_rts() and depends on the HUPCL termios
 711 * control setting.
 712 *
 713 * Locking:	struct hvc_iucv_private->lock
 714 */
 715static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
 716{
 717	struct hvc_iucv_private *priv;
 718
 719	priv = hvc_iucv_get_private(id);
 720	if (!priv)
 721		return;
 722
 723	flush_sndbuf_sync(priv);
 724
 725	spin_lock_bh(&priv->lock);
 726	destroy_tty_buffer_list(&priv->tty_outqueue);
 727	destroy_tty_buffer_list(&priv->tty_inqueue);
 728	priv->tty_state = TTY_CLOSED;
 729	priv->sndbuf_len = 0;
 730	spin_unlock_bh(&priv->lock);
 731}
 732
 733/**
 734 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 735 * @ipvmid:	Originating z/VM user ID (right padded with blanks)
 736 *
 737 * Returns 0 if the z/VM user ID @ipvmid is allowed to connect, otherwise
 738 * non-zero.
 739 */
 740static int hvc_iucv_filter_connreq(u8 ipvmid[8])
 741{
 742	size_t i;
 743
 744	/* Note: default policy is ACCEPT if no filter is set */
 745	if (!hvc_iucv_filter_size)
 746		return 0;
 747
 748	for (i = 0; i < hvc_iucv_filter_size; i++)
 749		if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
 750			return 0;
 751	return 1;
 752}
 753
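/*
 * Example (editor's illustration, not part of the original source): with a
 * filter created from "lnxadmin,op1", hvc_iucv_filter holds the two 8-byte,
 * blank-padded, upper-case entries "LNXADMIN" and "OP1     ".  A connection
 * request whose originating z/VM user ID matches one of these entries is
 * accepted (return value 0); any other user ID is refused.
 */
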
 754/**
 755 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 756 * @path:	Pending path (struct iucv_path)
 757 * @ipvmid:	z/VM system identifier of originator
 758 * @ipuser:	User specified data for this path
 759 *		(AF_IUCV: port/service name and originator port)
 760 *
 761 * The function uses the @ipuser data to determine if the pending path belongs
 762 * to a terminal managed by this device driver.
 763 * If the path belongs to this driver, ensure that the terminal is not accessed
 764 * multiple times (only one connection to a terminal is allowed).
 765 * If the terminal is not yet connected, the pending path is accepted and is
 766 * associated to the appropriate struct hvc_iucv_private instance.
 767 *
 768 * Returns 0 if @path belongs to a terminal managed by this device driver;
 769 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 770 *
 771 * Locking:	struct hvc_iucv_private->lock
 772 */
 773static	int hvc_iucv_path_pending(struct iucv_path *path,
 774				  u8 ipvmid[8], u8 ipuser[16])
 775{
 776	struct hvc_iucv_private *priv, *tmp;
 777	u8 wildcard[9] = "lnxhvc  ";
 778	int i, rc, find_unused;
 779	u8 nuser_data[16];
 780	u8 vm_user_id[9];
 781
 782	ASCEBC(wildcard, sizeof(wildcard));
 783	find_unused = !memcmp(wildcard, ipuser, 8);
 784
 785	/* First, check if the pending path request is managed by this
 786	 * IUCV handler:
 787	 * - find a disconnected device if ipuser contains the wildcard
 788	 * - find the device that matches the terminal ID in ipuser
 789	 */
 790	priv = NULL;
 791	for (i = 0; i < hvc_iucv_devices; i++) {
 792		tmp = hvc_iucv_table[i];
 793		if (!tmp)
 794			continue;
 795
 796		if (find_unused) {
 797			spin_lock(&tmp->lock);
 798			if (tmp->iucv_state == IUCV_DISCONN)
 799				priv = tmp;
 800			spin_unlock(&tmp->lock);
 801
 802		} else if (!memcmp(tmp->srv_name, ipuser, 8))
 803				priv = tmp;
 804		if (priv)
 805			break;
 806	}
 807	if (!priv)
 808		return -ENODEV;
 809
 810	/* Enforce that ipvmid is allowed to connect to us */
 811	read_lock(&hvc_iucv_filter_lock);
 812	rc = hvc_iucv_filter_connreq(ipvmid);
 813	read_unlock(&hvc_iucv_filter_lock);
 814	if (rc) {
 815		iucv_path_sever(path, ipuser);
 816		iucv_path_free(path);
 817		memcpy(vm_user_id, ipvmid, 8);
 818		vm_user_id[8] = 0;
 819		pr_info("A connection request from z/VM user ID %s "
 820			"was refused\n", vm_user_id);
 821		return 0;
 822	}
 823
 824	spin_lock(&priv->lock);
 825
 826	/* If the terminal is already connected or being severed, then sever
 827	 * this path to enforce that there is only ONE established communication
 828	 * path per terminal. */
 829	if (priv->iucv_state != IUCV_DISCONN) {
 830		iucv_path_sever(path, ipuser);
 831		iucv_path_free(path);
 832		goto out_path_handled;
 833	}
 834
 835	/* accept path */
 836	memcpy(nuser_data, ipuser + 8, 8);  /* remote service (for af_iucv) */
 837	memcpy(nuser_data + 8, ipuser, 8);  /* local service  (for af_iucv) */
 838	path->msglim = 0xffff;		    /* IUCV MSGLIMIT */
 839	path->flags &= ~IUCV_IPRMDATA;	    /* TODO: use IUCV_IPRMDATA */
 840	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
 841	if (rc) {
 842		iucv_path_sever(path, ipuser);
 843		iucv_path_free(path);
 844		goto out_path_handled;
 845	}
 846	priv->path = path;
 847	priv->iucv_state = IUCV_CONNECTED;
 848
 849	/* store path information */
 850	memcpy(priv->info_path, ipvmid, 8);
 851	memcpy(priv->info_path + 8, ipuser + 8, 8);
 852
 853	/* flush buffered output data... */
 854	schedule_delayed_work(&priv->sndbuf_work, 5);
 855
 856out_path_handled:
 857	spin_unlock(&priv->lock);
 858	return 0;
 859}
 860
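/*
 * Example (editor's illustration, not part of the original source): a peer
 * that connects with the IUCV user data "lnxhvc  " (the wildcard above) is
 * attached to the first terminal that is currently disconnected, whereas
 * user data "lnxhvc0 " targets that specific terminal only; if hvc_iucv0
 * already has an established path, the new path is severed.
 */
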
 861/**
 862 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 863 * @path:	Pending path (struct iucv_path)
 864 * @ipuser:	User specified data for this path
 865 *		(AF_IUCV: port/service name and originator port)
 866 *
 867 * This function calls the hvc_iucv_hangup() function for the
 868 * respective IUCV HVC terminal.
 869 *
 870 * Locking:	struct hvc_iucv_private->lock
 871 */
 872static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
 873{
 874	struct hvc_iucv_private *priv = path->private;
 875
 876	hvc_iucv_hangup(priv);
 877}
 878
 879/**
 880 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 881 * @path:	Pending path (struct iucv_path)
 882 * @msg:	Pointer to the IUCV message
 883 *
 884 * The function puts an incoming message on the input queue for later
 885 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 886 * If the tty has not yet been opened, the message is rejected.
 887 *
 888 * Locking:	struct hvc_iucv_private->lock
 889 */
 890static void hvc_iucv_msg_pending(struct iucv_path *path,
 891				 struct iucv_message *msg)
 892{
 893	struct hvc_iucv_private *priv = path->private;
 894	struct iucv_tty_buffer *rb;
 895
 896	/* reject messages that exceed max size of iucv_tty_msg->datalen */
 897	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
 898		iucv_message_reject(path, msg);
 899		return;
 900	}
 901
 902	spin_lock(&priv->lock);
 903
 904	/* reject messages if tty has not yet been opened */
 905	if (priv->tty_state == TTY_CLOSED) {
 906		iucv_message_reject(path, msg);
 907		goto unlock_return;
 908	}
 909
 910	/* allocate tty buffer to save iucv msg only */
 911	rb = alloc_tty_buffer(0, GFP_ATOMIC);
 912	if (!rb) {
 913		iucv_message_reject(path, msg);
 914		goto unlock_return;	/* -ENOMEM */
 915	}
 916	rb->msg = *msg;
 917
 918	list_add_tail(&rb->list, &priv->tty_inqueue);
 919
 920	hvc_kick();	/* wake up hvc thread */
 921
 922unlock_return:
 923	spin_unlock(&priv->lock);
 924}
 925
 926/**
 927 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 928 * @path:	Pending path (struct iucv_path)
 929 * @msg:	Pointer to the IUCV message
 930 *
 931 * The function is called upon completion of message delivery to remove the
 932 * message from the outqueue. Additional delivery information can be found
 933 * in msg->audit: rejected messages (0x040000 (IPADRJCT)), and
 934 *	       purged messages	 (0x010000 (IPADPGNR)).
 935 *
 936 * Locking:	struct hvc_iucv_private->lock
 937 */
 938static void hvc_iucv_msg_complete(struct iucv_path *path,
 939				  struct iucv_message *msg)
 940{
 941	struct hvc_iucv_private *priv = path->private;
 942	struct iucv_tty_buffer	*ent, *next;
 943	LIST_HEAD(list_remove);
 944
 945	spin_lock(&priv->lock);
 946	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
 947		if (ent->msg.id == msg->id) {
 948			list_move(&ent->list, &list_remove);
 949			break;
 950		}
 951	wake_up(&priv->sndbuf_waitq);
 952	spin_unlock(&priv->lock);
 953	destroy_tty_buffer_list(&list_remove);
 954}
 955
 956/**
 957 * hvc_iucv_pm_freeze() - Freeze PM callback
 958 * @dev:	IUCV HVC terminal device
 959 *
 960 * Sever an established IUCV communication path and
 961 * trigger a hang-up of the underlying HVC terminal.
 962 */
 963static int hvc_iucv_pm_freeze(struct device *dev)
 964{
 965	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
 966
 967	local_bh_disable();
 968	hvc_iucv_hangup(priv);
 969	local_bh_enable();
 970
 971	return 0;
 972}
 973
 974/**
 975 * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
 976 * @dev:	IUCV HVC terminal device
 977 *
 978 * Wake up the HVC thread to trigger hang-up and respective
 979 * HVC back-end notifier invocations.
 980 */
 981static int hvc_iucv_pm_restore_thaw(struct device *dev)
 982{
 983	hvc_kick();
 984	return 0;
 985}
 986
 987static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
 988					struct device_attribute *attr,
 989					char *buf)
 990{
 991	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
 992	size_t len;
 993
 994	len = sizeof(priv->srv_name);
 995	memcpy(buf, priv->srv_name, len);
 996	EBCASC(buf, len);
 997	buf[len++] = '\n';
 998	return len;
 999}
1000
1001static ssize_t hvc_iucv_dev_state_show(struct device *dev,
1002					struct device_attribute *attr,
1003					char *buf)
1004{
1005	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
1006	return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state);
1007}
1008
1009static ssize_t hvc_iucv_dev_peer_show(struct device *dev,
1010				      struct device_attribute *attr,
1011				      char *buf)
1012{
1013	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
1014	char vmid[9], ipuser[9];
1015
1016	memset(vmid, 0, sizeof(vmid));
1017	memset(ipuser, 0, sizeof(ipuser));
1018
1019	spin_lock_bh(&priv->lock);
1020	if (priv->iucv_state == IUCV_CONNECTED) {
1021		memcpy(vmid, priv->info_path, 8);
1022		memcpy(ipuser, priv->info_path + 8, 8);
1023	}
1024	spin_unlock_bh(&priv->lock);
1025	EBCASC(ipuser, 8);
1026
1027	return sprintf(buf, "%s:%s\n", vmid, ipuser);
1028}
1029
1030
1031/* HVC operations */
1032static const struct hv_ops hvc_iucv_ops = {
1033	.get_chars = hvc_iucv_get_chars,
1034	.put_chars = hvc_iucv_put_chars,
1035	.notifier_add = hvc_iucv_notifier_add,
1036	.notifier_del = hvc_iucv_notifier_del,
1037	.notifier_hangup = hvc_iucv_notifier_hangup,
1038	.dtr_rts = hvc_iucv_dtr_rts,
1039};
1040
1041/* Suspend / resume device operations */
1042static const struct dev_pm_ops hvc_iucv_pm_ops = {
1043	.freeze	  = hvc_iucv_pm_freeze,
1044	.thaw	  = hvc_iucv_pm_restore_thaw,
1045	.restore  = hvc_iucv_pm_restore_thaw,
1046};
1047
1048/* IUCV HVC device driver */
1049static struct device_driver hvc_iucv_driver = {
1050	.name = KMSG_COMPONENT,
1051	.bus  = &iucv_bus,
1052	.pm   = &hvc_iucv_pm_ops,
1053};
1054
1055/* IUCV HVC device attributes */
1056static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
1057static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
1058static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);
1059static struct attribute *hvc_iucv_dev_attrs[] = {
1060	&dev_attr_termid.attr,
1061	&dev_attr_state.attr,
1062	&dev_attr_peer.attr,
1063	NULL,
1064};
1065static struct attribute_group hvc_iucv_dev_attr_group = {
1066	.attrs = hvc_iucv_dev_attrs,
1067};
1068static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
1069	&hvc_iucv_dev_attr_group,
1070	NULL,
1071};
1072
1073
1074/**
1075 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
1076 * @id:			hvc_iucv_table index
1077 * @is_console:		Flag if the instance is used as Linux console
1078 *
1079 * This function allocates a new hvc_iucv_private structure and stores
1080 * the instance in hvc_iucv_table at index @id.
1081 * Returns 0 on success; otherwise non-zero.
1082 */
1083static int __init hvc_iucv_alloc(int id, unsigned int is_console)
1084{
1085	struct hvc_iucv_private *priv;
1086	char name[9];
1087	int rc;
1088
1089	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
1090	if (!priv)
1091		return -ENOMEM;
1092
1093	spin_lock_init(&priv->lock);
1094	INIT_LIST_HEAD(&priv->tty_outqueue);
1095	INIT_LIST_HEAD(&priv->tty_inqueue);
1096	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
1097	init_waitqueue_head(&priv->sndbuf_waitq);
1098
1099	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
1100	if (!priv->sndbuf) {
1101		kfree(priv);
1102		return -ENOMEM;
1103	}
1104
1105	/* set console flag */
1106	priv->is_console = is_console;
1107
1108	/* allocate hvc device */
1109	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /*		  PAGE_SIZE */
1110			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
1111	if (IS_ERR(priv->hvc)) {
1112		rc = PTR_ERR(priv->hvc);
1113		goto out_error_hvc;
1114	}
1115
1116	/* notify HVC thread instead of using polling */
1117	priv->hvc->irq_requested = 1;
1118
1119	/* setup iucv related information */
1120	snprintf(name, 9, "lnxhvc%-2d", id);
1121	memcpy(priv->srv_name, name, 8);
1122	ASCEBC(priv->srv_name, 8);
1123
1124	/* create and setup device */
1125	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
1126	if (!priv->dev) {
1127		rc = -ENOMEM;
1128		goto out_error_dev;
1129	}
1130	dev_set_name(priv->dev, "hvc_iucv%d", id);
1131	dev_set_drvdata(priv->dev, priv);
1132	priv->dev->bus = &iucv_bus;
1133	priv->dev->parent = iucv_root;
1134	priv->dev->driver = &hvc_iucv_driver;
1135	priv->dev->groups = hvc_iucv_dev_attr_groups;
1136	priv->dev->release = (void (*)(struct device *)) kfree;
1137	rc = device_register(priv->dev);
1138	if (rc) {
1139		put_device(priv->dev);
1140		goto out_error_dev;
1141	}
1142
1143	hvc_iucv_table[id] = priv;
1144	return 0;
1145
1146out_error_dev:
1147	hvc_remove(priv->hvc);
1148out_error_hvc:
1149	free_page((unsigned long) priv->sndbuf);
1150	kfree(priv);
1151
1152	return rc;
1153}
1154
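/*
 * Illustration (editor's note, not part of the original source): for
 * id = 0..3 this creates the IUCV service names "lnxhvc0 " .. "lnxhvc3 "
 * (built in ASCII, then converted to EBCDIC) and iucv bus devices named
 * hvc_iucv0 .. hvc_iucv3; the matching HVC terminals typically appear
 * as /dev/hvc0 .. /dev/hvc3.
 */
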
1155/**
1156 * hvc_iucv_destroy() - Destroy and free a hvc_iucv_private instance
1157 */
1158static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
1159{
1160	hvc_remove(priv->hvc);
1161	device_unregister(priv->dev);
1162	free_page((unsigned long) priv->sndbuf);
1163	kfree(priv);
1164}
1165
1166/**
1167 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
1168 * @filter:	String containing a comma-separated list of z/VM user IDs
1169 */
1170static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
1171{
1172	const char *nextdelim, *residual;
1173	size_t len;
1174
1175	nextdelim = strchr(filter, ',');
1176	if (nextdelim) {
1177		len = nextdelim - filter;
1178		residual = nextdelim + 1;
1179	} else {
1180		len = strlen(filter);
1181		residual = filter + len;
1182	}
1183
1184	if (len == 0)
1185		return ERR_PTR(-EINVAL);
1186
1187	/* check for '\n' (if called from sysfs) */
1188	if (filter[len - 1] == '\n')
1189		len--;
1190
1191	if (len > 8)
1192		return ERR_PTR(-EINVAL);
1193
1194	/* pad with blanks and save upper case version of user ID */
1195	memset(dest, ' ', 8);
1196	while (len--)
1197		dest[len] = toupper(filter[len]);
1198	return residual;
1199}
1200
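/*
 * Example (editor's illustration, not part of the original source; the
 * user IDs are placeholders): parsing the sysfs input "t6360005,t6360006\n"
 * takes two calls:
 *
 *	hvc_iucv_parse_filter("t6360005,t6360006\n", dest)
 *		fills dest with "T6360005" and returns "t6360006\n"
 *	hvc_iucv_parse_filter("t6360006\n", dest + 8)
 *		fills dest + 8 with "T6360006" and returns the trailing '\0'
 *
 * Entries shorter than eight characters are padded with blanks; the
 * newline appended by sysfs writes is stripped from the last entry.
 */
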
1201/**
1202 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
1203 * @filter:	String consisting of a comma-separated list of z/VM user IDs
1204 *
1205 * The function parses the @filter string and creates an array containing
1206 * the list of z/VM user ID filter entries.
1207 * Return code 0 means success, -EINVAL if the filter is syntactically
1208 * incorrect, -ENOMEM if there was not enough memory to allocate the
1209 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
1210 */
1211static int hvc_iucv_setup_filter(const char *val)
1212{
1213	const char *residual;
1214	int err;
1215	size_t size, count;
1216	void *array, *old_filter;
1217
1218	count = strlen(val);
1219	if (count == 0 || (count == 1 && val[0] == '\n')) {
1220		size  = 0;
1221		array = NULL;
1222		goto out_replace_filter;	/* clear filter */
1223	}
1224
1225	/* count user IDs in order to allocate sufficient memory */
1226	size = 1;
1227	residual = val;
1228	while ((residual = strchr(residual, ',')) != NULL) {
1229		residual++;
1230		size++;
1231	}
1232
1233	/* check if the specified list exceeds the filter limit */
1234	if (size > MAX_VMID_FILTER)
1235		return -ENOSPC;
1236
1237	array = kzalloc(size * 8, GFP_KERNEL);
1238	if (!array)
1239		return -ENOMEM;
1240
1241	count = size;
1242	residual = val;
1243	while (*residual && count) {
1244		residual = hvc_iucv_parse_filter(residual,
1245						 array + ((size - count) * 8));
1246		if (IS_ERR(residual)) {
1247			err = PTR_ERR(residual);
1248			kfree(array);
1249			goto out_err;
1250		}
1251		count--;
1252	}
1253
1254out_replace_filter:
1255	write_lock_bh(&hvc_iucv_filter_lock);
1256	old_filter = hvc_iucv_filter;
1257	hvc_iucv_filter_size = size;
1258	hvc_iucv_filter = array;
1259	write_unlock_bh(&hvc_iucv_filter_lock);
1260	kfree(old_filter);
1261
1262	err = 0;
1263out_err:
1264	return err;
1265}
1266
1267/**
1268 * param_set_vmidfilter() - Set z/VM user ID filter parameter
1269 * @val:	String consisting of a comma-separated list of z/VM user IDs
1270 * @kp:		Kernel parameter pointing to hvc_iucv_filter array
1271 *
1272 * The function sets up the z/VM user ID filter specified as comma-separated
1273 * list of user IDs in @val.
1274 * Note: If it is called early in the boot process, @val is stored and
1275 *	 parsed later in hvc_iucv_init().
1276 */
1277static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
1278{
1279	int rc;
1280
1281	if (!MACHINE_IS_VM || !hvc_iucv_devices)
1282		return -ENODEV;
1283
1284	if (!val)
1285		return -EINVAL;
1286
1287	rc = 0;
1288	if (slab_is_available())
1289		rc = hvc_iucv_setup_filter(val);
1290	else
1291		hvc_iucv_filter_string = val;	/* defer... */
1292	return rc;
1293}
1294
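/*
 * Usage sketch (editor's note, not part of the original source; the user
 * IDs and sysfs path are illustrative): besides the boot parameter, the
 * filter can be replaced at runtime because the parameter is writable
 * (mode 0640), e.g. via
 *
 *	echo "lnxadmin,lnxsys01" > /sys/module/kernel/parameters/hvc_iucv_allow
 *
 * If the setter runs before the slab allocator is up (early boot), the
 * string is only recorded in hvc_iucv_filter_string and parsed later by
 * hvc_iucv_init().
 */
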
1295/**
1296 * param_get_vmidfilter() - Get z/VM user ID filter
1297 * @buffer:	Buffer to store the z/VM user ID filter
1298 *		(the buffer size is assumed to be PAGE_SIZE)
1299 * @kp:		Kernel parameter pointing to the hvc_iucv_filter array
1300 *
1301 * The function stores the filter as a comma-separated list of z/VM user IDs
1302 * in @buffer. Typically, sysfs routines call this function for attr show.
1303 */
1304static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
1305{
1306	int rc;
1307	size_t index, len;
1308	void *start, *end;
1309
1310	if (!MACHINE_IS_VM || !hvc_iucv_devices)
1311		return -ENODEV;
1312
1313	rc = 0;
1314	read_lock_bh(&hvc_iucv_filter_lock);
1315	for (index = 0; index < hvc_iucv_filter_size; index++) {
1316		start = hvc_iucv_filter + (8 * index);
1317		end   = memchr(start, ' ', 8);
1318		len   = (end) ? end - start : 8;
1319		memcpy(buffer + rc, start, len);
1320		rc += len;
1321		buffer[rc++] = ',';
1322	}
1323	read_unlock_bh(&hvc_iucv_filter_lock);
1324	if (rc)
1325		buffer[--rc] = '\0';	/* replace last comma and update rc */
1326	return rc;
1327}
1328
1329#define param_check_vmidfilter(name, p) __param_check(name, p, void)
1330
1331static struct kernel_param_ops param_ops_vmidfilter = {
1332	.set = param_set_vmidfilter,
1333	.get = param_get_vmidfilter,
1334};
1335
1336/**
1337 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
1338 */
1339static int __init hvc_iucv_init(void)
1340{
1341	int rc;
1342	unsigned int i;
1343
1344	if (!hvc_iucv_devices)
1345		return -ENODEV;
1346
1347	if (!MACHINE_IS_VM) {
1348		pr_notice("The z/VM IUCV HVC device driver cannot "
1349			   "be used without z/VM\n");
1350		rc = -ENODEV;
1351		goto out_error;
1352	}
1353
1354	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
1355		pr_err("%lu is not a valid value for the hvc_iucv= "
1356			"kernel parameter\n", hvc_iucv_devices);
1357		rc = -EINVAL;
1358		goto out_error;
1359	}
1360
1361	/* register IUCV HVC device driver */
1362	rc = driver_register(&hvc_iucv_driver);
1363	if (rc)
1364		goto out_error;
1365
1366	/* parse hvc_iucv_allow string and create z/VM user ID filter list */
1367	if (hvc_iucv_filter_string) {
1368		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
1369		switch (rc) {
1370		case 0:
1371			break;
1372		case -ENOMEM:
1373			pr_err("Allocating memory failed with "
1374				"reason code=%d\n", 3);
1375			goto out_error;
1376		case -EINVAL:
1377			pr_err("hvc_iucv_allow= does not specify a valid "
1378				"z/VM user ID list\n");
1379			goto out_error;
1380		case -ENOSPC:
1381			pr_err("hvc_iucv_allow= specifies too many "
1382				"z/VM user IDs\n");
1383			goto out_error;
1384		default:
1385			goto out_error;
1386		}
1387	}
1388
1389	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
1390					   sizeof(struct iucv_tty_buffer),
1391					   0, 0, NULL);
1392	if (!hvc_iucv_buffer_cache) {
1393		pr_err("Allocating memory failed with reason code=%d\n", 1);
1394		rc = -ENOMEM;
1395		goto out_error;
1396	}
1397
1398	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
1399						    hvc_iucv_buffer_cache);
1400	if (!hvc_iucv_mempool) {
1401		pr_err("Allocating memory failed with reason code=%d\n", 2);
1402		kmem_cache_destroy(hvc_iucv_buffer_cache);
1403		rc = -ENOMEM;
1404		goto out_error;
1405	}
1406
1407	/* register the first terminal device as console
1408	 * (must be done before allocating hvc terminal devices) */
1409	rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
1410	if (rc) {
1411		pr_err("Registering HVC terminal device as "
1412		       "Linux console failed\n");
1413		goto out_error_memory;
1414	}
1415
1416	/* allocate hvc_iucv_private structs */
1417	for (i = 0; i < hvc_iucv_devices; i++) {
1418		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
1419		if (rc) {
1420			pr_err("Creating a new HVC terminal device "
1421				"failed with error code=%d\n", rc);
1422			goto out_error_hvc;
1423		}
1424	}
1425
1426	/* register IUCV callback handler */
1427	rc = iucv_register(&hvc_iucv_handler, 0);
1428	if (rc) {
1429		pr_err("Registering IUCV handlers failed with error code=%d\n",
1430			rc);
1431		goto out_error_hvc;
1432	}
1433
1434	return 0;
1435
1436out_error_hvc:
1437	for (i = 0; i < hvc_iucv_devices; i++)
1438		if (hvc_iucv_table[i])
1439			hvc_iucv_destroy(hvc_iucv_table[i]);
1440out_error_memory:
1441	mempool_destroy(hvc_iucv_mempool);
1442	kmem_cache_destroy(hvc_iucv_buffer_cache);
1443out_error:
1444	kfree(hvc_iucv_filter);
1445	hvc_iucv_devices = 0; /* ensure that we do not provide any device */
1446	return rc;
1447}
1448
1449/**
1450 * hvc_iucv_config() - Parsing of hvc_iucv=  kernel command line parameter
1451 * @val:	Parameter value (numeric)
1452 */
1453static	int __init hvc_iucv_config(char *val)
1454{
1455	 return kstrtoul(val, 10, &hvc_iucv_devices);
1456}
1457
1458
1459device_initcall(hvc_iucv_init);
1460__setup("hvc_iucv=", hvc_iucv_config);
1461core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
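
/*
 * Parameter summary (editor's note, not part of the original source):
 * "hvc_iucv=<n>" is an early __setup parameter that selects how many
 * terminal devices are provided (0 disables the driver, at most
 * MAX_HVC_IUCV_LINES are allowed), and "hvc_iucv_allow=<id>[,<id>...]"
 * restricts which z/VM user IDs may connect, for example:
 *
 *	hvc_iucv=4 hvc_iucv_allow=lnxadmin,lnxsys01
 */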