   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * z/VM IUCV hypervisor console (HVC) device driver
   4 *
   5 * This HVC device driver provides terminal access using
   6 * z/VM IUCV communication paths.
   7 *
   8 * Copyright IBM Corp. 2008, 2013
   9 *
  10 * Author(s):	Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  11 */
  12#define KMSG_COMPONENT		"hvc_iucv"
  13#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt
  14
  15#include <linux/types.h>
  16#include <linux/slab.h>
  17#include <asm/ebcdic.h>
  18#include <linux/ctype.h>
  19#include <linux/delay.h>
  20#include <linux/device.h>
  21#include <linux/init.h>
  22#include <linux/mempool.h>
  23#include <linux/moduleparam.h>
  24#include <linux/tty.h>
  25#include <linux/wait.h>
  26#include <net/iucv/iucv.h>
  27
  28#include "hvc_console.h"
  29
  30
  31/* General device driver settings */
  32#define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS
  33#define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
  34
  35/* IUCV TTY message  */
  36#define MSG_VERSION		0x02	/* Message version */
  37#define MSG_TYPE_ERROR		0x01	/* Error message */
  38#define MSG_TYPE_TERMENV	0x02	/* Terminal environment variable */
  39#define MSG_TYPE_TERMIOS	0x04	/* Terminal IO struct update */
  40#define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */
  41#define MSG_TYPE_DATA		0x10	/* Terminal data */
  42
  43struct iucv_tty_msg {
  44	u8	version;		/* Message version */
  45	u8	type;			/* Message type */
  46#define MSG_MAX_DATALEN		((u16)(~0))
  47	u16	datalen;		/* Payload length */
  48	u8	data[];			/* Payload buffer */
  49} __attribute__((packed));
  50#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))
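/*
 * Illustration (not part of the original driver source): with the packed
 * layout above, the fixed header (version, type, datalen) occupies four
 * bytes, so MSG_SIZE() evaluates to the payload length plus 4.  A minimal
 * user-space sketch of the same calculation, using hypothetical names:
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	struct tty_msg_example {
 *		uint8_t  version;
 *		uint8_t  type;
 *		uint16_t datalen;
 *		uint8_t  data[];
 *	} __attribute__((packed));
 *
 *	#define EXAMPLE_MSG_SIZE(s) \
 *		((s) + offsetof(struct tty_msg_example, data))
 *
 *	int main(void)
 *	{
 *		// a 100-byte payload yields a 104-byte IUCV message
 *		printf("%zu\n", EXAMPLE_MSG_SIZE(100));
 *		return 0;
 *	}
 */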
  51
  52enum iucv_state_t {
  53	IUCV_DISCONN	= 0,
  54	IUCV_CONNECTED	= 1,
  55	IUCV_SEVERED	= 2,
  56};
  57
  58enum tty_state_t {
  59	TTY_CLOSED	= 0,
  60	TTY_OPENED	= 1,
  61};
  62
  63struct hvc_iucv_private {
  64	struct hvc_struct	*hvc;		/* HVC struct reference */
  65	u8			srv_name[8];	/* IUCV service name (ebcdic) */
  66	unsigned char		is_console;	/* Linux console usage flag */
  67	enum iucv_state_t	iucv_state;	/* IUCV connection status */
  68	enum tty_state_t	tty_state;	/* TTY status */
  69	struct iucv_path	*path;		/* IUCV path pointer */
  70	spinlock_t		lock;		/* hvc_iucv_private lock */
  71#define SNDBUF_SIZE		(PAGE_SIZE)	/* must be < MSG_MAX_DATALEN */
  72	void			*sndbuf;	/* send buffer		  */
  73	size_t			sndbuf_len;	/* length of send buffer  */
  74#define QUEUE_SNDBUF_DELAY	(HZ / 25)
  75	struct delayed_work	sndbuf_work;	/* work: send iucv msg(s) */
  76	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */
  77	struct list_head	tty_outqueue;	/* outgoing IUCV messages */
  78	struct list_head	tty_inqueue;	/* incoming IUCV messages */
  79	struct device		*dev;		/* device structure */
  80	u8			info_path[16];	/* IUCV path info (dev attr) */
  81};
  82
  83struct iucv_tty_buffer {
  84	struct list_head	list;	/* list pointer */
  85	struct iucv_message	msg;	/* store an IUCV message */
  86	size_t			offset;	/* data buffer offset */
  87	struct iucv_tty_msg	*mbuf;	/* buffer to store input/output data */
  88};
  89
  90/* IUCV callback handler */
  91static	int hvc_iucv_path_pending(struct iucv_path *, u8 *, u8 *);
  92static void hvc_iucv_path_severed(struct iucv_path *, u8 *);
  93static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
  94static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
  95
  96
  97/* Kernel module parameter: use one terminal device as default */
  98static unsigned long hvc_iucv_devices = 1;
  99
 100/* Array of allocated hvc iucv tty lines... */
 101static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
 102#define IUCV_HVC_CON_IDX	(0)
 103/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
 104#define MAX_VMID_FILTER		(500)
 105#define FILTER_WILDCARD_CHAR	'*'
 106static size_t hvc_iucv_filter_size;
 107static void *hvc_iucv_filter;
 108static const char *hvc_iucv_filter_string;
 109static DEFINE_RWLOCK(hvc_iucv_filter_lock);
 110
 111/* Kmem cache and mempool for iucv_tty_buffer elements */
 112static struct kmem_cache *hvc_iucv_buffer_cache;
 113static mempool_t *hvc_iucv_mempool;
 114
 115/* IUCV handler callback functions */
 116static struct iucv_handler hvc_iucv_handler = {
 117	.path_pending  = hvc_iucv_path_pending,
 118	.path_severed  = hvc_iucv_path_severed,
 119	.message_complete = hvc_iucv_msg_complete,
 120	.message_pending  = hvc_iucv_msg_pending,
 121};
 122
 123
 124/**
 125 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
 126 * @num:	The HVC virtual terminal number (vtermno)
 127 *
 128 * This function returns the struct hvc_iucv_private instance that corresponds
 129 * to the HVC virtual terminal number specified as parameter @num.
 130 */
 131static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
 132{
 133	if (num > hvc_iucv_devices)
 134		return NULL;
 135	return hvc_iucv_table[num];
 136}
 137
 138/**
 139 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
 140 * @size:	Size of the internal buffer used to store data.
 141 * @flags:	Memory allocation flags passed to mempool.
 142 *
 143 * This function allocates a new struct iucv_tty_buffer element and, optionally,
 144 * allocates an internal data buffer with the specified size @size.
 145 * The internal data buffer is always allocated with GFP_DMA which is
 146 * required for receiving and sending data with IUCV.
 147 * Note: The total message size is the internal buffer size plus the size of
 148 *	 the fixed iucv_tty_msg header (see MSG_SIZE()).
 149 * The function returns NULL if memory allocation has failed.
 150 */
 151static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
 152{
 153	struct iucv_tty_buffer *bufp;
 154
 155	bufp = mempool_alloc(hvc_iucv_mempool, flags);
 156	if (!bufp)
 157		return NULL;
 158	memset(bufp, 0, sizeof(*bufp));
 159
 160	if (size > 0) {
 161		bufp->msg.length = MSG_SIZE(size);
 162		bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
 163		if (!bufp->mbuf) {
 164			mempool_free(bufp, hvc_iucv_mempool);
 165			return NULL;
 166		}
 167		bufp->mbuf->version = MSG_VERSION;
 168		bufp->mbuf->type    = MSG_TYPE_DATA;
 169		bufp->mbuf->datalen = (u16) size;
 170	}
 171	return bufp;
 172}
 173
 174/**
 175 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
 176 * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
 177 */
 178static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
 179{
 180	kfree(bufp->mbuf);
 181	mempool_free(bufp, hvc_iucv_mempool);
 182}
 183
 184/**
 185 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
 186 * @list:	List containing struct iucv_tty_buffer elements.
 187 */
 188static void destroy_tty_buffer_list(struct list_head *list)
 189{
 190	struct iucv_tty_buffer *ent, *next;
 191
 192	list_for_each_entry_safe(ent, next, list, list) {
 193		list_del(&ent->list);
 194		destroy_tty_buffer(ent);
 195	}
 196}
 197
 198/**
 199 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 200 * @priv:		Pointer to struct hvc_iucv_private
 201 * @buf:		HVC buffer for writing received terminal data.
 202 * @count:		HVC buffer size.
 203 * @has_more_data:	Pointer to an int variable.
 204 *
 205 * The function picks up pending messages from the input queue and receives
 206 * the message data that is then written to the specified buffer @buf.
 207 * If the buffer size @count is less than the data message size, the
 208 * message is kept on the input queue and @has_more_data is set to 1.
 209 * If all message data has been written, the message is removed from
 210 * the input queue.
 211 *
 212 * The function returns the number of bytes written to the terminal, zero if
 213 * there are no pending data messages available or if there is no established
 214 * IUCV path.
 215 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 216 * hang up (that is issued by the HVC layer).
 217 */
 218static ssize_t hvc_iucv_write(struct hvc_iucv_private *priv,
 219			      u8 *buf, size_t count, int *has_more_data)
 220{
 221	struct iucv_tty_buffer *rb;
 222	ssize_t written;
 223	int rc;
 224
 225	/* immediately return if there is no IUCV connection */
 226	if (priv->iucv_state == IUCV_DISCONN)
 227		return 0;
 228
 229	/* if the IUCV path has been severed, return -EPIPE to inform the
 230	 * HVC layer to hang up the tty device. */
 231	if (priv->iucv_state == IUCV_SEVERED)
 232		return -EPIPE;
 233
 234	/* check if there are pending messages */
 235	if (list_empty(&priv->tty_inqueue))
 236		return 0;
 237
 238	/* receive an iucv message and flip data to the tty (ldisc) */
 239	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
 240
 241	written = 0;
 242	if (!rb->mbuf) { /* message not yet received ... */
 243		/* allocate mem to store msg data; if no memory is available
 244		 * then leave the buffer on the list and re-try later */
 245		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
 246		if (!rb->mbuf)
 247			return -ENOMEM;
 248
 249		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
 250					    rb->mbuf, rb->msg.length, NULL);
 251		switch (rc) {
 252		case 0: /* Successful	    */
 253			break;
 254		case 2:	/* No message found */
 255		case 9: /* Message purged   */
 256			break;
 257		default:
 258			written = -EIO;
 259		}
 260		/* remove buffer if an error has occurred or received data
 261		 * is not correct */
 262		if (rc || (rb->mbuf->version != MSG_VERSION) ||
 263			  (rb->msg.length    != MSG_SIZE(rb->mbuf->datalen)))
 264			goto out_remove_buffer;
 265	}
 266
 267	switch (rb->mbuf->type) {
 268	case MSG_TYPE_DATA:
 269		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
 270		memcpy(buf, rb->mbuf->data + rb->offset, written);
 271		if (written < (rb->mbuf->datalen - rb->offset)) {
 272			rb->offset += written;
 273			*has_more_data = 1;
 274			goto out_written;
 275		}
 276		break;
 277
 278	case MSG_TYPE_WINSIZE:
 279		if (rb->mbuf->datalen != sizeof(struct winsize))
 280			break;
 281		/* The caller must ensure that the hvc is locked, which
 282		 * is the case when called from hvc_iucv_get_chars() */
 283		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
 284		break;
 285
 286	case MSG_TYPE_ERROR:	/* ignored ... */
 287	case MSG_TYPE_TERMENV:	/* ignored ... */
 288	case MSG_TYPE_TERMIOS:	/* ignored ... */
 289		break;
 290	}
 291
 292out_remove_buffer:
 293	list_del(&rb->list);
 294	destroy_tty_buffer(rb);
 295	*has_more_data = !list_empty(&priv->tty_inqueue);
 296
 297out_written:
 298	return written;
 299}
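/*
 * Worked example (assumed values, for illustration only): if a pending
 * MSG_TYPE_DATA message carries datalen = 1000 and the HVC layer calls in
 * with count = 256, four passes are needed.  The first three copy 256
 * bytes each, advance rb->offset and leave the buffer queued with
 * *has_more_data = 1; the fourth copies the remaining 232 bytes and
 * removes the buffer from tty_inqueue.
 */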
 300
 301/**
 302 * hvc_iucv_get_chars() - HVC get_chars operation.
 303 * @vtermno:	HVC virtual terminal number.
 304 * @buf:	Pointer to a buffer to store data
 305 * @count:	Size of buffer available for writing
 306 *
 307 * The HVC thread calls this method to read characters from the back-end.
 308 * If an IUCV communication path has been established, pending IUCV messages
 309 * are received and data is copied into buffer @buf up to @count bytes.
 310 *
 311 * Locking:	The routine gets called under an irqsave() spinlock; and
 312 *		the routine locks the struct hvc_iucv_private->lock to call
 313 *		helper functions.
 314 */
 315static ssize_t hvc_iucv_get_chars(uint32_t vtermno, u8 *buf, size_t count)
 316{
 317	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
 318	ssize_t written;
 319	int has_more_data;
 320
 321	if (count <= 0)
 322		return 0;
 323
 324	if (!priv)
 325		return -ENODEV;
 326
 327	spin_lock(&priv->lock);
 328	has_more_data = 0;
 329	written = hvc_iucv_write(priv, buf, count, &has_more_data);
 330	spin_unlock(&priv->lock);
 331
 332	/* if there are still messages on the queue... schedule another run */
 333	if (has_more_data)
 334		hvc_kick();
 335
 336	return written;
 337}
 338
 339/**
 340 * hvc_iucv_queue() - Buffer terminal data for sending.
 341 * @priv:	Pointer to struct hvc_iucv_private instance.
 342 * @buf:	Buffer containing data to send.
 343 * @count:	Size of buffer and amount of data to send.
 344 *
 345 * The function queues data for sending. To actually send the buffered data,
 346 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
 347 * The function returns the number of data bytes that have been buffered.
 348 *
 349 * If the device is not connected, data is ignored and the function returns
 350 * @count.
 351 * If the buffer is full, the function returns 0.
 352 * If an existing IUCV communication path has been severed, -EPIPE is returned
 353 * (that can be passed to the HVC layer to cause a tty hangup).
 354 */
 355static ssize_t hvc_iucv_queue(struct hvc_iucv_private *priv, const u8 *buf,
 356			      size_t count)
 357{
 358	size_t len;
 359
 360	if (priv->iucv_state == IUCV_DISCONN)
 361		return count;			/* ignore data */
 362
 363	if (priv->iucv_state == IUCV_SEVERED)
 364		return -EPIPE;
 365
 366	len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
 367	if (!len)
 368		return 0;
 369
 370	memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
 371	priv->sndbuf_len += len;
 372
 373	if (priv->iucv_state == IUCV_CONNECTED)
 374		schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
 375
 376	return len;
 377}
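/*
 * Worked example (assumed values): with 4 KiB pages SNDBUF_SIZE is 4096.
 * If 3000 bytes are already buffered and another 2000 bytes arrive, only
 * min(2000, 4096 - 3000) = 1096 bytes are copied and 1096 is returned;
 * the delayed work then drains the buffer after QUEUE_SNDBUF_DELAY
 * (HZ / 25 jiffies, i.e. 40 ms) so that subsequent calls find room again.
 */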
 378
 379/**
 380 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 381 * @priv:	Pointer to struct hvc_iucv_private instance.
 382 *
 383 * If an IUCV communication path has been established, the buffered output data
 384 * is sent via an IUCV message and the number of bytes sent is returned.
 385 * Returns 0 if there is no established IUCV communication path or
 386 * -EPIPE if an existing IUCV communication path has been severed.
 387 */
 388static int hvc_iucv_send(struct hvc_iucv_private *priv)
 389{
 390	struct iucv_tty_buffer *sb;
 391	int rc, len;
 392
 393	if (priv->iucv_state == IUCV_SEVERED)
 394		return -EPIPE;
 395
 396	if (priv->iucv_state == IUCV_DISCONN)
 397		return -EIO;
 398
 399	if (!priv->sndbuf_len)
 400		return 0;
 401
 402	/* allocate internal buffer to store msg data and also compute total
 403	 * message length */
 404	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
 405	if (!sb)
 406		return -ENOMEM;
 407
 408	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
 409	sb->mbuf->datalen = (u16) priv->sndbuf_len;
 410	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);
 411
 412	list_add_tail(&sb->list, &priv->tty_outqueue);
 413
 414	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
 415				 (void *) sb->mbuf, sb->msg.length);
 416	if (rc) {
 417		/* drop the message here; however we might want to handle
 418		 * 0x03 (msg limit reached) by trying again... */
 419		list_del(&sb->list);
 420		destroy_tty_buffer(sb);
 421	}
 422	len = priv->sndbuf_len;
 423	priv->sndbuf_len = 0;
 424
 425	return len;
 426}
 427
 428/**
 429 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
 430 * @work:	Work structure.
 431 *
 432 * This work queue function sends the buffered output data over IUCV
 433 * (hvc_iucv_send() transmits the whole send buffer in a single message).
 434 */
 435static void hvc_iucv_sndbuf_work(struct work_struct *work)
 436{
 437	struct hvc_iucv_private *priv;
 438
 439	priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
 440
 441	spin_lock_bh(&priv->lock);
 442	hvc_iucv_send(priv);
 443	spin_unlock_bh(&priv->lock);
 444}
 445
 446/**
 447 * hvc_iucv_put_chars() - HVC put_chars operation.
 448 * @vtermno:	HVC virtual terminal number.
 449 * @buf:	Pointer to a buffer to read data from
 450 * @count:	Size of buffer available for reading
 451 *
 452 * The HVC thread calls this method to write characters to the back-end.
 453 * The function calls hvc_iucv_queue() to queue terminal data for sending.
 454 *
 455 * Locking:	The method gets called under an irqsave() spinlock; and
 456 *		locks struct hvc_iucv_private->lock.
 457 */
 458static ssize_t hvc_iucv_put_chars(uint32_t vtermno, const u8 *buf, size_t count)
 459{
 460	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
 461	int queued;
 462
 463	if (!count)
 464		return 0;
 465
 466	if (!priv)
 467		return -ENODEV;
 468
 469	spin_lock(&priv->lock);
 470	queued = hvc_iucv_queue(priv, buf, count);
 471	spin_unlock(&priv->lock);
 472
 473	return queued;
 474}
 475
 476/**
 477 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
 478 * @hp:	Pointer to the HVC device (struct hvc_struct)
 479 * @id:	Additional data (originally passed to hvc_alloc): the index of a struct
 480 *	hvc_iucv_private instance.
 481 *
 482 * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
 483 * instance that is derived from @id. Always returns 0.
 484 *
 485 * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
 486 */
 487static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
 488{
 489	struct hvc_iucv_private *priv;
 490
 491	priv = hvc_iucv_get_private(id);
 492	if (!priv)
 493		return 0;
 494
 495	spin_lock_bh(&priv->lock);
 496	priv->tty_state = TTY_OPENED;
 497	spin_unlock_bh(&priv->lock);
 498
 499	return 0;
 500}
 501
 502/**
 503 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
 504 * @priv:	Pointer to the struct hvc_iucv_private instance.
 505 */
 506static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
 507{
 508	destroy_tty_buffer_list(&priv->tty_outqueue);
 509	destroy_tty_buffer_list(&priv->tty_inqueue);
 510
 511	priv->tty_state = TTY_CLOSED;
 512	priv->iucv_state = IUCV_DISCONN;
 513
 514	priv->sndbuf_len = 0;
 515}
 516
 517/**
 518 * tty_outqueue_empty() - Test if the tty outq is empty
 519 * @priv:	Pointer to struct hvc_iucv_private instance.
 520 */
 521static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
 522{
 523	int rc;
 524
 525	spin_lock_bh(&priv->lock);
 526	rc = list_empty(&priv->tty_outqueue);
 527	spin_unlock_bh(&priv->lock);
 528
 529	return rc;
 530}
 531
 532/**
 533 * flush_sndbuf_sync() - Flush send buffer and wait for completion
 534 * @priv:	Pointer to struct hvc_iucv_private instance.
 535 *
 536 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
 537 * to flush any buffered terminal output data and waits for completion.
 538 */
 539static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
 540{
 541	int sync_wait;
 542
 543	cancel_delayed_work_sync(&priv->sndbuf_work);
 544
 545	spin_lock_bh(&priv->lock);
 546	hvc_iucv_send(priv);		/* force sending buffered data */
 547	sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
 548	spin_unlock_bh(&priv->lock);
 549
 550	if (sync_wait)
 551		wait_event_timeout(priv->sndbuf_waitq,
 552				   tty_outqueue_empty(priv), HZ/10);
 553}
 554
 555/**
 556 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
 557 * @priv:	Pointer to hvc_iucv_private structure
 558 *
 559 * This routine severs an existing IUCV communication path and hangs
 560 * up the underlying HVC terminal device.
 561 * The hang-up occurs only if an IUCV communication path is established;
 562 * otherwise there is no need to hang up the terminal device.
 563 *
 564 * The IUCV HVC hang-up is separated into two steps:
 565 * 1. After the IUCV path has been severed, the iucv_state is set to
 566 *    IUCV_SEVERED.
 567 * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
 568 *    IUCV_SEVERED state causes the tty hang-up in the HVC layer.
 569 *
 570 * If the tty has not yet been opened, clean up the hvc_iucv_private
 571 * structure to allow re-connects.
 572 * If the tty has been opened, let get_chars() return -EPIPE to signal the
 573 * HVC layer to hang up the tty, and wake up the HVC thread so that it
 574 * calls get_chars()...
 575 *
 576 * Special notes on hanging up an HVC terminal instantiated as console:
 577 * Hang-up:	1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
 578 *		2. do_tty_hangup() calls tty->ops->close() for console_filp
 579 *			=> no hangup notifier is called by HVC (default)
 580 *		3. hvc_close() returns because of tty_hung_up_p(filp)
 581 *			=> no delete notifier is called!
 582 * Hence, the back-end is not notified and the tty session is
 583 * kept active (TTY_OPENED) to be ready for re-connects.
 584 *
 585 * Locking:	spin_lock(&priv->lock) w/o disabling bh
 586 */
 587static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
 588{
 589	struct iucv_path *path;
 590
 591	path = NULL;
 592	spin_lock(&priv->lock);
 593	if (priv->iucv_state == IUCV_CONNECTED) {
 594		path = priv->path;
 595		priv->path = NULL;
 596		priv->iucv_state = IUCV_SEVERED;
 597		if (priv->tty_state == TTY_CLOSED)
 598			hvc_iucv_cleanup(priv);
 599		else
 600			/* console is special (see above) */
 601			if (priv->is_console) {
 602				hvc_iucv_cleanup(priv);
 603				priv->tty_state = TTY_OPENED;
 604			} else
 605				hvc_kick();
 606	}
 607	spin_unlock(&priv->lock);
 608
 609	/* finally sever path (outside of priv->lock due to lock ordering) */
 610	if (path) {
 611		iucv_path_sever(path, NULL);
 612		iucv_path_free(path);
 613	}
 614}
 615
 616/**
 617 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 618 * @hp:		Pointer to the HVC device (struct hvc_struct)
 619 * @id:		Additional data (originally passed to hvc_alloc):
 620 *		the index of a struct hvc_iucv_private instance.
 621 *
 622 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
 623 * virtual or otherwise) has occurred.
 624 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
 625 * to keep an existing IUCV communication path established.
 626 * (Background: vhangup() is called from user space (by getty or login) to
 627 *		disable writing to the tty by other applications).
 628 * If the tty has been opened and an established IUCV path has been severed
 629 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
 630 *
 631 * Locking:	struct hvc_iucv_private->lock
 632 */
 633static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
 634{
 635	struct hvc_iucv_private *priv;
 636
 637	priv = hvc_iucv_get_private(id);
 638	if (!priv)
 639		return;
 640
 641	flush_sndbuf_sync(priv);
 642
 643	spin_lock_bh(&priv->lock);
 644	/* NOTE: If the hangup was scheduled by ourselves (from the iucv
 645	 *	 path_severed callback [IUCV_SEVERED]), we have to clean up
 646	 *	 our structure and set the state to TTY_CLOSED.
 647	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
 648	 *	 ignore this hangup and keep an established IUCV path open...
 649	 *	 (...the reason is that we are not able to connect back to the
 650	 *	 client if we disconnect on hang up) */
 651	priv->tty_state = TTY_CLOSED;
 652
 653	if (priv->iucv_state == IUCV_SEVERED)
 654		hvc_iucv_cleanup(priv);
 655	spin_unlock_bh(&priv->lock);
 656}
 657
 658/**
 659 * hvc_iucv_dtr_rts() - HVC notifier for handling DTR/RTS
 660 * @hp:		Pointer to the HVC device (struct hvc_struct)
 661 * @active:	True to raise or false to lower DTR/RTS lines
 662 *
 663 * This routine notifies the HVC back-end to raise or lower DTR/RTS
 664 * lines.  Raising DTR/RTS is ignored.  Lowering DTR/RTS indicates that
 665 * the IUCV connection should be dropped (similar to hanging up a modem).
 666 */
 667static void hvc_iucv_dtr_rts(struct hvc_struct *hp, bool active)
 668{
 669	struct hvc_iucv_private *priv;
 670	struct iucv_path        *path;
 671
 672	/* Raising the DTR/RTS is ignored as IUCV connections can be
 673	 * established at any time.
 674	 */
 675	if (active)
 676		return;
 677
 678	priv = hvc_iucv_get_private(hp->vtermno);
 679	if (!priv)
 680		return;
 681
 682	/* Lowering the DTR/RTS lines disconnects an established IUCV
 683	 * connection.
 684	 */
 685	flush_sndbuf_sync(priv);
 686
 687	spin_lock_bh(&priv->lock);
 688	path = priv->path;		/* save reference to IUCV path */
 689	priv->path = NULL;
 690	priv->iucv_state = IUCV_DISCONN;
 691	spin_unlock_bh(&priv->lock);
 692
 693	/* Sever IUCV path outside of priv->lock due to lock ordering of:
 694	 * priv->lock <--> iucv_table_lock */
 695	if (path) {
 696		iucv_path_sever(path, NULL);
 697		iucv_path_free(path);
 698	}
 699}
 700
 701/**
 702 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 703 * @hp:		Pointer to the HVC device (struct hvc_struct)
 704 * @id:		Additional data (originally passed to hvc_alloc):
 705 *		the index of a struct hvc_iucv_private instance.
 706 *
 707 * This routine notifies the HVC back-end that the last tty device fd has been
 708 * closed.  The function cleans up tty resources.  The clean-up of the IUCV
 709 * connection is done in hvc_iucv_dtr_rts() and depends on the HUPCL termios
 710 * control setting.
 711 *
 712 * Locking:	struct hvc_iucv_private->lock
 713 */
 714static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
 715{
 716	struct hvc_iucv_private *priv;
 717
 718	priv = hvc_iucv_get_private(id);
 719	if (!priv)
 720		return;
 721
 722	flush_sndbuf_sync(priv);
 723
 724	spin_lock_bh(&priv->lock);
 725	destroy_tty_buffer_list(&priv->tty_outqueue);
 726	destroy_tty_buffer_list(&priv->tty_inqueue);
 727	priv->tty_state = TTY_CLOSED;
 728	priv->sndbuf_len = 0;
 729	spin_unlock_bh(&priv->lock);
 730}
 731
 732/**
 733 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 734 * @ipvmid:	Originating z/VM user ID (right padded with blanks)
 735 *
 736 * Returns 0 if the z/VM user ID that is specified with @ipvmid is permitted to
 737 * connect, otherwise non-zero.
 738 */
 739static int hvc_iucv_filter_connreq(u8 ipvmid[8])
 740{
 741	const char *wildcard, *filter_entry;
 742	size_t i, len;
 743
 744	/* Note: default policy is ACCEPT if no filter is set */
 745	if (!hvc_iucv_filter_size)
 746		return 0;
 747
 748	for (i = 0; i < hvc_iucv_filter_size; i++) {
 749		filter_entry = hvc_iucv_filter + (8 * i);
 750
 751		/* If a filter entry contains the filter wildcard character,
 752		 * reduce the length to match the leading portion of the user
 753		 * ID only (wildcard match).  Characters following the wildcard
 754		 * are ignored.
 755		 */
 756		wildcard = strnchr(filter_entry, 8, FILTER_WILDCARD_CHAR);
 757		len = (wildcard) ? wildcard - filter_entry : 8;
 758		if (0 == memcmp(ipvmid, filter_entry, len))
 759			return 0;
 760	}
 761	return 1;
 762}
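/*
 * For illustration (hypothetical filter contents): with the filter entries
 * "LINUX01 " and "WEBSRV* " (each blank-padded to 8 bytes), the z/VM user
 * ID "LINUX01 " matches the first entry exactly, while any user ID that
 * starts with "WEBSRV", e.g. "WEBSRV05", matches the second entry because
 * only the six characters preceding the wildcard are compared.
 */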
 763
 764/**
 765 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 766 * @path:	Pending path (struct iucv_path)
 767 * @ipvmid:	z/VM system identifier of originator
 768 * @ipuser:	User specified data for this path
 769 *		(AF_IUCV: port/service name and originator port)
 770 *
 771 * The function uses the @ipuser data to determine if the pending path belongs
 772 * to a terminal managed by this device driver.
 773 * If the path belongs to this driver, ensure that the terminal is not accessed
 774 * multiple times (only one connection to a terminal is allowed).
 775 * If the terminal is not yet connected, the pending path is accepted and is
 776 * associated to the appropriate struct hvc_iucv_private instance.
 777 *
 778 * Returns 0 if @path belongs to a terminal managed by this device driver;
 779 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 780 *
 781 * Locking:	struct hvc_iucv_private->lock
 782 */
 783static	int hvc_iucv_path_pending(struct iucv_path *path, u8 *ipvmid,
 784				  u8 *ipuser)
 785{
 786	struct hvc_iucv_private *priv, *tmp;
 787	u8 wildcard[9] = "lnxhvc  ";
 788	int i, rc, find_unused;
 789	u8 nuser_data[16];
 790	u8 vm_user_id[9];
 791
 792	ASCEBC(wildcard, sizeof(wildcard));
 793	find_unused = !memcmp(wildcard, ipuser, 8);
 794
 795	/* First, check if the pending path request is managed by this
 796	 * IUCV handler:
 797	 * - find a disconnected device if ipuser contains the wildcard
 798	 * - find the device that matches the terminal ID in ipuser
 799	 */
 800	priv = NULL;
 801	for (i = 0; i < hvc_iucv_devices; i++) {
 802		tmp = hvc_iucv_table[i];
 803		if (!tmp)
 804			continue;
 805
 806		if (find_unused) {
 807			spin_lock(&tmp->lock);
 808			if (tmp->iucv_state == IUCV_DISCONN)
 809				priv = tmp;
 810			spin_unlock(&tmp->lock);
 811
 812		} else if (!memcmp(tmp->srv_name, ipuser, 8))
 813				priv = tmp;
 814		if (priv)
 815			break;
 816	}
 817	if (!priv)
 818		return -ENODEV;
 819
 820	/* Enforce that ipvmid is allowed to connect to us */
 821	read_lock(&hvc_iucv_filter_lock);
 822	rc = hvc_iucv_filter_connreq(ipvmid);
 823	read_unlock(&hvc_iucv_filter_lock);
 824	if (rc) {
 825		iucv_path_sever(path, ipuser);
 826		iucv_path_free(path);
 827		memcpy(vm_user_id, ipvmid, 8);
 828		vm_user_id[8] = 0;
 829		pr_info("A connection request from z/VM user ID %s "
 830			"was refused\n", vm_user_id);
 831		return 0;
 832	}
 833
 834	spin_lock(&priv->lock);
 835
 836	/* If the terminal is already connected or being severed, then sever
 837	 * this path to enforce that there is only ONE established communication
 838	 * path per terminal. */
 839	if (priv->iucv_state != IUCV_DISCONN) {
 840		iucv_path_sever(path, ipuser);
 841		iucv_path_free(path);
 842		goto out_path_handled;
 843	}
 844
 845	/* accept path */
 846	memcpy(nuser_data, ipuser + 8, 8);  /* remote service (for af_iucv) */
 847	memcpy(nuser_data + 8, ipuser, 8);  /* local service  (for af_iucv) */
 848	path->msglim = 0xffff;		    /* IUCV MSGLIMIT */
 849	path->flags &= ~IUCV_IPRMDATA;	    /* TODO: use IUCV_IPRMDATA */
 850	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
 851	if (rc) {
 852		iucv_path_sever(path, ipuser);
 853		iucv_path_free(path);
 854		goto out_path_handled;
 855	}
 856	priv->path = path;
 857	priv->iucv_state = IUCV_CONNECTED;
 858
 859	/* store path information */
 860	memcpy(priv->info_path, ipvmid, 8);
 861	memcpy(priv->info_path + 8, ipuser + 8, 8);
 862
 863	/* flush buffered output data... */
 864	schedule_delayed_work(&priv->sndbuf_work, 5);
 865
 866out_path_handled:
 867	spin_unlock(&priv->lock);
 868	return 0;
 869}
 870
 871/**
 872 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 873 * @path:	Pending path (struct iucv_path)
 874 * @ipuser:	User specified data for this path
 875 *		(AF_IUCV: port/service name and originator port)
 876 *
 877 * This function calls the hvc_iucv_hangup() function for the
 878 * respective IUCV HVC terminal.
 879 *
 880 * Locking:	struct hvc_iucv_private->lock
 881 */
 882static void hvc_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
 883{
 884	struct hvc_iucv_private *priv = path->private;
 885
 886	hvc_iucv_hangup(priv);
 887}
 888
 889/**
 890 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 891 * @path:	Pending path (struct iucv_path)
 892 * @msg:	Pointer to the IUCV message
 893 *
 894 * The function puts an incoming message on the input queue for later
 895 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 896 * If the tty has not yet been opened, the message is rejected.
 897 *
 898 * Locking:	struct hvc_iucv_private->lock
 899 */
 900static void hvc_iucv_msg_pending(struct iucv_path *path,
 901				 struct iucv_message *msg)
 902{
 903	struct hvc_iucv_private *priv = path->private;
 904	struct iucv_tty_buffer *rb;
 905
 906	/* reject messages that exceed max size of iucv_tty_msg->datalen */
 907	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
 908		iucv_message_reject(path, msg);
 909		return;
 910	}
 911
 912	spin_lock(&priv->lock);
 913
 914	/* reject messages if tty has not yet been opened */
 915	if (priv->tty_state == TTY_CLOSED) {
 916		iucv_message_reject(path, msg);
 917		goto unlock_return;
 918	}
 919
 920	/* allocate tty buffer to save iucv msg only */
 921	rb = alloc_tty_buffer(0, GFP_ATOMIC);
 922	if (!rb) {
 923		iucv_message_reject(path, msg);
 924		goto unlock_return;	/* -ENOMEM */
 925	}
 926	rb->msg = *msg;
 927
 928	list_add_tail(&rb->list, &priv->tty_inqueue);
 929
 930	hvc_kick();	/* wake up hvc thread */
 931
 932unlock_return:
 933	spin_unlock(&priv->lock);
 934}
 935
 936/**
 937 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 938 * @path:	Pending path (struct iucv_path)
 939 * @msg:	Pointer to the IUCV message
 940 *
 941 * The function is called upon completion of message delivery to remove the
 942 * message from the outqueue. Additional delivery information can be found in
 943 * msg->audit: rejected messages (0x040000 (IPADRJCT)), and
 944 *	       purged messages	 (0x010000 (IPADPGNR)).
 945 *
 946 * Locking:	struct hvc_iucv_private->lock
 947 */
 948static void hvc_iucv_msg_complete(struct iucv_path *path,
 949				  struct iucv_message *msg)
 950{
 951	struct hvc_iucv_private *priv = path->private;
 952	struct iucv_tty_buffer	*ent, *next;
 953	LIST_HEAD(list_remove);
 954
 955	spin_lock(&priv->lock);
 956	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
 957		if (ent->msg.id == msg->id) {
 958			list_move(&ent->list, &list_remove);
 959			break;
 960		}
 961	wake_up(&priv->sndbuf_waitq);
 962	spin_unlock(&priv->lock);
 963	destroy_tty_buffer_list(&list_remove);
 964}
 965
 966static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
 967					struct device_attribute *attr,
 968					char *buf)
 969{
 970	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
 971	size_t len;
 972
 973	len = sizeof(priv->srv_name);
 974	memcpy(buf, priv->srv_name, len);
 975	EBCASC(buf, len);
 976	buf[len++] = '\n';
 977	return len;
 978}
 979
 980static ssize_t hvc_iucv_dev_state_show(struct device *dev,
 981					struct device_attribute *attr,
 982					char *buf)
 983{
 984	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
 985	return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state);
 986}
 987
 988static ssize_t hvc_iucv_dev_peer_show(struct device *dev,
 989				      struct device_attribute *attr,
 990				      char *buf)
 991{
 992	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
 993	char vmid[9], ipuser[9];
 994
 995	memset(vmid, 0, sizeof(vmid));
 996	memset(ipuser, 0, sizeof(ipuser));
 997
 998	spin_lock_bh(&priv->lock);
 999	if (priv->iucv_state == IUCV_CONNECTED) {
1000		memcpy(vmid, priv->info_path, 8);
1001		memcpy(ipuser, priv->info_path + 8, 8);
1002	}
1003	spin_unlock_bh(&priv->lock);
1004	EBCASC(ipuser, 8);
1005
1006	return sprintf(buf, "%s:%s\n", vmid, ipuser);
1007}
1008
1009
1010/* HVC operations */
1011static const struct hv_ops hvc_iucv_ops = {
1012	.get_chars = hvc_iucv_get_chars,
1013	.put_chars = hvc_iucv_put_chars,
1014	.notifier_add = hvc_iucv_notifier_add,
1015	.notifier_del = hvc_iucv_notifier_del,
1016	.notifier_hangup = hvc_iucv_notifier_hangup,
1017	.dtr_rts = hvc_iucv_dtr_rts,
1018};
1019
1020/* IUCV HVC device attributes */
1021static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
1022static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
1023static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);
1024static struct attribute *hvc_iucv_dev_attrs[] = {
1025	&dev_attr_termid.attr,
1026	&dev_attr_state.attr,
1027	&dev_attr_peer.attr,
1028	NULL,
1029};
1030static struct attribute_group hvc_iucv_dev_attr_group = {
1031	.attrs = hvc_iucv_dev_attrs,
1032};
1033static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
1034	&hvc_iucv_dev_attr_group,
1035	NULL,
1036};
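/*
 * For illustration (paths assume the standard driver core layout): the
 * attributes above are attached to the IUCV bus device that is created in
 * hvc_iucv_alloc(), so on a running system they might be read as
 *
 *	# cat /sys/bus/iucv/devices/hvc_iucv0/termid
 *	lnxhvc0
 *	# cat /sys/bus/iucv/devices/hvc_iucv0/state
 *	1:1
 *
 * where "1:1" stands for iucv_state:tty_state, here IUCV_CONNECTED and
 * TTY_OPENED.
 */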
1037
1038
1039/**
1040 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
1041 * @id:			hvc_iucv_table index
1042 * @is_console:		Flag indicating whether the instance is used as the Linux console
1043 *
1044 * This function allocates a new hvc_iucv_private structure and stores
1045 * the instance in hvc_iucv_table at index @id.
1046 * Returns 0 on success; otherwise non-zero.
1047 */
1048static int __init hvc_iucv_alloc(int id, unsigned int is_console)
1049{
1050	struct hvc_iucv_private *priv;
1051	char name[9];
1052	int rc;
1053
1054	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
1055	if (!priv)
1056		return -ENOMEM;
1057
1058	spin_lock_init(&priv->lock);
1059	INIT_LIST_HEAD(&priv->tty_outqueue);
1060	INIT_LIST_HEAD(&priv->tty_inqueue);
1061	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
1062	init_waitqueue_head(&priv->sndbuf_waitq);
1063
1064	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
1065	if (!priv->sndbuf) {
1066		kfree(priv);
1067		return -ENOMEM;
1068	}
1069
1070	/* set console flag */
1071	priv->is_console = is_console;
1072
1073	/* allocate hvc device */
1074	priv->hvc = hvc_alloc(id, /*		 PAGE_SIZE */
1075			      id, &hvc_iucv_ops, 256);
1076	if (IS_ERR(priv->hvc)) {
1077		rc = PTR_ERR(priv->hvc);
1078		goto out_error_hvc;
1079	}
1080
1081	/* notify HVC thread instead of using polling */
1082	priv->hvc->irq_requested = 1;
1083
1084	/* setup iucv related information */
1085	snprintf(name, 9, "lnxhvc%-2d", id);
1086	memcpy(priv->srv_name, name, 8);
1087	ASCEBC(priv->srv_name, 8);
1088
1089	/* create and setup device */
1090	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
1091	if (!priv->dev) {
1092		rc = -ENOMEM;
1093		goto out_error_dev;
1094	}
1095	dev_set_name(priv->dev, "hvc_iucv%d", id);
1096	dev_set_drvdata(priv->dev, priv);
1097	priv->dev->bus = &iucv_bus;
1098	priv->dev->parent = iucv_root;
1099	priv->dev->groups = hvc_iucv_dev_attr_groups;
1100	priv->dev->release = (void (*)(struct device *)) kfree;
1101	rc = device_register(priv->dev);
1102	if (rc) {
1103		put_device(priv->dev);
1104		goto out_error_dev;
1105	}
1106
1107	hvc_iucv_table[id] = priv;
1108	return 0;
1109
1110out_error_dev:
1111	hvc_remove(priv->hvc);
1112out_error_hvc:
1113	free_page((unsigned long) priv->sndbuf);
1114	kfree(priv);
1115
1116	return rc;
1117}
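/*
 * For illustration: the snprintf() above produces left-justified,
 * blank-padded service names such as "lnxhvc0 " for id 0 or "lnxhvc3 "
 * for id 3, which are then converted to EBCDIC.  A peer on z/VM connects
 * to one of these names (or to the "lnxhvc  " wildcard handled in
 * hvc_iucv_path_pending()), for example with the iucvconn(1) client from
 * s390-tools; the exact client used is outside the scope of this driver.
 */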
1118
1119/**
1120 * hvc_iucv_destroy() - Destroy and free a hvc_iucv_private instance
1121 */
1122static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
1123{
1124	hvc_remove(priv->hvc);
1125	device_unregister(priv->dev);
1126	free_page((unsigned long) priv->sndbuf);
1127	kfree(priv);
1128}
1129
1130/**
1131 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
1132 * @filter:	String containing a comma-separated list of z/VM user IDs
1133 * @dest:	Location to store the parsed z/VM user ID
1134 */
1135static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
1136{
1137	const char *nextdelim, *residual;
1138	size_t len;
1139
1140	nextdelim = strchr(filter, ',');
1141	if (nextdelim) {
1142		len = nextdelim - filter;
1143		residual = nextdelim + 1;
1144	} else {
1145		len = strlen(filter);
1146		residual = filter + len;
1147	}
1148
1149	if (len == 0)
1150		return ERR_PTR(-EINVAL);
1151
1152	/* check for '\n' (if called from sysfs) */
1153	if (filter[len - 1] == '\n')
1154		len--;
1155
1156	/* prohibit filter entries containing the wildcard character only */
1157	if (len == 1 && *filter == FILTER_WILDCARD_CHAR)
1158		return ERR_PTR(-EINVAL);
1159
1160	if (len > 8)
1161		return ERR_PTR(-EINVAL);
1162
1163	/* pad with blanks and save upper case version of user ID */
1164	memset(dest, ' ', 8);
1165	while (len--)
1166		dest[len] = toupper(filter[len]);
1167	return residual;
1168}
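/*
 * For illustration (hypothetical input): parsing "lxguest1,web*\n" yields
 * the two 8-byte entries "LXGUEST1" and "WEB*    ".  Each user ID is
 * folded to upper case and blank-padded to eight characters, and a
 * trailing newline (as written via sysfs) is dropped.
 */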
1169
1170/**
1171 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
1172 * @val:	String consisting of a comma-separated list of z/VM user IDs
1173 *
1174 * The function parses the @val string and creates an array containing
1175 * the list of z/VM user ID filter entries.
1176 * Return code 0 means success, -EINVAL if the filter is syntactically
1177 * incorrect, -ENOMEM if there was not enough memory to allocate the
1178 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
1179 */
1180static int hvc_iucv_setup_filter(const char *val)
1181{
1182	const char *residual;
1183	int err;
1184	size_t size, count;
1185	void *array, *old_filter;
1186
1187	count = strlen(val);
1188	if (count == 0 || (count == 1 && val[0] == '\n')) {
1189		size  = 0;
1190		array = NULL;
1191		goto out_replace_filter;	/* clear filter */
1192	}
1193
1194	/* count user IDs in order to allocate sufficient memory */
1195	size = 1;
1196	residual = val;
1197	while ((residual = strchr(residual, ',')) != NULL) {
1198		residual++;
1199		size++;
1200	}
1201
1202	/* check if the specified list exceeds the filter limit */
1203	if (size > MAX_VMID_FILTER)
1204		return -ENOSPC;
1205
1206	array = kcalloc(size, 8, GFP_KERNEL);
1207	if (!array)
1208		return -ENOMEM;
1209
1210	count = size;
1211	residual = val;
1212	while (*residual && count) {
1213		residual = hvc_iucv_parse_filter(residual,
1214						 array + ((size - count) * 8));
1215		if (IS_ERR(residual)) {
1216			err = PTR_ERR(residual);
1217			kfree(array);
1218			goto out_err;
1219		}
1220		count--;
1221	}
1222
1223out_replace_filter:
1224	write_lock_bh(&hvc_iucv_filter_lock);
1225	old_filter = hvc_iucv_filter;
1226	hvc_iucv_filter_size = size;
1227	hvc_iucv_filter = array;
1228	write_unlock_bh(&hvc_iucv_filter_lock);
1229	kfree(old_filter);
1230
1231	err = 0;
1232out_err:
1233	return err;
1234}
1235
1236/**
1237 * param_set_vmidfilter() - Set z/VM user ID filter parameter
1238 * @val:	String consisting of a comma-separated list of z/VM user IDs
1239 * @kp:		Kernel parameter pointing to hvc_iucv_filter array
1240 *
1241 * The function sets up the z/VM user ID filter specified as comma-separated
1242 * list of user IDs in @val.
1243 * Note: If it is called early in the boot process, @val is stored and
1244 *	 parsed later in hvc_iucv_init().
1245 */
1246static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
1247{
1248	int rc;
1249
1250	if (!MACHINE_IS_VM || !hvc_iucv_devices)
1251		return -ENODEV;
1252
1253	if (!val)
1254		return -EINVAL;
1255
1256	rc = 0;
1257	if (slab_is_available())
1258		rc = hvc_iucv_setup_filter(val);
1259	else
1260		hvc_iucv_filter_string = val;	/* defer... */
1261	return rc;
1262}
1263
1264/**
1265 * param_get_vmidfilter() - Get z/VM user ID filter
1266 * @buffer:	Buffer to store the z/VM user ID filter
1267 *		(the buffer size is assumed to be PAGE_SIZE)
1268 * @kp:		Kernel parameter pointing to the hvc_iucv_filter array
1269 *
1270 * The function stores the filter as a comma-separated list of z/VM user IDs
1271 * in @buffer. Typically, sysfs routines call this function for attr show.
1272 */
1273static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
1274{
1275	int rc;
1276	size_t index, len;
1277	void *start, *end;
1278
1279	if (!MACHINE_IS_VM || !hvc_iucv_devices)
1280		return -ENODEV;
1281
1282	rc = 0;
1283	read_lock_bh(&hvc_iucv_filter_lock);
1284	for (index = 0; index < hvc_iucv_filter_size; index++) {
1285		start = hvc_iucv_filter + (8 * index);
1286		end   = memchr(start, ' ', 8);
1287		len   = (end) ? end - start : 8;
1288		memcpy(buffer + rc, start, len);
1289		rc += len;
1290		buffer[rc++] = ',';
1291	}
1292	read_unlock_bh(&hvc_iucv_filter_lock);
1293	if (rc)
1294		buffer[--rc] = '\0';	/* replace last comma and update rc */
1295	return rc;
1296}
1297
1298#define param_check_vmidfilter(name, p) __param_check(name, p, void)
1299
1300static const struct kernel_param_ops param_ops_vmidfilter = {
1301	.set = param_set_vmidfilter,
1302	.get = param_get_vmidfilter,
1303};
1304
1305/**
1306 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
1307 */
1308static int __init hvc_iucv_init(void)
1309{
1310	int rc;
1311	unsigned int i;
1312
1313	if (!hvc_iucv_devices)
1314		return -ENODEV;
1315
1316	if (!MACHINE_IS_VM) {
1317		pr_notice("The z/VM IUCV HVC device driver cannot "
1318			   "be used without z/VM\n");
1319		rc = -ENODEV;
1320		goto out_error;
1321	}
1322
1323	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
1324		pr_err("%lu is not a valid value for the hvc_iucv= "
1325			"kernel parameter\n", hvc_iucv_devices);
1326		rc = -EINVAL;
1327		goto out_error;
1328	}
1329
1330	/* parse hvc_iucv_allow string and create z/VM user ID filter list */
1331	if (hvc_iucv_filter_string) {
1332		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
1333		switch (rc) {
1334		case 0:
1335			break;
1336		case -ENOMEM:
1337			pr_err("Allocating memory failed with "
1338				"reason code=%d\n", 3);
1339			goto out_error;
1340		case -EINVAL:
1341			pr_err("hvc_iucv_allow= does not specify a valid "
1342				"z/VM user ID list\n");
1343			goto out_error;
1344		case -ENOSPC:
1345			pr_err("hvc_iucv_allow= specifies too many "
1346				"z/VM user IDs\n");
1347			goto out_error;
1348		default:
1349			goto out_error;
1350		}
1351	}
1352
1353	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
1354					   sizeof(struct iucv_tty_buffer),
1355					   0, 0, NULL);
1356	if (!hvc_iucv_buffer_cache) {
1357		pr_err("Allocating memory failed with reason code=%d\n", 1);
1358		rc = -ENOMEM;
1359		goto out_error;
1360	}
1361
1362	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
1363						    hvc_iucv_buffer_cache);
1364	if (!hvc_iucv_mempool) {
1365		pr_err("Allocating memory failed with reason code=%d\n", 2);
1366		kmem_cache_destroy(hvc_iucv_buffer_cache);
1367		rc = -ENOMEM;
1368		goto out_error;
1369	}
1370
1371	/* register the first terminal device as console
1372	 * (must be done before allocating hvc terminal devices) */
1373	rc = hvc_instantiate(0, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
1374	if (rc) {
1375		pr_err("Registering HVC terminal device as "
1376		       "Linux console failed\n");
1377		goto out_error_memory;
1378	}
1379
1380	/* allocate hvc_iucv_private structs */
1381	for (i = 0; i < hvc_iucv_devices; i++) {
1382		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
1383		if (rc) {
1384			pr_err("Creating a new HVC terminal device "
1385				"failed with error code=%d\n", rc);
1386			goto out_error_hvc;
1387		}
1388	}
1389
1390	/* register IUCV callback handler */
1391	rc = iucv_register(&hvc_iucv_handler, 0);
1392	if (rc) {
1393		pr_err("Registering IUCV handlers failed with error code=%d\n",
1394			rc);
1395		goto out_error_hvc;
1396	}
1397
1398	return 0;
1399
1400out_error_hvc:
1401	for (i = 0; i < hvc_iucv_devices; i++)
1402		if (hvc_iucv_table[i])
1403			hvc_iucv_destroy(hvc_iucv_table[i]);
1404out_error_memory:
1405	mempool_destroy(hvc_iucv_mempool);
1406	kmem_cache_destroy(hvc_iucv_buffer_cache);
1407out_error:
1408	kfree(hvc_iucv_filter);
1409	hvc_iucv_devices = 0; /* ensure that we do not provide any device */
1410	return rc;
1411}
1412
1413/**
1414 * hvc_iucv_config() - Parse the hvc_iucv= kernel command line parameter
1415 * @val:	Parameter value (numeric)
1416 */
1417static	int __init hvc_iucv_config(char *val)
1418{
1419	if (kstrtoul(val, 10, &hvc_iucv_devices))
1420		pr_warn("hvc_iucv= invalid parameter value '%s'\n", val);
1421	return 1;
1422}
1423
1424
1425device_initcall(hvc_iucv_init);
1426__setup("hvc_iucv=", hvc_iucv_config);
1427core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
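/*
 * Usage sketch (kernel parameters as defined above; guest names are made
 * up): booting a z/VM guest with
 *
 *	hvc_iucv=4 hvc_iucv_allow=lxtermd,webadm* console=hvc0
 *
 * provides four IUCV HVC terminals, selects the first one (index 0,
 * /dev/hvc0) as a Linux console, and restricts incoming connections to
 * the z/VM user ID LXTERMD plus any user ID beginning with WEBADM.
 */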
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * z/VM IUCV hypervisor console (HVC) device driver
   4 *
   5 * This HVC device driver provides terminal access using
   6 * z/VM IUCV communication paths.
   7 *
   8 * Copyright IBM Corp. 2008, 2013
   9 *
  10 * Author(s):	Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  11 */
  12#define KMSG_COMPONENT		"hvc_iucv"
  13#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt
  14
  15#include <linux/types.h>
  16#include <linux/slab.h>
  17#include <asm/ebcdic.h>
  18#include <linux/ctype.h>
  19#include <linux/delay.h>
  20#include <linux/device.h>
  21#include <linux/init.h>
  22#include <linux/mempool.h>
  23#include <linux/moduleparam.h>
  24#include <linux/tty.h>
  25#include <linux/wait.h>
  26#include <net/iucv/iucv.h>
  27
  28#include "hvc_console.h"
  29
  30
  31/* General device driver settings */
  32#define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS
  33#define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
  34
  35/* IUCV TTY message  */
  36#define MSG_VERSION		0x02	/* Message version */
  37#define MSG_TYPE_ERROR		0x01	/* Error message */
  38#define MSG_TYPE_TERMENV	0x02	/* Terminal environment variable */
  39#define MSG_TYPE_TERMIOS	0x04	/* Terminal IO struct update */
  40#define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */
  41#define MSG_TYPE_DATA		0x10	/* Terminal data */
  42
  43struct iucv_tty_msg {
  44	u8	version;		/* Message version */
  45	u8	type;			/* Message type */
  46#define MSG_MAX_DATALEN		((u16)(~0))
  47	u16	datalen;		/* Payload length */
  48	u8	data[];			/* Payload buffer */
  49} __attribute__((packed));
  50#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))
  51
  52enum iucv_state_t {
  53	IUCV_DISCONN	= 0,
  54	IUCV_CONNECTED	= 1,
  55	IUCV_SEVERED	= 2,
  56};
  57
  58enum tty_state_t {
  59	TTY_CLOSED	= 0,
  60	TTY_OPENED	= 1,
  61};
  62
  63struct hvc_iucv_private {
  64	struct hvc_struct	*hvc;		/* HVC struct reference */
  65	u8			srv_name[8];	/* IUCV service name (ebcdic) */
  66	unsigned char		is_console;	/* Linux console usage flag */
  67	enum iucv_state_t	iucv_state;	/* IUCV connection status */
  68	enum tty_state_t	tty_state;	/* TTY status */
  69	struct iucv_path	*path;		/* IUCV path pointer */
  70	spinlock_t		lock;		/* hvc_iucv_private lock */
  71#define SNDBUF_SIZE		(PAGE_SIZE)	/* must be < MSG_MAX_DATALEN */
  72	void			*sndbuf;	/* send buffer		  */
  73	size_t			sndbuf_len;	/* length of send buffer  */
  74#define QUEUE_SNDBUF_DELAY	(HZ / 25)
  75	struct delayed_work	sndbuf_work;	/* work: send iucv msg(s) */
  76	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */
  77	struct list_head	tty_outqueue;	/* outgoing IUCV messages */
  78	struct list_head	tty_inqueue;	/* incoming IUCV messages */
  79	struct device		*dev;		/* device structure */
  80	u8			info_path[16];	/* IUCV path info (dev attr) */
  81};
  82
  83struct iucv_tty_buffer {
  84	struct list_head	list;	/* list pointer */
  85	struct iucv_message	msg;	/* store an IUCV message */
  86	size_t			offset;	/* data buffer offset */
  87	struct iucv_tty_msg	*mbuf;	/* buffer to store input/output data */
  88};
  89
  90/* IUCV callback handler */
  91static	int hvc_iucv_path_pending(struct iucv_path *, u8 *, u8 *);
  92static void hvc_iucv_path_severed(struct iucv_path *, u8 *);
  93static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
  94static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
  95
  96
  97/* Kernel module parameter: use one terminal device as default */
  98static unsigned long hvc_iucv_devices = 1;
  99
 100/* Array of allocated hvc iucv tty lines... */
 101static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
 102#define IUCV_HVC_CON_IDX	(0)
 103/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
 104#define MAX_VMID_FILTER		(500)
 105#define FILTER_WILDCARD_CHAR	'*'
 106static size_t hvc_iucv_filter_size;
 107static void *hvc_iucv_filter;
 108static const char *hvc_iucv_filter_string;
 109static DEFINE_RWLOCK(hvc_iucv_filter_lock);
 110
 111/* Kmem cache and mempool for iucv_tty_buffer elements */
 112static struct kmem_cache *hvc_iucv_buffer_cache;
 113static mempool_t *hvc_iucv_mempool;
 114
 115/* IUCV handler callback functions */
 116static struct iucv_handler hvc_iucv_handler = {
 117	.path_pending  = hvc_iucv_path_pending,
 118	.path_severed  = hvc_iucv_path_severed,
 119	.message_complete = hvc_iucv_msg_complete,
 120	.message_pending  = hvc_iucv_msg_pending,
 121};
 122
 123
 124/**
 125 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
 126 * @num:	The HVC virtual terminal number (vtermno)
 127 *
 128 * This function returns the struct hvc_iucv_private instance that corresponds
 129 * to the HVC virtual terminal number specified as parameter @num.
 130 */
 131static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
 132{
 133	if (num > hvc_iucv_devices)
 134		return NULL;
 135	return hvc_iucv_table[num];
 136}
 137
 138/**
 139 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
 140 * @size:	Size of the internal buffer used to store data.
 141 * @flags:	Memory allocation flags passed to mempool.
 142 *
 143 * This function allocates a new struct iucv_tty_buffer element and, optionally,
 144 * allocates an internal data buffer with the specified size @size.
 145 * The internal data buffer is always allocated with GFP_DMA which is
 146 * required for receiving and sending data with IUCV.
 147 * Note: The total message size arises from the internal buffer size and the
 148 *	 members of the iucv_tty_msg structure.
 149 * The function returns NULL if memory allocation has failed.
 150 */
 151static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
 152{
 153	struct iucv_tty_buffer *bufp;
 154
 155	bufp = mempool_alloc(hvc_iucv_mempool, flags);
 156	if (!bufp)
 157		return NULL;
 158	memset(bufp, 0, sizeof(*bufp));
 159
 160	if (size > 0) {
 161		bufp->msg.length = MSG_SIZE(size);
 162		bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
 163		if (!bufp->mbuf) {
 164			mempool_free(bufp, hvc_iucv_mempool);
 165			return NULL;
 166		}
 167		bufp->mbuf->version = MSG_VERSION;
 168		bufp->mbuf->type    = MSG_TYPE_DATA;
 169		bufp->mbuf->datalen = (u16) size;
 170	}
 171	return bufp;
 172}
 173
 174/**
 175 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
 176 * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
 177 */
 178static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
 179{
 180	kfree(bufp->mbuf);
 181	mempool_free(bufp, hvc_iucv_mempool);
 182}
 183
 184/**
 185 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
 186 * @list:	List containing struct iucv_tty_buffer elements.
 187 */
 188static void destroy_tty_buffer_list(struct list_head *list)
 189{
 190	struct iucv_tty_buffer *ent, *next;
 191
 192	list_for_each_entry_safe(ent, next, list, list) {
 193		list_del(&ent->list);
 194		destroy_tty_buffer(ent);
 195	}
 196}
 197
 198/**
 199 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 200 * @priv:		Pointer to struct hvc_iucv_private
 201 * @buf:		HVC buffer for writing received terminal data.
 202 * @count:		HVC buffer size.
 203 * @has_more_data:	Pointer to an int variable.
 204 *
 205 * The function picks up pending messages from the input queue and receives
 206 * the message data that is then written to the specified buffer @buf.
 207 * If the buffer size @count is less than the data message size, the
 208 * message is kept on the input queue and @has_more_data is set to 1.
 209 * If all message data has been written, the message is removed from
 210 * the input queue.
 211 *
 212 * The function returns the number of bytes written to the terminal, zero if
 213 * there are no pending data messages available or if there is no established
 214 * IUCV path.
 215 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 216 * hang up (that is issued by the HVC layer).
 217 */
 218static ssize_t hvc_iucv_write(struct hvc_iucv_private *priv,
 219			      u8 *buf, size_t count, int *has_more_data)
 220{
 221	struct iucv_tty_buffer *rb;
 222	ssize_t written;
 223	int rc;
 224
 225	/* immediately return if there is no IUCV connection */
 226	if (priv->iucv_state == IUCV_DISCONN)
 227		return 0;
 228
 229	/* if the IUCV path has been severed, return -EPIPE to inform the
 230	 * HVC layer to hang up the tty device. */
 231	if (priv->iucv_state == IUCV_SEVERED)
 232		return -EPIPE;
 233
 234	/* check if there are pending messages */
 235	if (list_empty(&priv->tty_inqueue))
 236		return 0;
 237
 238	/* receive an iucv message and flip data to the tty (ldisc) */
 239	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
 240
 241	written = 0;
 242	if (!rb->mbuf) { /* message not yet received ... */
 243		/* allocate mem to store msg data; if no memory is available
 244		 * then leave the buffer on the list and re-try later */
 245		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
 246		if (!rb->mbuf)
 247			return -ENOMEM;
 248
 249		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
 250					    rb->mbuf, rb->msg.length, NULL);
 251		switch (rc) {
 252		case 0: /* Successful	    */
 253			break;
 254		case 2:	/* No message found */
 255		case 9: /* Message purged   */
 256			break;
 257		default:
 258			written = -EIO;
 259		}
 260		/* remove buffer if an error has occurred or received data
 261		 * is not correct */
 262		if (rc || (rb->mbuf->version != MSG_VERSION) ||
 263			  (rb->msg.length    != MSG_SIZE(rb->mbuf->datalen)))
 264			goto out_remove_buffer;
 265	}
 266
 267	switch (rb->mbuf->type) {
 268	case MSG_TYPE_DATA:
 269		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
 270		memcpy(buf, rb->mbuf->data + rb->offset, written);
 271		if (written < (rb->mbuf->datalen - rb->offset)) {
 272			rb->offset += written;
 273			*has_more_data = 1;
 274			goto out_written;
 275		}
 276		break;
 277
 278	case MSG_TYPE_WINSIZE:
 279		if (rb->mbuf->datalen != sizeof(struct winsize))
 280			break;
 281		/* The caller must ensure that the hvc is locked, which
 282		 * is the case when called from hvc_iucv_get_chars() */
 283		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
 284		break;
 285
 286	case MSG_TYPE_ERROR:	/* ignored ... */
 287	case MSG_TYPE_TERMENV:	/* ignored ... */
 288	case MSG_TYPE_TERMIOS:	/* ignored ... */
 289		break;
 290	}
 291
 292out_remove_buffer:
 293	list_del(&rb->list);
 294	destroy_tty_buffer(rb);
 295	*has_more_data = !list_empty(&priv->tty_inqueue);
 296
 297out_written:
 298	return written;
 299}
 300
 301/**
 302 * hvc_iucv_get_chars() - HVC get_chars operation.
 303 * @vtermno:	HVC virtual terminal number.
 304 * @buf:	Pointer to a buffer to store data
 305 * @count:	Size of buffer available for writing
 306 *
 307 * The HVC thread calls this method to read characters from the back-end.
 308 * If an IUCV communication path has been established, pending IUCV messages
 309 * are received and data is copied into buffer @buf up to @count bytes.
 310 *
 311 * Locking:	The routine gets called under an irqsave() spinlock; and
 312 *		the routine locks the struct hvc_iucv_private->lock to call
 313 *		helper functions.
 314 */
 315static ssize_t hvc_iucv_get_chars(uint32_t vtermno, u8 *buf, size_t count)
 316{
 317	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
 318	ssize_t written;
 319	int has_more_data;
 320
 321	if (count <= 0)
 322		return 0;
 323
 324	if (!priv)
 325		return -ENODEV;
 326
 327	spin_lock(&priv->lock);
 328	has_more_data = 0;
 329	written = hvc_iucv_write(priv, buf, count, &has_more_data);
 330	spin_unlock(&priv->lock);
 331
 332	/* if there are still messages on the queue... schedule another run */
 333	if (has_more_data)
 334		hvc_kick();
 335
 336	return written;
 337}
 338
 339/**
 340 * hvc_iucv_queue() - Buffer terminal data for sending.
 341 * @priv:	Pointer to struct hvc_iucv_private instance.
 342 * @buf:	Buffer containing data to send.
 343 * @count:	Size of buffer and amount of data to send.
 344 *
 345 * The function queues data for sending. To actually send the buffered data,
 346 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
 347 * The function returns the number of data bytes that have been buffered.
 348 *
 349 * If the device is not connected, data is ignored and the function returns
 350 * @count.
 351 * If the buffer is full, the function returns 0.
 352 * If an existing IUCV communication path has been severed, -EPIPE is returned
 353 * (which can be passed to the HVC layer to cause a tty hangup).
 354 */
 355static ssize_t hvc_iucv_queue(struct hvc_iucv_private *priv, const u8 *buf,
 356			      size_t count)
 357{
 358	size_t len;
 359
 360	if (priv->iucv_state == IUCV_DISCONN)
 361		return count;			/* ignore data */
 362
 363	if (priv->iucv_state == IUCV_SEVERED)
 364		return -EPIPE;
 365
 366	len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
 367	if (!len)
 368		return 0;
 369
 370	memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
 371	priv->sndbuf_len += len;
 372
 373	if (priv->iucv_state == IUCV_CONNECTED)
 374		schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
 375
 376	return len;
 377}
 378
 379/**
 380 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 381 * @priv:	Pointer to struct hvc_iucv_private instance.
 382 *
 383 * If an IUCV communication path has been established, the buffered output data
 384 * is sent via an IUCV message and the number of bytes sent is returned.
 385 * Returns -EIO if no IUCV communication path has been established, or
 386 * -EPIPE if an existing IUCV communication path has been severed.
 387 */
 388static int hvc_iucv_send(struct hvc_iucv_private *priv)
 389{
 390	struct iucv_tty_buffer *sb;
 391	int rc, len;
 392
 393	if (priv->iucv_state == IUCV_SEVERED)
 394		return -EPIPE;
 395
 396	if (priv->iucv_state == IUCV_DISCONN)
 397		return -EIO;
 398
 399	if (!priv->sndbuf_len)
 400		return 0;
 401
 402	/* allocate internal buffer to store msg data and also compute total
 403	 * message length */
 404	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
 405	if (!sb)
 406		return -ENOMEM;
 407
 408	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
 409	sb->mbuf->datalen = (u16) priv->sndbuf_len;
 410	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);
 411
 412	list_add_tail(&sb->list, &priv->tty_outqueue);
 413
 414	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
 415				 (void *) sb->mbuf, sb->msg.length);
 416	if (rc) {
 417		/* drop the message here; however we might want to handle
 418		 * 0x03 (msg limit reached) by trying again... */
 419		list_del(&sb->list);
 420		destroy_tty_buffer(sb);
 421	}
 422	len = priv->sndbuf_len;
 423	priv->sndbuf_len = 0;
 424
 425	return len;
 426}
 427
 428/**
 429 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
 430 * @work:	Work structure.
 431 *
 432 * This work queue function sends buffered output data over IUCV and,
 433 * if not all buffered data could be sent, reschedules itself.
 434 */
 435static void hvc_iucv_sndbuf_work(struct work_struct *work)
 436{
 437	struct hvc_iucv_private *priv;
 438
 439	priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
 440
 441	spin_lock_bh(&priv->lock);
 442	hvc_iucv_send(priv);
 443	spin_unlock_bh(&priv->lock);
 444}
 445
 446/**
 447 * hvc_iucv_put_chars() - HVC put_chars operation.
 448 * @vtermno:	HVC virtual terminal number.
 449 * @buf:	Pointer to a buffer to read data from
 450 * @count:	Size of buffer available for reading
 451 *
 452 * The HVC thread calls this method to write characters to the back-end.
 453 * The function calls hvc_iucv_queue() to queue terminal data for sending.
 454 *
 455 * Locking:	The method gets called under an irqsave() spinlock; and
 456 *		locks struct hvc_iucv_private->lock.
 457 */
 458static ssize_t hvc_iucv_put_chars(uint32_t vtermno, const u8 *buf, size_t count)
 459{
 460	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
 461	int queued;
 462
 463	if (!count)
 464		return 0;
 465
 466	if (!priv)
 467		return -ENODEV;
 468
 469	spin_lock(&priv->lock);
 470	queued = hvc_iucv_queue(priv, buf, count);
 471	spin_unlock(&priv->lock);
 472
 473	return queued;
 474}
 475
 476/**
 477 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
 478 * @hp:	Pointer to the HVC device (struct hvc_struct)
 479 * @id:	Additional data (originally passed to hvc_alloc): the index of a
 480 *	struct hvc_iucv_private instance.
 481 *
 482 * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
 483 * instance that is derived from @id. Always returns 0.
 484 *
 485 * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
 486 */
 487static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
 488{
 489	struct hvc_iucv_private *priv;
 490
 491	priv = hvc_iucv_get_private(id);
 492	if (!priv)
 493		return 0;
 494
 495	spin_lock_bh(&priv->lock);
 496	priv->tty_state = TTY_OPENED;
 497	spin_unlock_bh(&priv->lock);
 498
 499	return 0;
 500}
 501
 502/**
 503 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
 504 * @priv:	Pointer to the struct hvc_iucv_private instance.
 505 */
 506static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
 507{
 508	destroy_tty_buffer_list(&priv->tty_outqueue);
 509	destroy_tty_buffer_list(&priv->tty_inqueue);
 510
 511	priv->tty_state = TTY_CLOSED;
 512	priv->iucv_state = IUCV_DISCONN;
 513
 514	priv->sndbuf_len = 0;
 515}
 516
 517/**
 518 * tty_outqueue_empty() - Test if the tty outqueue is empty
 519 * @priv:	Pointer to struct hvc_iucv_private instance.
 520 */
 521static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
 522{
 523	int rc;
 524
 525	spin_lock_bh(&priv->lock);
 526	rc = list_empty(&priv->tty_outqueue);
 527	spin_unlock_bh(&priv->lock);
 528
 529	return rc;
 530}
 531
 532/**
 533 * flush_sndbuf_sync() - Flush send buffer and wait for completion
 534 * @priv:	Pointer to struct hvc_iucv_private instance.
 535 *
 536 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
 537 * to flush any buffered terminal output data and waits for completion.
 538 */
 539static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
 540{
 541	int sync_wait;
 542
 543	cancel_delayed_work_sync(&priv->sndbuf_work);
 544
 545	spin_lock_bh(&priv->lock);
 546	hvc_iucv_send(priv);		/* force sending buffered data */
 547	sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
 548	spin_unlock_bh(&priv->lock);
 549
 550	if (sync_wait)
 551		wait_event_timeout(priv->sndbuf_waitq,
 552				   tty_outqueue_empty(priv), HZ/10);
 553}
 554
 555/**
 556 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
 557 * @priv:	Pointer to hvc_iucv_private structure
 558 *
 559 * This routine severs an existing IUCV communication path and hangs
 560 * up the underlying HVC terminal device.
 561 * The hang-up occurs only if an IUCV communication path is established;
 562 * otherwise there is no need to hang up the terminal device.
 563 *
 564 * The IUCV HVC hang-up is separated into two steps:
 565 * 1. After the IUCV path has been severed, the iucv_state is set to
 566 *    IUCV_SEVERED.
 567 * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
 568 *    IUCV_SEVERED state causes the tty hang-up in the HVC layer.
 569 *
 570 * If the tty has not yet been opened, clean up the hvc_iucv_private
 571 * structure to allow re-connects.
 572 * If the tty has been opened, let get_chars() return -EPIPE to signal
 573 * the HVC layer to hang up the tty; in that case, wake up the HVC thread
 574 * so that it calls get_chars().
 575 *
 576 * Special notes on hanging up an HVC terminal instantiated as console:
 577 * Hang-up:	1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
 578 *		2. do_tty_hangup() calls tty->ops->close() for console_filp
 579 *			=> no hangup notifier is called by HVC (default)
 580 *		3. hvc_close() returns because of tty_hung_up_p(filp)
 581 *			=> no delete notifier is called!
 582 * Finally, the back-end is not notified and, thus, the tty session is
 583 * kept active (TTY_OPENED) to be ready for re-connects.
 584 *
 585 * Locking:	spin_lock(&priv->lock) w/o disabling bh
 586 */
 587static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
 588{
 589	struct iucv_path *path;
 590
 591	path = NULL;
 592	spin_lock(&priv->lock);
 593	if (priv->iucv_state == IUCV_CONNECTED) {
 594		path = priv->path;
 595		priv->path = NULL;
 596		priv->iucv_state = IUCV_SEVERED;
 597		if (priv->tty_state == TTY_CLOSED)
 598			hvc_iucv_cleanup(priv);
 599		else
 600			/* console is special (see above) */
 601			if (priv->is_console) {
 602				hvc_iucv_cleanup(priv);
 603				priv->tty_state = TTY_OPENED;
 604			} else
 605				hvc_kick();
 606	}
 607	spin_unlock(&priv->lock);
 608
 609	/* finally sever path (outside of priv->lock due to lock ordering) */
 610	if (path) {
 611		iucv_path_sever(path, NULL);
 612		iucv_path_free(path);
 613	}
 614}
 615
 616/**
 617 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 618 * @hp:		Pointer to the HVC device (struct hvc_struct)
 619 * @id:		Additional data (originally passed to hvc_alloc):
 620 *		the index of a struct hvc_iucv_private instance.
 621 *
 622 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
 623 * virtual or otherwise) has occurred.
 624 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
 625 * to keep an existing IUCV communication path established.
 626 * (Background: vhangup() is called from user space (by getty or login) to
 627 *		disable writing to the tty by other applications).
 628 * If the tty has been opened and an established IUCV path has been severed
 629 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
 630 *
 631 * Locking:	struct hvc_iucv_private->lock
 632 */
 633static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
 634{
 635	struct hvc_iucv_private *priv;
 636
 637	priv = hvc_iucv_get_private(id);
 638	if (!priv)
 639		return;
 640
 641	flush_sndbuf_sync(priv);
 642
 643	spin_lock_bh(&priv->lock);
 644	/* NOTE: If the hangup was scheduled by ourselves (from the iucv
 645	 *	 path_severed callback [IUCV_SEVERED]), we have to clean up
 646	 *	 our structure and set the state to TTY_CLOSED.
 647	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
 648	 *	 ignore this hangup and keep an established IUCV path open...
 649	 *	 (...the reason is that we are not able to connect back to the
 650	 *	 client if we disconnect on hang up) */
 651	priv->tty_state = TTY_CLOSED;
 652
 653	if (priv->iucv_state == IUCV_SEVERED)
 654		hvc_iucv_cleanup(priv);
 655	spin_unlock_bh(&priv->lock);
 656}
 657
 658/**
 659 * hvc_iucv_dtr_rts() - HVC notifier for handling DTR/RTS
 660 * @hp:		Pointer to the HVC device (struct hvc_struct)
 661 * @active:	True to raise or false to lower DTR/RTS lines
 662 *
 663 * This routine notifies the HVC back-end to raise or lower DTR/RTS
 664 * lines.  Raising DTR/RTS is ignored.  Lowering DTR/RTS indicates that
 665 * the IUCV connection is to be dropped (similar to hanging up a modem).
 666 */
 667static void hvc_iucv_dtr_rts(struct hvc_struct *hp, bool active)
 668{
 669	struct hvc_iucv_private *priv;
 670	struct iucv_path        *path;
 671
 672	/* Raising DTR/RTS is ignored as IUCV connections can be
 673	 * established at any time.
 674	 */
 675	if (active)
 676		return;
 677
 678	priv = hvc_iucv_get_private(hp->vtermno);
 679	if (!priv)
 680		return;
 681
 682	/* Lowering the DTR/RTS lines disconnects an established IUCV
 683	 * connection.
 684	 */
 685	flush_sndbuf_sync(priv);
 686
 687	spin_lock_bh(&priv->lock);
 688	path = priv->path;		/* save reference to IUCV path */
 689	priv->path = NULL;
 690	priv->iucv_state = IUCV_DISCONN;
 691	spin_unlock_bh(&priv->lock);
 692
 693	/* Sever IUCV path outside of priv->lock due to lock ordering of:
 694	 * priv->lock <--> iucv_table_lock */
 695	if (path) {
 696		iucv_path_sever(path, NULL);
 697		iucv_path_free(path);
 698	}
 699}
 700
 701/**
 702 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 703 * @hp:		Pointer to the HVC device (struct hvc_struct)
 704 * @id:		Additional data (originally passed to hvc_alloc):
 705 *		the index of a struct hvc_iucv_private instance.
 706 *
 707 * This routine notifies the HVC back-end that the last tty device fd has been
 708 * closed.  The function cleans up tty resources.  The clean-up of the IUCV
 709 * connection is done in hvc_iucv_dtr_rts() and depends on the HUPCL termios
 710 * control setting.
 711 *
 712 * Locking:	struct hvc_iucv_private->lock
 713 */
 714static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
 715{
 716	struct hvc_iucv_private *priv;
 717
 718	priv = hvc_iucv_get_private(id);
 719	if (!priv)
 720		return;
 721
 722	flush_sndbuf_sync(priv);
 723
 724	spin_lock_bh(&priv->lock);
 725	destroy_tty_buffer_list(&priv->tty_outqueue);
 726	destroy_tty_buffer_list(&priv->tty_inqueue);
 727	priv->tty_state = TTY_CLOSED;
 728	priv->sndbuf_len = 0;
 729	spin_unlock_bh(&priv->lock);
 730}
 731
 732/**
 733 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 734 * @ipvmid:	Originating z/VM user ID (right padded with blanks)
 735 *
 736 * Returns 0 if the z/VM user ID that is specified with @ipvmid is permitted to
 737 * connect, otherwise non-zero.
 738 */
 739static int hvc_iucv_filter_connreq(u8 ipvmid[8])
 740{
 741	const char *wildcard, *filter_entry;
 742	size_t i, len;
 743
 744	/* Note: default policy is ACCEPT if no filter is set */
 745	if (!hvc_iucv_filter_size)
 746		return 0;
 747
 748	for (i = 0; i < hvc_iucv_filter_size; i++) {
 749		filter_entry = hvc_iucv_filter + (8 * i);
 750
 751		/* If a filter entry contains the filter wildcard character,
 752		 * reduce the length to match the leading portion of the user
 753		 * ID only (wildcard match).  Characters following the wildcard
 754		 * are ignored.
 755		 */
 756		wildcard = strnchr(filter_entry, 8, FILTER_WILDCARD_CHAR);
 757		len = (wildcard) ? wildcard - filter_entry : 8;
 758		if (memcmp(ipvmid, filter_entry, len) == 0)
 759			return 0;
 760	}
 761	return 1;
 762}
 763
 764/**
 765 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 766 * @path:	Pending path (struct iucv_path)
 767 * @ipvmid:	z/VM system identifier of originator
 768 * @ipuser:	User specified data for this path
 769 *		(AF_IUCV: port/service name and originator port)
 770 *
 771 * The function uses the @ipuser data to determine if the pending path belongs
 772 * to a terminal managed by this device driver.
 773 * If the path belongs to this driver, ensure that the terminal is not accessed
 774 * multiple times (only one connection to a terminal is allowed).
 775 * If the terminal is not yet connected, the pending path is accepted and is
 776 * associated to the appropriate struct hvc_iucv_private instance.
 777 *
 778 * Returns 0 if @path belongs to a terminal managed by this device driver;
 779 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 780 *
 781 * Locking:	struct hvc_iucv_private->lock
 782 */
 783static	int hvc_iucv_path_pending(struct iucv_path *path, u8 *ipvmid,
 784				  u8 *ipuser)
 785{
 786	struct hvc_iucv_private *priv, *tmp;
 787	u8 wildcard[9] = "lnxhvc  ";
 788	int i, rc, find_unused;
 789	u8 nuser_data[16];
 790	u8 vm_user_id[9];
 791
 792	ASCEBC(wildcard, sizeof(wildcard));
 793	find_unused = !memcmp(wildcard, ipuser, 8);
 794
 795	/* First, check if the pending path request is managed by this
 796	 * IUCV handler:
 797	 * - find a disconnected device if ipuser contains the wildcard
 798	 * - find the device that matches the terminal ID in ipuser
 799	 */
 800	priv = NULL;
 801	for (i = 0; i < hvc_iucv_devices; i++) {
 802		tmp = hvc_iucv_table[i];
 803		if (!tmp)
 804			continue;
 805
 806		if (find_unused) {
 807			spin_lock(&tmp->lock);
 808			if (tmp->iucv_state == IUCV_DISCONN)
 809				priv = tmp;
 810			spin_unlock(&tmp->lock);
 811
 812		} else if (!memcmp(tmp->srv_name, ipuser, 8))
 813			priv = tmp;
 814		if (priv)
 815			break;
 816	}
 817	if (!priv)
 818		return -ENODEV;
 819
 820	/* Enforce that ipvmid is allowed to connect to us */
 821	read_lock(&hvc_iucv_filter_lock);
 822	rc = hvc_iucv_filter_connreq(ipvmid);
 823	read_unlock(&hvc_iucv_filter_lock);
 824	if (rc) {
 825		iucv_path_sever(path, ipuser);
 826		iucv_path_free(path);
 827		memcpy(vm_user_id, ipvmid, 8);
 828		vm_user_id[8] = 0;
 829		pr_info("A connection request from z/VM user ID %s "
 830			"was refused\n", vm_user_id);
 831		return 0;
 832	}
 833
 834	spin_lock(&priv->lock);
 835
 836	/* If the terminal is already connected or being severed, then sever
 837	 * this path to enforce that there is only ONE established communication
 838	 * path per terminal. */
 839	if (priv->iucv_state != IUCV_DISCONN) {
 840		iucv_path_sever(path, ipuser);
 841		iucv_path_free(path);
 842		goto out_path_handled;
 843	}
 844
 845	/* accept path */
 846	memcpy(nuser_data, ipuser + 8, 8);  /* remote service (for af_iucv) */
 847	memcpy(nuser_data + 8, ipuser, 8);  /* local service  (for af_iucv) */
 848	path->msglim = 0xffff;		    /* IUCV MSGLIMIT */
 849	path->flags &= ~IUCV_IPRMDATA;	    /* TODO: use IUCV_IPRMDATA */
 850	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
 851	if (rc) {
 852		iucv_path_sever(path, ipuser);
 853		iucv_path_free(path);
 854		goto out_path_handled;
 855	}
 856	priv->path = path;
 857	priv->iucv_state = IUCV_CONNECTED;
 858
 859	/* store path information */
 860	memcpy(priv->info_path, ipvmid, 8);
 861	memcpy(priv->info_path + 8, ipuser + 8, 8);
 862
 863	/* flush buffered output data... */
 864	schedule_delayed_work(&priv->sndbuf_work, 5);
 865
 866out_path_handled:
 867	spin_unlock(&priv->lock);
 868	return 0;
 869}
 870
 871/**
 872 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 873 * @path:	Pending path (struct iucv_path)
 874 * @ipuser:	User specified data for this path
 875 *		(AF_IUCV: port/service name and originator port)
 876 *
 877 * This function calls the hvc_iucv_hangup() function for the
 878 * respective IUCV HVC terminal.
 879 *
 880 * Locking:	struct hvc_iucv_private->lock
 881 */
 882static void hvc_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
 883{
 884	struct hvc_iucv_private *priv = path->private;
 885
 886	hvc_iucv_hangup(priv);
 887}
 888
 889/**
 890 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 891 * @path:	Pending path (struct iucv_path)
 892 * @msg:	Pointer to the IUCV message
 893 *
 894 * The function puts an incoming message on the input queue for later
 895 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 896 * If the tty has not yet been opened, the message is rejected.
 897 *
 898 * Locking:	struct hvc_iucv_private->lock
 899 */
 900static void hvc_iucv_msg_pending(struct iucv_path *path,
 901				 struct iucv_message *msg)
 902{
 903	struct hvc_iucv_private *priv = path->private;
 904	struct iucv_tty_buffer *rb;
 905
 906	/* reject messages that exceed max size of iucv_tty_msg->datalen */
 907	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
 908		iucv_message_reject(path, msg);
 909		return;
 910	}
 911
 912	spin_lock(&priv->lock);
 913
 914	/* reject messages if tty has not yet been opened */
 915	if (priv->tty_state == TTY_CLOSED) {
 916		iucv_message_reject(path, msg);
 917		goto unlock_return;
 918	}
 919
 920	/* allocate tty buffer to save iucv msg only */
 921	rb = alloc_tty_buffer(0, GFP_ATOMIC);
 922	if (!rb) {
 923		iucv_message_reject(path, msg);
 924		goto unlock_return;	/* -ENOMEM */
 925	}
 926	rb->msg = *msg;
 927
 928	list_add_tail(&rb->list, &priv->tty_inqueue);
 929
 930	hvc_kick();	/* wake up hvc thread */
 931
 932unlock_return:
 933	spin_unlock(&priv->lock);
 934}
 935
 936/**
 937 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 938 * @path:	Pending path (struct iucv_path)
 939 * @msg:	Pointer to the IUCV message
 940 *
 941 * The function is called upon completion of message delivery to remove the
 942 * message from the outqueue. Additional delivery information can be found
 943 * in msg->audit: rejected messages (0x040000 (IPADRJCT)), and
 944 *	       purged messages	 (0x010000 (IPADPGNR)).
 945 *
 946 * Locking:	struct hvc_iucv_private->lock
 947 */
 948static void hvc_iucv_msg_complete(struct iucv_path *path,
 949				  struct iucv_message *msg)
 950{
 951	struct hvc_iucv_private *priv = path->private;
 952	struct iucv_tty_buffer	*ent, *next;
 953	LIST_HEAD(list_remove);
 954
 955	spin_lock(&priv->lock);
 956	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
 957		if (ent->msg.id == msg->id) {
 958			list_move(&ent->list, &list_remove);
 959			break;
 960		}
 961	wake_up(&priv->sndbuf_waitq);
 962	spin_unlock(&priv->lock);
 963	destroy_tty_buffer_list(&list_remove);
 964}
 965
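/**
 * hvc_iucv_dev_termid_show() - Show the terminal ID (termid device attribute)
 * @dev:	Device structure of the IUCV HVC terminal
 * @attr:	Device attribute (unused)
 * @buf:	Output buffer
 *
 * The function converts the EBCDIC IUCV service name of the terminal to
 * ASCII, writes it to @buf followed by a newline, and returns the number
 * of bytes written.
 */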
 966static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
 967					struct device_attribute *attr,
 968					char *buf)
 969{
 970	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
 971	size_t len;
 972
 973	len = sizeof(priv->srv_name);
 974	memcpy(buf, priv->srv_name, len);
 975	EBCASC(buf, len);
 976	buf[len++] = '\n';
 977	return len;
 978}
 979
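/**
 * hvc_iucv_dev_state_show() - Show the terminal state (state device attribute)
 * @dev:	Device structure of the IUCV HVC terminal
 * @attr:	Device attribute (unused)
 * @buf:	Output buffer
 *
 * The function writes the current IUCV and tty states (see enum iucv_state_t
 * and enum tty_state_t) to @buf in the format "<iucv_state>:<tty_state>".
 */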
 980static ssize_t hvc_iucv_dev_state_show(struct device *dev,
 981					struct device_attribute *attr,
 982					char *buf)
 983{
 984	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
 985	return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state);
 986}
 987
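/**
 * hvc_iucv_dev_peer_show() - Show the connected peer (peer device attribute)
 * @dev:	Device structure of the IUCV HVC terminal
 * @attr:	Device attribute (unused)
 * @buf:	Output buffer
 *
 * The function writes the z/VM user ID and the IUCV service name of the
 * connected peer to @buf in the format "<vmid>:<service>"; both fields are
 * empty if no IUCV communication path is established.
 */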
 988static ssize_t hvc_iucv_dev_peer_show(struct device *dev,
 989				      struct device_attribute *attr,
 990				      char *buf)
 991{
 992	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
 993	char vmid[9], ipuser[9];
 994
 995	memset(vmid, 0, sizeof(vmid));
 996	memset(ipuser, 0, sizeof(ipuser));
 997
 998	spin_lock_bh(&priv->lock);
 999	if (priv->iucv_state == IUCV_CONNECTED) {
1000		memcpy(vmid, priv->info_path, 8);
1001		memcpy(ipuser, priv->info_path + 8, 8);
1002	}
1003	spin_unlock_bh(&priv->lock);
1004	EBCASC(ipuser, 8);
1005
1006	return sprintf(buf, "%s:%s\n", vmid, ipuser);
1007}
1008
1009
1010/* HVC operations */
1011static const struct hv_ops hvc_iucv_ops = {
1012	.get_chars = hvc_iucv_get_chars,
1013	.put_chars = hvc_iucv_put_chars,
1014	.notifier_add = hvc_iucv_notifier_add,
1015	.notifier_del = hvc_iucv_notifier_del,
1016	.notifier_hangup = hvc_iucv_notifier_hangup,
1017	.dtr_rts = hvc_iucv_dtr_rts,
1018};
1019
1020/* IUCV HVC device attributes */
1021static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
1022static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
1023static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);
1024static struct attribute *hvc_iucv_dev_attrs[] = {
1025	&dev_attr_termid.attr,
1026	&dev_attr_state.attr,
1027	&dev_attr_peer.attr,
1028	NULL,
1029};
1030static struct attribute_group hvc_iucv_dev_attr_group = {
1031	.attrs = hvc_iucv_dev_attrs,
1032};
1033static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
1034	&hvc_iucv_dev_attr_group,
1035	NULL,
1036};
1037
1038/**
1039 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
1040 * @id:			hvc_iucv_table index
1041 * @is_console:		Flag if the instance is used as Linux console
1042 *
1043 * This function allocates a new hvc_iucv_private structure and stores
1044 * the instance in hvc_iucv_table at index @id.
1045 * Returns 0 on success; otherwise non-zero.
1046 */
1047static int __init hvc_iucv_alloc(int id, unsigned int is_console)
1048{
1049	struct hvc_iucv_private *priv;
1050	char name[9];
1051	int rc;
1052
1053	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
1054	if (!priv)
1055		return -ENOMEM;
1056
1057	spin_lock_init(&priv->lock);
1058	INIT_LIST_HEAD(&priv->tty_outqueue);
1059	INIT_LIST_HEAD(&priv->tty_inqueue);
1060	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
1061	init_waitqueue_head(&priv->sndbuf_waitq);
1062
1063	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
1064	if (!priv->sndbuf) {
1065		kfree(priv);
1066		return -ENOMEM;
1067	}
1068
1069	/* set console flag */
1070	priv->is_console = is_console;
1071
1072	/* allocate hvc device */
1073	priv->hvc = hvc_alloc(id, /*		 PAGE_SIZE */
1074			      id, &hvc_iucv_ops, 256);
1075	if (IS_ERR(priv->hvc)) {
1076		rc = PTR_ERR(priv->hvc);
1077		goto out_error_hvc;
1078	}
1079
1080	/* notify HVC thread instead of using polling */
1081	priv->hvc->irq_requested = 1;
1082
1083	/* setup iucv related information */
1084	snprintf(name, 9, "lnxhvc%-2d", id);
1085	memcpy(priv->srv_name, name, 8);
1086	ASCEBC(priv->srv_name, 8);
1087
1088	priv->dev = iucv_alloc_device(hvc_iucv_dev_attr_groups, NULL,
1089				      priv, "hvc_iucv%d", id);
1090	if (!priv->dev) {
1091		rc = -ENOMEM;
1092		goto out_error_dev;
1093	}
1094	rc = device_register(priv->dev);
1095	if (rc) {
1096		put_device(priv->dev);
1097		goto out_error_dev;
1098	}
1099
1100	hvc_iucv_table[id] = priv;
1101	return 0;
1102
1103out_error_dev:
1104	hvc_remove(priv->hvc);
1105out_error_hvc:
1106	free_page((unsigned long) priv->sndbuf);
1107	kfree(priv);
1108
1109	return rc;
1110}
1111
1112/**
1113 * hvc_iucv_destroy() - Destroy and free a hvc_iucv_private instance
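 * @priv:	Pointer to the struct hvc_iucv_private instance to destroy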
1114 */
1115static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
1116{
1117	hvc_remove(priv->hvc);
1118	device_unregister(priv->dev);
1119	free_page((unsigned long) priv->sndbuf);
1120	kfree(priv);
1121}
1122
1123/**
1124 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
1125 * @filter:	String containing a comma-separated list of z/VM user IDs
1126 * @dest:	Location in which to store the parsed z/VM user ID
1127 */
1128static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
1129{
1130	const char *nextdelim, *residual;
1131	size_t len;
1132
1133	nextdelim = strchr(filter, ',');
1134	if (nextdelim) {
1135		len = nextdelim - filter;
1136		residual = nextdelim + 1;
1137	} else {
1138		len = strlen(filter);
1139		residual = filter + len;
1140	}
1141
1142	if (len == 0)
1143		return ERR_PTR(-EINVAL);
1144
1145	/* check for '\n' (if called from sysfs) */
1146	if (filter[len - 1] == '\n')
1147		len--;
1148
1149	/* prohibit filter entries containing the wildcard character only */
1150	if (len == 1 && *filter == FILTER_WILDCARD_CHAR)
1151		return ERR_PTR(-EINVAL);
1152
1153	if (len > 8)
1154		return ERR_PTR(-EINVAL);
1155
1156	/* pad with blanks and save upper case version of user ID */
1157	memset(dest, ' ', 8);
1158	while (len--)
1159		dest[len] = toupper(filter[len]);
1160	return residual;
1161}
1162
1163/**
1164 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
1165 * @filter:	String consisting of a comma-separated list of z/VM user IDs
1166 *
1167 * The function parses the @filter string and creates an array containing
1168 * the list of z/VM user ID filter entries.
1169 * Return code 0 means success, -EINVAL if the filter is syntactically
1170 * incorrect, -ENOMEM if there was not enough memory to allocate the
1171 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
1172 */
1173static int hvc_iucv_setup_filter(const char *val)
1174{
1175	const char *residual;
1176	int err;
1177	size_t size, count;
1178	void *array, *old_filter;
1179
1180	count = strlen(val);
1181	if (count == 0 || (count == 1 && val[0] == '\n')) {
1182		size  = 0;
1183		array = NULL;
1184		goto out_replace_filter;	/* clear filter */
1185	}
1186
1187	/* count user IDs in order to allocate sufficient memory */
1188	size = 1;
1189	residual = val;
1190	while ((residual = strchr(residual, ',')) != NULL) {
1191		residual++;
1192		size++;
1193	}
1194
1195	/* check if the specified list exceeds the filter limit */
1196	if (size > MAX_VMID_FILTER)
1197		return -ENOSPC;
1198
1199	array = kcalloc(size, 8, GFP_KERNEL);
1200	if (!array)
1201		return -ENOMEM;
1202
1203	count = size;
1204	residual = val;
1205	while (*residual && count) {
1206		residual = hvc_iucv_parse_filter(residual,
1207						 array + ((size - count) * 8));
1208		if (IS_ERR(residual)) {
1209			err = PTR_ERR(residual);
1210			kfree(array);
1211			goto out_err;
1212		}
1213		count--;
1214	}
1215
1216out_replace_filter:
1217	write_lock_bh(&hvc_iucv_filter_lock);
1218	old_filter = hvc_iucv_filter;
1219	hvc_iucv_filter_size = size;
1220	hvc_iucv_filter = array;
1221	write_unlock_bh(&hvc_iucv_filter_lock);
1222	kfree(old_filter);
1223
1224	err = 0;
1225out_err:
1226	return err;
1227}
1228
1229/**
1230 * param_set_vmidfilter() - Set z/VM user ID filter parameter
1231 * @val:	String consisting of a comma-separated list of z/VM user IDs
1232 * @kp:		Kernel parameter pointing to hvc_iucv_filter array
1233 *
1234 * The function sets up the z/VM user ID filter specified as comma-separated
1235 * list of user IDs in @val.
1236 * Note: If it is called early in the boot process, @val is stored and
1237 *	 parsed later in hvc_iucv_init().
1238 */
1239static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
1240{
1241	int rc;
1242
1243	if (!MACHINE_IS_VM || !hvc_iucv_devices)
1244		return -ENODEV;
1245
1246	if (!val)
1247		return -EINVAL;
1248
1249	rc = 0;
1250	if (slab_is_available())
1251		rc = hvc_iucv_setup_filter(val);
1252	else
1253		hvc_iucv_filter_string = val;	/* defer... */
1254	return rc;
1255}
1256
1257/**
1258 * param_get_vmidfilter() - Get z/VM user ID filter
1259 * @buffer:	Buffer to store the z/VM user ID filter
1260 *		(the buffer size is assumed to be PAGE_SIZE)
1261 * @kp:		Kernel parameter pointing to the hvc_iucv_filter array
1262 *
1263 * The function stores the filter as a comma-separated list of z/VM user IDs
1264 * in @buffer. Typically, sysfs routines call this function for attr show.
1265 */
1266static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
1267{
1268	int rc;
1269	size_t index, len;
1270	void *start, *end;
1271
1272	if (!MACHINE_IS_VM || !hvc_iucv_devices)
1273		return -ENODEV;
1274
1275	rc = 0;
1276	read_lock_bh(&hvc_iucv_filter_lock);
1277	for (index = 0; index < hvc_iucv_filter_size; index++) {
1278		start = hvc_iucv_filter + (8 * index);
1279		end   = memchr(start, ' ', 8);
1280		len   = (end) ? end - start : 8;
1281		memcpy(buffer + rc, start, len);
1282		rc += len;
1283		buffer[rc++] = ',';
1284	}
1285	read_unlock_bh(&hvc_iucv_filter_lock);
1286	if (rc)
1287		buffer[--rc] = '\0';	/* replace last comma and update rc */
1288	return rc;
1289}
1290
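/* Custom "vmidfilter" kernel parameter type: no strict type checking is
 * performed; the filter is parsed and formatted by param_set_vmidfilter()
 * and param_get_vmidfilter().
 */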
1291#define param_check_vmidfilter(name, p) __param_check(name, p, void)
1292
1293static const struct kernel_param_ops param_ops_vmidfilter = {
1294	.set = param_set_vmidfilter,
1295	.get = param_get_vmidfilter,
1296};
1297
1298/**
1299 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
1300 */
1301static int __init hvc_iucv_init(void)
1302{
1303	int rc;
1304	unsigned int i;
1305
1306	if (!hvc_iucv_devices)
1307		return -ENODEV;
1308
1309	if (!MACHINE_IS_VM) {
1310		pr_notice("The z/VM IUCV HVC device driver cannot "
1311			   "be used without z/VM\n");
1312		rc = -ENODEV;
1313		goto out_error;
1314	}
1315
1316	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
1317		pr_err("%lu is not a valid value for the hvc_iucv= "
1318			"kernel parameter\n", hvc_iucv_devices);
1319		rc = -EINVAL;
1320		goto out_error;
1321	}
1322
1323	/* parse hvc_iucv_allow string and create z/VM user ID filter list */
1324	if (hvc_iucv_filter_string) {
1325		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
1326		switch (rc) {
1327		case 0:
1328			break;
1329		case -ENOMEM:
1330			pr_err("Allocating memory failed with "
1331				"reason code=%d\n", 3);
1332			goto out_error;
1333		case -EINVAL:
1334			pr_err("hvc_iucv_allow= does not specify a valid "
1335				"z/VM user ID list\n");
1336			goto out_error;
1337		case -ENOSPC:
1338			pr_err("hvc_iucv_allow= specifies too many "
1339				"z/VM user IDs\n");
1340			goto out_error;
1341		default:
1342			goto out_error;
1343		}
1344	}
1345
1346	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
1347					   sizeof(struct iucv_tty_buffer),
1348					   0, 0, NULL);
1349	if (!hvc_iucv_buffer_cache) {
1350		pr_err("Allocating memory failed with reason code=%d\n", 1);
1351		rc = -ENOMEM;
1352		goto out_error;
1353	}
1354
1355	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
1356						    hvc_iucv_buffer_cache);
1357	if (!hvc_iucv_mempool) {
1358		pr_err("Allocating memory failed with reason code=%d\n", 2);
1359		kmem_cache_destroy(hvc_iucv_buffer_cache);
1360		rc = -ENOMEM;
1361		goto out_error;
1362	}
1363
1364	/* register the first terminal device as console
1365	 * (must be done before allocating hvc terminal devices) */
1366	rc = hvc_instantiate(0, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
1367	if (rc) {
1368		pr_err("Registering HVC terminal device as "
1369		       "Linux console failed\n");
1370		goto out_error_memory;
1371	}
1372
1373	/* allocate hvc_iucv_private structs */
1374	for (i = 0; i < hvc_iucv_devices; i++) {
1375		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
1376		if (rc) {
1377			pr_err("Creating a new HVC terminal device "
1378				"failed with error code=%d\n", rc);
1379			goto out_error_hvc;
1380		}
1381	}
1382
1383	/* register IUCV callback handler */
1384	rc = iucv_register(&hvc_iucv_handler, 0);
1385	if (rc) {
1386		pr_err("Registering IUCV handlers failed with error code=%d\n",
1387			rc);
1388		goto out_error_hvc;
1389	}
1390
1391	return 0;
1392
1393out_error_hvc:
1394	for (i = 0; i < hvc_iucv_devices; i++)
1395		if (hvc_iucv_table[i])
1396			hvc_iucv_destroy(hvc_iucv_table[i]);
1397out_error_memory:
1398	mempool_destroy(hvc_iucv_mempool);
1399	kmem_cache_destroy(hvc_iucv_buffer_cache);
1400out_error:
1401	kfree(hvc_iucv_filter);
1402	hvc_iucv_devices = 0; /* ensure that we do not provide any device */
1403	return rc;
1404}
1405
1406/**
1407 * hvc_iucv_config() - Parse the hvc_iucv= kernel command line parameter
1408 * @val:	Parameter value (numeric)
1409 */
1410static	int __init hvc_iucv_config(char *val)
1411{
1412	if (kstrtoul(val, 10, &hvc_iucv_devices))
1413		pr_warn("hvc_iucv= invalid parameter value '%s'\n", val);
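	/* always return 1 to indicate that the hvc_iucv= option has been handled */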
1414	return 1;
1415}
1416
1417
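/* Register the driver initcall and the hvc_iucv= and hvc_iucv_allow=
 * kernel parameters.
 */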
1418device_initcall(hvc_iucv_init);
1419__setup("hvc_iucv=", hvc_iucv_config);
1420core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);