1/*
2 * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
3 *
4 * This HVC device driver provides terminal access using
5 * z/VM IUCV communication paths.
6 *
7 * Copyright IBM Corp. 2008, 2009
8 *
9 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
10 */
11#define KMSG_COMPONENT "hvc_iucv"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
14#include <linux/types.h>
15#include <linux/slab.h>
16#include <asm/ebcdic.h>
17#include <linux/ctype.h>
18#include <linux/delay.h>
19#include <linux/device.h>
20#include <linux/init.h>
21#include <linux/mempool.h>
22#include <linux/moduleparam.h>
23#include <linux/tty.h>
24#include <linux/wait.h>
25#include <net/iucv/iucv.h>
26
27#include "hvc_console.h"
28
29
30/* General device driver settings */
31#define HVC_IUCV_MAGIC 0xc9e4c3e5
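/* Note: 0xc9e4c3e5 is the EBCDIC encoding of "IUCV"; the HVC virtual
 * terminal number (vtermno) of terminal line <n> is HVC_IUCV_MAGIC + <n>,
 * see hvc_iucv_get_private() and hvc_iucv_alloc() below. */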
32#define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
33#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
34
35/* IUCV TTY message */
36#define MSG_VERSION 0x02 /* Message version */
37#define MSG_TYPE_ERROR 0x01 /* Error message */
38#define MSG_TYPE_TERMENV 0x02 /* Terminal environment variable */
39#define MSG_TYPE_TERMIOS 0x04 /* Terminal IO struct update */
40#define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */
41#define MSG_TYPE_DATA 0x10 /* Terminal data */
42
43struct iucv_tty_msg {
44 u8 version; /* Message version */
45 u8 type; /* Message type */
46#define MSG_MAX_DATALEN ((u16)(~0))
47 u16 datalen; /* Payload length */
48 u8 data[]; /* Payload buffer */
49} __attribute__((packed));
50#define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data))
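/* Worked example (illustrative): the packed header holds version, type and
 * datalen (1 + 1 + 2 bytes), so data[] starts at offset 4 and, e.g.,
 * MSG_SIZE(256) evaluates to 260 bytes on the wire. */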
51
52enum iucv_state_t {
53 IUCV_DISCONN = 0,
54 IUCV_CONNECTED = 1,
55 IUCV_SEVERED = 2,
56};
57
58enum tty_state_t {
59 TTY_CLOSED = 0,
60 TTY_OPENED = 1,
61};
62
63struct hvc_iucv_private {
64 struct hvc_struct *hvc; /* HVC struct reference */
65 u8 srv_name[8]; /* IUCV service name (ebcdic) */
66 unsigned char is_console; /* Linux console usage flag */
67 enum iucv_state_t iucv_state; /* IUCV connection status */
68 enum tty_state_t tty_state; /* TTY status */
69 struct iucv_path *path; /* IUCV path pointer */
70 spinlock_t lock; /* hvc_iucv_private lock */
71#define SNDBUF_SIZE (PAGE_SIZE) /* must be < MSG_MAX_DATALEN */
72 void *sndbuf; /* send buffer */
73 size_t sndbuf_len; /* length of send buffer */
74#define QUEUE_SNDBUF_DELAY (HZ / 25)
75 struct delayed_work sndbuf_work; /* work: send iucv msg(s) */
76 wait_queue_head_t sndbuf_waitq; /* wait for send completion */
77 struct list_head tty_outqueue; /* outgoing IUCV messages */
78 struct list_head tty_inqueue; /* incoming IUCV messages */
79 struct device *dev; /* device structure */
80};
81
82struct iucv_tty_buffer {
83 struct list_head list; /* list pointer */
84 struct iucv_message msg; /* store an IUCV message */
85 size_t offset; /* data buffer offset */
86 struct iucv_tty_msg *mbuf; /* buffer to store input/output data */
87};
88
89/* IUCV callback handler */
90static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
91static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
92static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
93static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
94
95
96/* Kernel module parameter: use one terminal device as default */
97static unsigned long hvc_iucv_devices = 1;
98
99/* Array of allocated hvc iucv tty lines... */
100static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
101#define IUCV_HVC_CON_IDX (0)
102/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
103#define MAX_VMID_FILTER (500)
104static size_t hvc_iucv_filter_size;
105static void *hvc_iucv_filter;
106static const char *hvc_iucv_filter_string;
107static DEFINE_RWLOCK(hvc_iucv_filter_lock);
108
109/* Kmem cache and mempool for iucv_tty_buffer elements */
110static struct kmem_cache *hvc_iucv_buffer_cache;
111static mempool_t *hvc_iucv_mempool;
112
113/* IUCV handler callback functions */
114static struct iucv_handler hvc_iucv_handler = {
115 .path_pending = hvc_iucv_path_pending,
116 .path_severed = hvc_iucv_path_severed,
117 .message_complete = hvc_iucv_msg_complete,
118 .message_pending = hvc_iucv_msg_pending,
119};
120
121
122/**
123 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
124 * @num: The HVC virtual terminal number (vtermno)
125 *
126 * This function returns the struct hvc_iucv_private instance that corresponds
127 * to the HVC virtual terminal number specified as parameter @num.
128 */
129struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
130{
131 if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
132 return NULL;
133 return hvc_iucv_table[num - HVC_IUCV_MAGIC];
134}
135
136/**
137 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
138 * @size: Size of the internal buffer used to store data.
139 * @flags: Memory allocation flags passed to mempool.
140 *
141 * This function allocates a new struct iucv_tty_buffer element and, optionally,
142 * allocates an internal data buffer with the specified size @size.
143 * The internal data buffer is always allocated with GFP_DMA which is
144 * required for receiving and sending data with IUCV.
145 * Note: The total message size arises from the internal buffer size and the
146 * members of the iucv_tty_msg structure.
147 * The function returns NULL if memory allocation has failed.
148 */
149static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
150{
151 struct iucv_tty_buffer *bufp;
152
153 bufp = mempool_alloc(hvc_iucv_mempool, flags);
154 if (!bufp)
155 return NULL;
156 memset(bufp, 0, sizeof(*bufp));
157
158 if (size > 0) {
159 bufp->msg.length = MSG_SIZE(size);
160 bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
161 if (!bufp->mbuf) {
162 mempool_free(bufp, hvc_iucv_mempool);
163 return NULL;
164 }
165 bufp->mbuf->version = MSG_VERSION;
166 bufp->mbuf->type = MSG_TYPE_DATA;
167 bufp->mbuf->datalen = (u16) size;
168 }
169 return bufp;
170}
171
172/**
173 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
174 * @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
175 */
176static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
177{
178 kfree(bufp->mbuf);
179 mempool_free(bufp, hvc_iucv_mempool);
180}
181
182/**
183 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
184 * @list: List containing struct iucv_tty_buffer elements.
185 */
186static void destroy_tty_buffer_list(struct list_head *list)
187{
188 struct iucv_tty_buffer *ent, *next;
189
190 list_for_each_entry_safe(ent, next, list, list) {
191 list_del(&ent->list);
192 destroy_tty_buffer(ent);
193 }
194}
195
196/**
197 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
198 * @priv: Pointer to struct hvc_iucv_private
199 * @buf: HVC buffer for writing received terminal data.
200 * @count: HVC buffer size.
201 * @has_more_data: Pointer to an int variable.
202 *
203 * The function picks up pending messages from the input queue and receives
204 * the message data that is then written to the specified buffer @buf.
205 * If the buffer size @count is less than the data message size, the
206 * message is kept on the input queue and @has_more_data is set to 1.
207 * If all message data has been written, the message is removed from
208 * the input queue.
209 *
210 * The function returns the number of bytes written to the terminal, zero if
211 * there are no pending data messages available or if there is no established
212 * IUCV path.
213 * If the IUCV path has been severed, then -EPIPE is returned to cause a
214 * hang up (that is issued by the HVC layer).
215 */
216static int hvc_iucv_write(struct hvc_iucv_private *priv,
217 char *buf, int count, int *has_more_data)
218{
219 struct iucv_tty_buffer *rb;
220 int written;
221 int rc;
222
223 /* immediately return if there is no IUCV connection */
224 if (priv->iucv_state == IUCV_DISCONN)
225 return 0;
226
227 /* if the IUCV path has been severed, return -EPIPE to inform the
228 * HVC layer to hang up the tty device. */
229 if (priv->iucv_state == IUCV_SEVERED)
230 return -EPIPE;
231
232 /* check if there are pending messages */
233 if (list_empty(&priv->tty_inqueue))
234 return 0;
235
236 /* receive an iucv message and flip data to the tty (ldisc) */
237 rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
238
239 written = 0;
240 if (!rb->mbuf) { /* message not yet received ... */
241 /* allocate mem to store msg data; if no memory is available
242 * then leave the buffer on the list and re-try later */
243 rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
244 if (!rb->mbuf)
245 return -ENOMEM;
246
247 rc = __iucv_message_receive(priv->path, &rb->msg, 0,
248 rb->mbuf, rb->msg.length, NULL);
249 switch (rc) {
250 case 0: /* Successful */
251 break;
252 case 2: /* No message found */
253 case 9: /* Message purged */
254 break;
255 default:
256 written = -EIO;
257 }
258 /* remove buffer if an error has occurred or received data
259 * is not correct */
260 if (rc || (rb->mbuf->version != MSG_VERSION) ||
261 (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
262 goto out_remove_buffer;
263 }
264
265 switch (rb->mbuf->type) {
266 case MSG_TYPE_DATA:
267 written = min_t(int, rb->mbuf->datalen - rb->offset, count);
268 memcpy(buf, rb->mbuf->data + rb->offset, written);
269 if (written < (rb->mbuf->datalen - rb->offset)) {
270 rb->offset += written;
271 *has_more_data = 1;
272 goto out_written;
273 }
274 break;
275
276 case MSG_TYPE_WINSIZE:
277 if (rb->mbuf->datalen != sizeof(struct winsize))
278 break;
279 /* The caller must ensure that the hvc is locked, which
280 * is the case when called from hvc_iucv_get_chars() */
281 __hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
282 break;
283
284 case MSG_TYPE_ERROR: /* ignored ... */
285 case MSG_TYPE_TERMENV: /* ignored ... */
286 case MSG_TYPE_TERMIOS: /* ignored ... */
287 break;
288 }
289
290out_remove_buffer:
291 list_del(&rb->list);
292 destroy_tty_buffer(rb);
293 *has_more_data = !list_empty(&priv->tty_inqueue);
294
295out_written:
296 return written;
297}
298
299/**
300 * hvc_iucv_get_chars() - HVC get_chars operation.
301 * @vtermno: HVC virtual terminal number.
302 * @buf: Pointer to a buffer to store data
303 * @count: Size of buffer available for writing
304 *
305 * The HVC thread calls this method to read characters from the back-end.
306 * If an IUCV communication path has been established, pending IUCV messages
307 * are received and data is copied into buffer @buf up to @count bytes.
308 *
309 * Locking: The routine gets called under an irqsave() spinlock; and
310 * the routine locks the struct hvc_iucv_private->lock to call
311 * helper functions.
312 */
313static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
314{
315 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
316 int written;
317 int has_more_data;
318
319 if (count <= 0)
320 return 0;
321
322 if (!priv)
323 return -ENODEV;
324
325 spin_lock(&priv->lock);
326 has_more_data = 0;
327 written = hvc_iucv_write(priv, buf, count, &has_more_data);
328 spin_unlock(&priv->lock);
329
330 /* if there are still messages on the queue... schedule another run */
331 if (has_more_data)
332 hvc_kick();
333
334 return written;
335}
336
337/**
338 * hvc_iucv_queue() - Buffer terminal data for sending.
339 * @priv: Pointer to struct hvc_iucv_private instance.
340 * @buf: Buffer containing data to send.
341 * @count: Size of buffer and amount of data to send.
342 *
343 * The function queues data for sending. To actually send the buffered data,
344 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
345 * The function returns the number of data bytes that has been buffered.
346 *
347 * If the device is not connected, data is ignored and the function returns
348 * @count.
349 * If the buffer is full, the function returns 0.
350 * If an existing IUCV communication path has been severed, -EPIPE is returned
351 * (that can be passed to HVC layer to cause a tty hangup).
352 */
353static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
354 int count)
355{
356 size_t len;
357
358 if (priv->iucv_state == IUCV_DISCONN)
359 return count; /* ignore data */
360
361 if (priv->iucv_state == IUCV_SEVERED)
362 return -EPIPE;
363
364 len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
365 if (!len)
366 return 0;
367
368 memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
369 priv->sndbuf_len += len;
370
371 if (priv->iucv_state == IUCV_CONNECTED)
372 schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
373
374 return len;
375}
376
377/**
378 * hvc_iucv_send() - Send an IUCV message containing terminal data.
379 * @priv: Pointer to struct hvc_iucv_private instance.
380 *
381 * If an IUCV communication path has been established, the buffered output data
382 * is sent via an IUCV message and the number of bytes sent is returned.
383 * Returns 0 if there is no established IUCV communication path or
384 * -EPIPE if an existing IUCV communication path has been severed.
385 */
386static int hvc_iucv_send(struct hvc_iucv_private *priv)
387{
388 struct iucv_tty_buffer *sb;
389 int rc, len;
390
391 if (priv->iucv_state == IUCV_SEVERED)
392 return -EPIPE;
393
394 if (priv->iucv_state == IUCV_DISCONN)
395 return -EIO;
396
397 if (!priv->sndbuf_len)
398 return 0;
399
400 /* allocate internal buffer to store msg data and also compute total
401 * message length */
402 sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
403 if (!sb)
404 return -ENOMEM;
405
406 memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
407 sb->mbuf->datalen = (u16) priv->sndbuf_len;
408 sb->msg.length = MSG_SIZE(sb->mbuf->datalen);
409
410 list_add_tail(&sb->list, &priv->tty_outqueue);
411
412 rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
413 (void *) sb->mbuf, sb->msg.length);
414 if (rc) {
415 /* drop the message here; however we might want to handle
416 * 0x03 (msg limit reached) by trying again... */
417 list_del(&sb->list);
418 destroy_tty_buffer(sb);
419 }
420 len = priv->sndbuf_len;
421 priv->sndbuf_len = 0;
422
423 return len;
424}
425
426/**
427 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
428 * @work: Work structure.
429 *
430 * This work queue function sends buffered output data over IUCV and,
431 * if not all buffered data could be sent, reschedules itself.
432 */
433static void hvc_iucv_sndbuf_work(struct work_struct *work)
434{
435 struct hvc_iucv_private *priv;
436
437 priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
438 if (!priv)
439 return;
440
441 spin_lock_bh(&priv->lock);
442 hvc_iucv_send(priv);
443 spin_unlock_bh(&priv->lock);
444}
445
446/**
447 * hvc_iucv_put_chars() - HVC put_chars operation.
448 * @vtermno: HVC virtual terminal number.
449 * @buf: Pointer to a buffer to read data from
450 * @count: Size of buffer available for reading
451 *
452 * The HVC thread calls this method to write characters to the back-end.
453 * The function calls hvc_iucv_queue() to queue terminal data for sending.
454 *
455 * Locking: The method gets called under an irqsave() spinlock; and
456 * locks struct hvc_iucv_private->lock.
457 */
458static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
459{
460 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
461 int queued;
462
463 if (count <= 0)
464 return 0;
465
466 if (!priv)
467 return -ENODEV;
468
469 spin_lock(&priv->lock);
470 queued = hvc_iucv_queue(priv, buf, count);
471 spin_unlock(&priv->lock);
472
473 return queued;
474}
475
476/**
477 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
478 * @hp: Pointer to the HVC device (struct hvc_struct)
479 * @id: Additional data (originally passed to hvc_alloc): the index of a struct
480 * hvc_iucv_private instance.
481 *
482 * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
483 * instance that is derived from @id. Always returns 0.
484 *
485 * Locking: struct hvc_iucv_private->lock, spin_lock_bh
486 */
487static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
488{
489 struct hvc_iucv_private *priv;
490
491 priv = hvc_iucv_get_private(id);
492 if (!priv)
493 return 0;
494
495 spin_lock_bh(&priv->lock);
496 priv->tty_state = TTY_OPENED;
497 spin_unlock_bh(&priv->lock);
498
499 return 0;
500}
501
502/**
503 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
504 * @priv: Pointer to the struct hvc_iucv_private instance.
505 */
506static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
507{
508 destroy_tty_buffer_list(&priv->tty_outqueue);
509 destroy_tty_buffer_list(&priv->tty_inqueue);
510
511 priv->tty_state = TTY_CLOSED;
512 priv->iucv_state = IUCV_DISCONN;
513
514 priv->sndbuf_len = 0;
515}
516
517/**
518 * tty_outqueue_empty() - Test if the tty outq is empty
519 * @priv: Pointer to struct hvc_iucv_private instance.
520 */
521static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
522{
523 int rc;
524
525 spin_lock_bh(&priv->lock);
526 rc = list_empty(&priv->tty_outqueue);
527 spin_unlock_bh(&priv->lock);
528
529 return rc;
530}
531
532/**
533 * flush_sndbuf_sync() - Flush send buffer and wait for completion
534 * @priv: Pointer to struct hvc_iucv_private instance.
535 *
536 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
537 * to flush any buffered terminal output data and waits for completion.
538 */
539static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
540{
541 int sync_wait;
542
543 cancel_delayed_work_sync(&priv->sndbuf_work);
544
545 spin_lock_bh(&priv->lock);
546 hvc_iucv_send(priv); /* force sending buffered data */
547 sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
548 spin_unlock_bh(&priv->lock);
549
550 if (sync_wait)
551 wait_event_timeout(priv->sndbuf_waitq,
552 tty_outqueue_empty(priv), HZ/10);
553}
554
555/**
556 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
557 * @priv: Pointer to hvc_iucv_private structure
558 *
559 * This routine severs an existing IUCV communication path and hangs
560 * up the underlying HVC terminal device.
561 * The hang-up occurs only if an IUCV communication path is established;
562 * otherwise there is no need to hang up the terminal device.
563 *
564 * The IUCV HVC hang-up is separated into two steps:
565 * 1. After the IUCV path has been severed, the iucv_state is set to
566 * IUCV_SEVERED.
567 * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
568 * IUCV_SEVERED state causes the tty hang-up in the HVC layer.
569 *
570 * If the tty has not yet been opened, clean up the hvc_iucv_private
571 * structure to allow re-connects.
572 * If the tty has been opened, let get_chars() return -EPIPE to signal
573 * the HVC layer to hang up the tty and, if so, wake up the HVC thread
574 * to call get_chars()...
575 *
576 * Special notes on hanging up an HVC terminal instantiated as a console:
577 * Hang-up: 1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
578 * 2. do_tty_hangup() calls tty->ops->close() for console_filp
579 * => no hangup notifier is called by HVC (default)
580 * 3. hvc_close() returns because of tty_hung_up_p(filp)
581 * => no delete notifier is called!
582 * Finally, the back-end is not being notified, thus, the tty session is
583 * kept active (TTY_OPENED) to be ready for re-connects.
584 *
585 * Locking: spin_lock(&priv->lock) w/o disabling bh
586 */
587static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
588{
589 struct iucv_path *path;
590
591 path = NULL;
592 spin_lock(&priv->lock);
593 if (priv->iucv_state == IUCV_CONNECTED) {
594 path = priv->path;
595 priv->path = NULL;
596 priv->iucv_state = IUCV_SEVERED;
597 if (priv->tty_state == TTY_CLOSED)
598 hvc_iucv_cleanup(priv);
599 else
600 /* console is special (see above) */
601 if (priv->is_console) {
602 hvc_iucv_cleanup(priv);
603 priv->tty_state = TTY_OPENED;
604 } else
605 hvc_kick();
606 }
607 spin_unlock(&priv->lock);
608
609 /* finally sever path (outside of priv->lock due to lock ordering) */
610 if (path) {
611 iucv_path_sever(path, NULL);
612 iucv_path_free(path);
613 }
614}
615
616/**
617 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
618 * @hp: Pointer to the HVC device (struct hvc_struct)
619 * @id: Additional data (originally passed to hvc_alloc):
620 * the index of an struct hvc_iucv_private instance.
621 *
622 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
623 * virtual or otherwise) has occurred.
624 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
625 * to keep an existing IUCV communication path established.
626 * (Background: vhangup() is called from user space (by getty or login) to
627 * disable writing to the tty by other applications).
628 * If the tty has been opened and an established IUCV path has been severed
629 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
630 *
631 * Locking: struct hvc_iucv_private->lock
632 */
633static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
634{
635 struct hvc_iucv_private *priv;
636
637 priv = hvc_iucv_get_private(id);
638 if (!priv)
639 return;
640
641 flush_sndbuf_sync(priv);
642
643 spin_lock_bh(&priv->lock);
644 /* NOTE: If we scheduled the hangup ourselves (from the iucv
645 * path_severed callback [IUCV_SEVERED]), we have to clean up
646 * our structure and set the state to TTY_CLOSED.
647 * If the tty was hung up otherwise (e.g. vhangup()), then we
648 * ignore this hangup and keep an established IUCV path open...
649 * (...the reason is that we are not able to connect back to the
650 * client if we disconnect on hang up) */
651 priv->tty_state = TTY_CLOSED;
652
653 if (priv->iucv_state == IUCV_SEVERED)
654 hvc_iucv_cleanup(priv);
655 spin_unlock_bh(&priv->lock);
656}
657
658/**
659 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
660 * @hp: Pointer to the HVC device (struct hvc_struct)
661 * @id: Additional data (originally passed to hvc_alloc):
662 * the index of a struct hvc_iucv_private instance.
663 *
664 * This routine notifies the HVC back-end that the last tty device fd has been
665 * closed. The function calls hvc_iucv_cleanup() to clean up the struct
666 * hvc_iucv_private instance.
667 *
668 * Locking: struct hvc_iucv_private->lock
669 */
670static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
671{
672 struct hvc_iucv_private *priv;
673 struct iucv_path *path;
674
675 priv = hvc_iucv_get_private(id);
676 if (!priv)
677 return;
678
679 flush_sndbuf_sync(priv);
680
681 spin_lock_bh(&priv->lock);
682 path = priv->path; /* save reference to IUCV path */
683 priv->path = NULL;
684 hvc_iucv_cleanup(priv);
685 spin_unlock_bh(&priv->lock);
686
687 /* sever IUCV path outside of priv->lock due to lock ordering of:
688 * priv->lock <--> iucv_table_lock */
689 if (path) {
690 iucv_path_sever(path, NULL);
691 iucv_path_free(path);
692 }
693}
694
695/**
696 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
697 * @ipvmid: Originating z/VM user ID (right padded with blanks)
698 *
699 * Returns 0 if the z/VM user ID @ipvmid is allowed to connect, otherwise
700 * non-zero.
701 */
702static int hvc_iucv_filter_connreq(u8 ipvmid[8])
703{
704 size_t i;
705
706 /* Note: default policy is ACCEPT if no filter is set */
707 if (!hvc_iucv_filter_size)
708 return 0;
709
710 for (i = 0; i < hvc_iucv_filter_size; i++)
711 if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
712 return 0;
713 return 1;
714}
715
716/**
717 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
718 * @path: Pending path (struct iucv_path)
719 * @ipvmid: z/VM system identifier of originator
720 * @ipuser: User specified data for this path
721 * (AF_IUCV: port/service name and originator port)
722 *
723 * The function uses the @ipuser data to determine if the pending path belongs
724 * to a terminal managed by this device driver.
725 * If the path belongs to this driver, ensure that the terminal is not accessed
726 * multiple times (only one connection to a terminal is allowed).
727 * If the terminal is not yet connected, the pending path is accepted and is
728 * associated to the appropriate struct hvc_iucv_private instance.
729 *
730 * Returns 0 if @path belongs to a terminal managed by this device driver;
731 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
732 *
733 * Locking: struct hvc_iucv_private->lock
734 */
735static int hvc_iucv_path_pending(struct iucv_path *path,
736 u8 ipvmid[8], u8 ipuser[16])
737{
738 struct hvc_iucv_private *priv;
739 u8 nuser_data[16];
740 u8 vm_user_id[9];
741 int i, rc;
742
743 priv = NULL;
744 for (i = 0; i < hvc_iucv_devices; i++)
745 if (hvc_iucv_table[i] &&
746 (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
747 priv = hvc_iucv_table[i];
748 break;
749 }
750 if (!priv)
751 return -ENODEV;
752
753 /* Enforce that ipvmid is allowed to connect to us */
754 read_lock(&hvc_iucv_filter_lock);
755 rc = hvc_iucv_filter_connreq(ipvmid);
756 read_unlock(&hvc_iucv_filter_lock);
757 if (rc) {
758 iucv_path_sever(path, ipuser);
759 iucv_path_free(path);
760 memcpy(vm_user_id, ipvmid, 8);
761 vm_user_id[8] = 0;
762 pr_info("A connection request from z/VM user ID %s "
763 "was refused\n", vm_user_id);
764 return 0;
765 }
766
767 spin_lock(&priv->lock);
768
769 /* If the terminal is already connected or being severed, then sever
770 * this path to enforce that there is only ONE established communication
771 * path per terminal. */
772 if (priv->iucv_state != IUCV_DISCONN) {
773 iucv_path_sever(path, ipuser);
774 iucv_path_free(path);
775 goto out_path_handled;
776 }
777
778 /* accept path */
779 memcpy(nuser_data, ipuser + 8, 8); /* remote service (for af_iucv) */
780 memcpy(nuser_data + 8, ipuser, 8); /* local service (for af_iucv) */
781 path->msglim = 0xffff; /* IUCV MSGLIMIT */
782 path->flags &= ~IUCV_IPRMDATA; /* TODO: use IUCV_IPRMDATA */
783 rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
784 if (rc) {
785 iucv_path_sever(path, ipuser);
786 iucv_path_free(path);
787 goto out_path_handled;
788 }
789 priv->path = path;
790 priv->iucv_state = IUCV_CONNECTED;
791
792 /* flush buffered output data... */
793 schedule_delayed_work(&priv->sndbuf_work, 5);
794
795out_path_handled:
796 spin_unlock(&priv->lock);
797 return 0;
798}
799
800/**
801 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
802 * @path: Pending path (struct iucv_path)
803 * @ipuser: User specified data for this path
804 * (AF_IUCV: port/service name and originator port)
805 *
806 * This function calls the hvc_iucv_hangup() function for the
807 * respective IUCV HVC terminal.
808 *
809 * Locking: struct hvc_iucv_private->lock
810 */
811static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
812{
813 struct hvc_iucv_private *priv = path->private;
814
815 hvc_iucv_hangup(priv);
816}
817
818/**
819 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
820 * @path: Pending path (struct iucv_path)
821 * @msg: Pointer to the IUCV message
822 *
823 * The function puts an incoming message on the input queue for later
824 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
825 * If the tty has not yet been opened, the message is rejected.
826 *
827 * Locking: struct hvc_iucv_private->lock
828 */
829static void hvc_iucv_msg_pending(struct iucv_path *path,
830 struct iucv_message *msg)
831{
832 struct hvc_iucv_private *priv = path->private;
833 struct iucv_tty_buffer *rb;
834
835 /* reject messages that exceed max size of iucv_tty_msg->datalen */
836 if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
837 iucv_message_reject(path, msg);
838 return;
839 }
840
841 spin_lock(&priv->lock);
842
843 /* reject messages if tty has not yet been opened */
844 if (priv->tty_state == TTY_CLOSED) {
845 iucv_message_reject(path, msg);
846 goto unlock_return;
847 }
848
849 /* allocate tty buffer to save iucv msg only */
850 rb = alloc_tty_buffer(0, GFP_ATOMIC);
851 if (!rb) {
852 iucv_message_reject(path, msg);
853 goto unlock_return; /* -ENOMEM */
854 }
855 rb->msg = *msg;
856
857 list_add_tail(&rb->list, &priv->tty_inqueue);
858
859 hvc_kick(); /* wake up hvc thread */
860
861unlock_return:
862 spin_unlock(&priv->lock);
863}
864
865/**
866 * hvc_iucv_msg_complete() - IUCV handler to process message completion
867 * @path: Pending path (struct iucv_path)
868 * @msg: Pointer to the IUCV message
869 *
870 * The function is called upon completion of message delivery to remove the
871 * message from the outqueue. Additional delivery information can be found
872 * in msg->audit: rejected messages (0x040000 (IPADRJCT)), and
873 * purged messages (0x010000 (IPADPGNR)).
874 *
875 * Locking: struct hvc_iucv_private->lock
876 */
877static void hvc_iucv_msg_complete(struct iucv_path *path,
878 struct iucv_message *msg)
879{
880 struct hvc_iucv_private *priv = path->private;
881 struct iucv_tty_buffer *ent, *next;
882 LIST_HEAD(list_remove);
883
884 spin_lock(&priv->lock);
885 list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
886 if (ent->msg.id == msg->id) {
887 list_move(&ent->list, &list_remove);
888 break;
889 }
890 wake_up(&priv->sndbuf_waitq);
891 spin_unlock(&priv->lock);
892 destroy_tty_buffer_list(&list_remove);
893}
894
895/**
896 * hvc_iucv_pm_freeze() - Freeze PM callback
897 * @dev: IUCV HVC terminal device
898 *
899 * Sever an established IUCV communication path and
900 * trigger a hang-up of the underlying HVC terminal.
901 */
902static int hvc_iucv_pm_freeze(struct device *dev)
903{
904 struct hvc_iucv_private *priv = dev_get_drvdata(dev);
905
906 local_bh_disable();
907 hvc_iucv_hangup(priv);
908 local_bh_enable();
909
910 return 0;
911}
912
913/**
914 * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
915 * @dev: IUCV HVC terminal device
916 *
917 * Wake up the HVC thread to trigger hang-up and respective
918 * HVC back-end notifier invocations.
919 */
920static int hvc_iucv_pm_restore_thaw(struct device *dev)
921{
922 hvc_kick();
923 return 0;
924}
925
926
927/* HVC operations */
928static const struct hv_ops hvc_iucv_ops = {
929 .get_chars = hvc_iucv_get_chars,
930 .put_chars = hvc_iucv_put_chars,
931 .notifier_add = hvc_iucv_notifier_add,
932 .notifier_del = hvc_iucv_notifier_del,
933 .notifier_hangup = hvc_iucv_notifier_hangup,
934};
935
936/* Suspend / resume device operations */
937static const struct dev_pm_ops hvc_iucv_pm_ops = {
938 .freeze = hvc_iucv_pm_freeze,
939 .thaw = hvc_iucv_pm_restore_thaw,
940 .restore = hvc_iucv_pm_restore_thaw,
941};
942
943/* IUCV HVC device driver */
944static struct device_driver hvc_iucv_driver = {
945 .name = KMSG_COMPONENT,
946 .bus = &iucv_bus,
947 .pm = &hvc_iucv_pm_ops,
948};
949
950/**
951 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
952 * @id: hvc_iucv_table index
953 * @is_console: Flag if the instance is used as Linux console
954 *
955 * This function allocates a new hvc_iucv_private structure and stores
956 * the instance in hvc_iucv_table at index @id.
957 * Returns 0 on success; otherwise non-zero.
958 */
959static int __init hvc_iucv_alloc(int id, unsigned int is_console)
960{
961 struct hvc_iucv_private *priv;
962 char name[9];
963 int rc;
964
965 priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
966 if (!priv)
967 return -ENOMEM;
968
969 spin_lock_init(&priv->lock);
970 INIT_LIST_HEAD(&priv->tty_outqueue);
971 INIT_LIST_HEAD(&priv->tty_inqueue);
972 INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
973 init_waitqueue_head(&priv->sndbuf_waitq);
974
975 priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
976 if (!priv->sndbuf) {
977 kfree(priv);
978 return -ENOMEM;
979 }
980
981 /* set console flag */
982 priv->is_console = is_console;
983
984 /* allocate hvc device */
985 priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */
986 HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
987 if (IS_ERR(priv->hvc)) {
988 rc = PTR_ERR(priv->hvc);
989 goto out_error_hvc;
990 }
991
992 /* notify HVC thread instead of using polling */
993 priv->hvc->irq_requested = 1;
994
995 /* setup iucv related information */
996 snprintf(name, 9, "lnxhvc%-2d", id);
997 memcpy(priv->srv_name, name, 8);
998 ASCEBC(priv->srv_name, 8);
999
1000 /* create and setup device */
1001 priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
1002 if (!priv->dev) {
1003 rc = -ENOMEM;
1004 goto out_error_dev;
1005 }
1006 dev_set_name(priv->dev, "hvc_iucv%d", id);
1007 dev_set_drvdata(priv->dev, priv);
1008 priv->dev->bus = &iucv_bus;
1009 priv->dev->parent = iucv_root;
1010 priv->dev->driver = &hvc_iucv_driver;
1011 priv->dev->release = (void (*)(struct device *)) kfree;
1012 rc = device_register(priv->dev);
1013 if (rc) {
1014 put_device(priv->dev);
1015 goto out_error_dev;
1016 }
1017
1018 hvc_iucv_table[id] = priv;
1019 return 0;
1020
1021out_error_dev:
1022 hvc_remove(priv->hvc);
1023out_error_hvc:
1024 free_page((unsigned long) priv->sndbuf);
1025 kfree(priv);
1026
1027 return rc;
1028}
1029
1030/**
1031 * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances
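 * @priv: Pointer to the struct hvc_iucv_private instance to destroy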
1032 */
1033static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
1034{
1035 hvc_remove(priv->hvc);
1036 device_unregister(priv->dev);
1037 free_page((unsigned long) priv->sndbuf);
1038 kfree(priv);
1039}
1040
1041/**
1042 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
1043 * @filter: String containing a comma-separated list of z/VM user IDs
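 * @dest: Buffer to store the parsed z/VM user ID (8 bytes, blank-padded)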
1044 */
1045static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
1046{
1047 const char *nextdelim, *residual;
1048 size_t len;
1049
1050 nextdelim = strchr(filter, ',');
1051 if (nextdelim) {
1052 len = nextdelim - filter;
1053 residual = nextdelim + 1;
1054 } else {
1055 len = strlen(filter);
1056 residual = filter + len;
1057 }
1058
1059 if (len == 0)
1060 return ERR_PTR(-EINVAL);
1061
1062 /* check for '\n' (if called from sysfs) */
1063 if (filter[len - 1] == '\n')
1064 len--;
1065
1066 if (len > 8)
1067 return ERR_PTR(-EINVAL);
1068
1069 /* pad with blanks and save upper case version of user ID */
1070 memset(dest, ' ', 8);
1071 while (len--)
1072 dest[len] = toupper(filter[len]);
1073 return residual;
1074}
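/* Example (illustrative, hypothetical user IDs): for the input
 * "lxuser1,lxuser2", hvc_iucv_parse_filter() stores "LXUSER1" upper-cased
 * and blank-padded to 8 bytes in dest, and returns a pointer to "lxuser2". */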
1075
1076/**
1077 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
1078 * @filter: String consisting of a comma-separated list of z/VM user IDs
1079 *
1080 * The function parses the @filter string and creates an array containing
1081 * the list of z/VM user ID filter entries.
1082 * Return code 0 means success, -EINVAL if the filter is syntactically
1083 * incorrect, -ENOMEM if there was not enough memory to allocate the
1084 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
1085 */
1086static int hvc_iucv_setup_filter(const char *val)
1087{
1088 const char *residual;
1089 int err;
1090 size_t size, count;
1091 void *array, *old_filter;
1092
1093 count = strlen(val);
1094 if (count == 0 || (count == 1 && val[0] == '\n')) {
1095 size = 0;
1096 array = NULL;
1097 goto out_replace_filter; /* clear filter */
1098 }
1099
1100 /* count user IDs in order to allocate sufficient memory */
1101 size = 1;
1102 residual = val;
1103 while ((residual = strchr(residual, ',')) != NULL) {
1104 residual++;
1105 size++;
1106 }
1107
1108 /* check if the specified list exceeds the filter limit */
1109 if (size > MAX_VMID_FILTER)
1110 return -ENOSPC;
1111
1112 array = kzalloc(size * 8, GFP_KERNEL);
1113 if (!array)
1114 return -ENOMEM;
1115
1116 count = size;
1117 residual = val;
1118 while (*residual && count) {
1119 residual = hvc_iucv_parse_filter(residual,
1120 array + ((size - count) * 8));
1121 if (IS_ERR(residual)) {
1122 err = PTR_ERR(residual);
1123 kfree(array);
1124 goto out_err;
1125 }
1126 count--;
1127 }
1128
1129out_replace_filter:
1130 write_lock_bh(&hvc_iucv_filter_lock);
1131 old_filter = hvc_iucv_filter;
1132 hvc_iucv_filter_size = size;
1133 hvc_iucv_filter = array;
1134 write_unlock_bh(&hvc_iucv_filter_lock);
1135 kfree(old_filter);
1136
1137 err = 0;
1138out_err:
1139 return err;
1140}
1141
1142/**
1143 * param_set_vmidfilter() - Set z/VM user ID filter parameter
1144 * @val: String consisting of a comma-separated list of z/VM user IDs
1145 * @kp: Kernel parameter pointing to hvc_iucv_filter array
1146 *
1147 * The function sets up the z/VM user ID filter specified as comma-separated
1148 * list of user IDs in @val.
1149 * Note: If it is called early in the boot process, @val is stored and
1150 * parsed later in hvc_iucv_init().
1151 */
1152static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
1153{
1154 int rc;
1155
1156 if (!MACHINE_IS_VM || !hvc_iucv_devices)
1157 return -ENODEV;
1158
1159 if (!val)
1160 return -EINVAL;
1161
1162 rc = 0;
1163 if (slab_is_available())
1164 rc = hvc_iucv_setup_filter(val);
1165 else
1166 hvc_iucv_filter_string = val; /* defer... */
1167 return rc;
1168}
1169
1170/**
1171 * param_get_vmidfilter() - Get z/VM user ID filter
1172 * @buffer: Buffer to store z/VM user ID filter,
1173 * (assumed buffer size: PAGE_SIZE)
1174 * @kp: Kernel parameter pointing to the hvc_iucv_filter array
1175 *
1176 * The function stores the filter as a comma-separated list of z/VM user IDs
1177 * in @buffer. Typically, sysfs routines call this function for attr show.
1178 */
1179static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
1180{
1181 int rc;
1182 size_t index, len;
1183 void *start, *end;
1184
1185 if (!MACHINE_IS_VM || !hvc_iucv_devices)
1186 return -ENODEV;
1187
1188 rc = 0;
1189 read_lock_bh(&hvc_iucv_filter_lock);
1190 for (index = 0; index < hvc_iucv_filter_size; index++) {
1191 start = hvc_iucv_filter + (8 * index);
1192 end = memchr(start, ' ', 8);
1193 len = (end) ? end - start : 8;
1194 memcpy(buffer + rc, start, len);
1195 rc += len;
1196 buffer[rc++] = ',';
1197 }
1198 read_unlock_bh(&hvc_iucv_filter_lock);
1199 if (rc)
1200 buffer[--rc] = '\0'; /* replace last comma and update rc */
1201 return rc;
1202}
1203
1204#define param_check_vmidfilter(name, p) __param_check(name, p, void)
1205
1206static struct kernel_param_ops param_ops_vmidfilter = {
1207 .set = param_set_vmidfilter,
1208 .get = param_get_vmidfilter,
1209};
1210
1211/**
1212 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
1213 */
1214static int __init hvc_iucv_init(void)
1215{
1216 int rc;
1217 unsigned int i;
1218
1219 if (!hvc_iucv_devices)
1220 return -ENODEV;
1221
1222 if (!MACHINE_IS_VM) {
1223 pr_notice("The z/VM IUCV HVC device driver cannot "
1224 "be used without z/VM\n");
1225 rc = -ENODEV;
1226 goto out_error;
1227 }
1228
1229 if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
1230 pr_err("%lu is not a valid value for the hvc_iucv= "
1231 "kernel parameter\n", hvc_iucv_devices);
1232 rc = -EINVAL;
1233 goto out_error;
1234 }
1235
1236 /* register IUCV HVC device driver */
1237 rc = driver_register(&hvc_iucv_driver);
1238 if (rc)
1239 goto out_error;
1240
1241 /* parse hvc_iucv_allow string and create z/VM user ID filter list */
1242 if (hvc_iucv_filter_string) {
1243 rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
1244 switch (rc) {
1245 case 0:
1246 break;
1247 case -ENOMEM:
1248 pr_err("Allocating memory failed with "
1249 "reason code=%d\n", 3);
1250 goto out_error;
1251 case -EINVAL:
1252 pr_err("hvc_iucv_allow= does not specify a valid "
1253 "z/VM user ID list\n");
1254 goto out_error;
1255 case -ENOSPC:
1256 pr_err("hvc_iucv_allow= specifies too many "
1257 "z/VM user IDs\n");
1258 goto out_error;
1259 default:
1260 goto out_error;
1261 }
1262 }
1263
1264 hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
1265 sizeof(struct iucv_tty_buffer),
1266 0, 0, NULL);
1267 if (!hvc_iucv_buffer_cache) {
1268 pr_err("Allocating memory failed with reason code=%d\n", 1);
1269 rc = -ENOMEM;
1270 goto out_error;
1271 }
1272
1273 hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
1274 hvc_iucv_buffer_cache);
1275 if (!hvc_iucv_mempool) {
1276 pr_err("Allocating memory failed with reason code=%d\n", 2);
1277 kmem_cache_destroy(hvc_iucv_buffer_cache);
1278 rc = -ENOMEM;
1279 goto out_error;
1280 }
1281
1282 /* register the first terminal device as console
1283 * (must be done before allocating hvc terminal devices) */
1284 rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
1285 if (rc) {
1286 pr_err("Registering HVC terminal device as "
1287 "Linux console failed\n");
1288 goto out_error_memory;
1289 }
1290
1291 /* allocate hvc_iucv_private structs */
1292 for (i = 0; i < hvc_iucv_devices; i++) {
1293 rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
1294 if (rc) {
1295 pr_err("Creating a new HVC terminal device "
1296 "failed with error code=%d\n", rc);
1297 goto out_error_hvc;
1298 }
1299 }
1300
1301 /* register IUCV callback handler */
1302 rc = iucv_register(&hvc_iucv_handler, 0);
1303 if (rc) {
1304 pr_err("Registering IUCV handlers failed with error code=%d\n",
1305 rc);
1306 goto out_error_hvc;
1307 }
1308
1309 return 0;
1310
1311out_error_hvc:
1312 for (i = 0; i < hvc_iucv_devices; i++)
1313 if (hvc_iucv_table[i])
1314 hvc_iucv_destroy(hvc_iucv_table[i]);
1315out_error_memory:
1316 mempool_destroy(hvc_iucv_mempool);
1317 kmem_cache_destroy(hvc_iucv_buffer_cache);
1318out_error:
1319 if (hvc_iucv_filter)
1320 kfree(hvc_iucv_filter);
1321 hvc_iucv_devices = 0; /* ensure that we do not provide any device */
1322 return rc;
1323}
1324
1325/**
1326 * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
1327 * @val: Parameter value (numeric)
1328 */
1329static int __init hvc_iucv_config(char *val)
1330{
1331 return strict_strtoul(val, 10, &hvc_iucv_devices);
1332}
1333
1334
1335device_initcall(hvc_iucv_init);
1336__setup("hvc_iucv=", hvc_iucv_config);
1337core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
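/* Example usage (illustrative; the guest names below are placeholders):
 * Booting with "hvc_iucv=4 hvc_iucv_allow=LXGUEST1,LXGUEST2" provides four
 * HVC terminal devices and restricts incoming IUCV connections to the two
 * listed z/VM guests. Because hvc_iucv_allow is registered with permission
 * 0640, the filter can also be changed at runtime through the writable
 * parameter file in sysfs (typically /sys/module/kernel/parameters/
 * hvc_iucv_allow for a core_param). */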
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * z/VM IUCV hypervisor console (HVC) device driver
4 *
5 * This HVC device driver provides terminal access using
6 * z/VM IUCV communication paths.
7 *
8 * Copyright IBM Corp. 2008, 2013
9 *
10 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
11 */
12#define KMSG_COMPONENT "hvc_iucv"
13#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14
15#include <linux/types.h>
16#include <linux/slab.h>
17#include <asm/ebcdic.h>
18#include <linux/ctype.h>
19#include <linux/delay.h>
20#include <linux/device.h>
21#include <linux/init.h>
22#include <linux/mempool.h>
23#include <linux/moduleparam.h>
24#include <linux/tty.h>
25#include <linux/wait.h>
26#include <net/iucv/iucv.h>
27
28#include "hvc_console.h"
29
30
31/* General device driver settings */
32#define HVC_IUCV_MAGIC 0xc9e4c3e5
33#define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
34#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
35
36/* IUCV TTY message */
37#define MSG_VERSION 0x02 /* Message version */
38#define MSG_TYPE_ERROR 0x01 /* Error message */
39#define MSG_TYPE_TERMENV 0x02 /* Terminal environment variable */
40#define MSG_TYPE_TERMIOS 0x04 /* Terminal IO struct update */
41#define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */
42#define MSG_TYPE_DATA 0x10 /* Terminal data */
43
44struct iucv_tty_msg {
45 u8 version; /* Message version */
46 u8 type; /* Message type */
47#define MSG_MAX_DATALEN ((u16)(~0))
48 u16 datalen; /* Payload length */
49 u8 data[]; /* Payload buffer */
50} __attribute__((packed));
51#define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data))
52
53enum iucv_state_t {
54 IUCV_DISCONN = 0,
55 IUCV_CONNECTED = 1,
56 IUCV_SEVERED = 2,
57};
58
59enum tty_state_t {
60 TTY_CLOSED = 0,
61 TTY_OPENED = 1,
62};
63
64struct hvc_iucv_private {
65 struct hvc_struct *hvc; /* HVC struct reference */
66 u8 srv_name[8]; /* IUCV service name (ebcdic) */
67 unsigned char is_console; /* Linux console usage flag */
68 enum iucv_state_t iucv_state; /* IUCV connection status */
69 enum tty_state_t tty_state; /* TTY status */
70 struct iucv_path *path; /* IUCV path pointer */
71 spinlock_t lock; /* hvc_iucv_private lock */
72#define SNDBUF_SIZE (PAGE_SIZE) /* must be < MSG_MAX_DATALEN */
73 void *sndbuf; /* send buffer */
74 size_t sndbuf_len; /* length of send buffer */
75#define QUEUE_SNDBUF_DELAY (HZ / 25)
76 struct delayed_work sndbuf_work; /* work: send iucv msg(s) */
77 wait_queue_head_t sndbuf_waitq; /* wait for send completion */
78 struct list_head tty_outqueue; /* outgoing IUCV messages */
79 struct list_head tty_inqueue; /* incoming IUCV messages */
80 struct device *dev; /* device structure */
81 u8 info_path[16]; /* IUCV path info (dev attr) */
82};
83
84struct iucv_tty_buffer {
85 struct list_head list; /* list pointer */
86 struct iucv_message msg; /* store an IUCV message */
87 size_t offset; /* data buffer offset */
88 struct iucv_tty_msg *mbuf; /* buffer to store input/output data */
89};
90
91/* IUCV callback handler */
92static int hvc_iucv_path_pending(struct iucv_path *, u8 *, u8 *);
93static void hvc_iucv_path_severed(struct iucv_path *, u8 *);
94static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
95static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
96
97
98/* Kernel module parameter: use one terminal device as default */
99static unsigned long hvc_iucv_devices = 1;
100
101/* Array of allocated hvc iucv tty lines... */
102static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
103#define IUCV_HVC_CON_IDX (0)
104/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
105#define MAX_VMID_FILTER (500)
106#define FILTER_WILDCARD_CHAR '*'
107static size_t hvc_iucv_filter_size;
108static void *hvc_iucv_filter;
109static const char *hvc_iucv_filter_string;
110static DEFINE_RWLOCK(hvc_iucv_filter_lock);
111
112/* Kmem cache and mempool for iucv_tty_buffer elements */
113static struct kmem_cache *hvc_iucv_buffer_cache;
114static mempool_t *hvc_iucv_mempool;
115
116/* IUCV handler callback functions */
117static struct iucv_handler hvc_iucv_handler = {
118 .path_pending = hvc_iucv_path_pending,
119 .path_severed = hvc_iucv_path_severed,
120 .message_complete = hvc_iucv_msg_complete,
121 .message_pending = hvc_iucv_msg_pending,
122};
123
124
125/**
126 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
127 * @num: The HVC virtual terminal number (vtermno)
128 *
129 * This function returns the struct hvc_iucv_private instance that corresponds
130 * to the HVC virtual terminal number specified as parameter @num.
131 */
132static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
133{
134 if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
135 return NULL;
136 return hvc_iucv_table[num - HVC_IUCV_MAGIC];
137}
138
139/**
140 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
141 * @size: Size of the internal buffer used to store data.
142 * @flags: Memory allocation flags passed to mempool.
143 *
144 * This function allocates a new struct iucv_tty_buffer element and, optionally,
145 * allocates an internal data buffer with the specified size @size.
146 * The internal data buffer is always allocated with GFP_DMA which is
147 * required for receiving and sending data with IUCV.
148 * Note: The total message size arises from the internal buffer size and the
149 * members of the iucv_tty_msg structure.
150 * The function returns NULL if memory allocation has failed.
151 */
152static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
153{
154 struct iucv_tty_buffer *bufp;
155
156 bufp = mempool_alloc(hvc_iucv_mempool, flags);
157 if (!bufp)
158 return NULL;
159 memset(bufp, 0, sizeof(*bufp));
160
161 if (size > 0) {
162 bufp->msg.length = MSG_SIZE(size);
163 bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
164 if (!bufp->mbuf) {
165 mempool_free(bufp, hvc_iucv_mempool);
166 return NULL;
167 }
168 bufp->mbuf->version = MSG_VERSION;
169 bufp->mbuf->type = MSG_TYPE_DATA;
170 bufp->mbuf->datalen = (u16) size;
171 }
172 return bufp;
173}
174
175/**
176 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
177 * @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
178 */
179static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
180{
181 kfree(bufp->mbuf);
182 mempool_free(bufp, hvc_iucv_mempool);
183}
184
185/**
186 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
187 * @list: List containing struct iucv_tty_buffer elements.
188 */
189static void destroy_tty_buffer_list(struct list_head *list)
190{
191 struct iucv_tty_buffer *ent, *next;
192
193 list_for_each_entry_safe(ent, next, list, list) {
194 list_del(&ent->list);
195 destroy_tty_buffer(ent);
196 }
197}
198
199/**
200 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
201 * @priv: Pointer to struct hvc_iucv_private
202 * @buf: HVC buffer for writing received terminal data.
203 * @count: HVC buffer size.
204 * @has_more_data: Pointer to an int variable.
205 *
206 * The function picks up pending messages from the input queue and receives
207 * the message data that is then written to the specified buffer @buf.
208 * If the buffer size @count is less than the data message size, the
209 * message is kept on the input queue and @has_more_data is set to 1.
210 * If all message data has been written, the message is removed from
211 * the input queue.
212 *
213 * The function returns the number of bytes written to the terminal, zero if
214 * there are no pending data messages available or if there is no established
215 * IUCV path.
216 * If the IUCV path has been severed, then -EPIPE is returned to cause a
217 * hang up (that is issued by the HVC layer).
218 */
219static int hvc_iucv_write(struct hvc_iucv_private *priv,
220 char *buf, int count, int *has_more_data)
221{
222 struct iucv_tty_buffer *rb;
223 int written;
224 int rc;
225
226 /* immediately return if there is no IUCV connection */
227 if (priv->iucv_state == IUCV_DISCONN)
228 return 0;
229
230 /* if the IUCV path has been severed, return -EPIPE to inform the
231 * HVC layer to hang up the tty device. */
232 if (priv->iucv_state == IUCV_SEVERED)
233 return -EPIPE;
234
235 /* check if there are pending messages */
236 if (list_empty(&priv->tty_inqueue))
237 return 0;
238
239 /* receive an iucv message and flip data to the tty (ldisc) */
240 rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
241
242 written = 0;
243 if (!rb->mbuf) { /* message not yet received ... */
244 /* allocate mem to store msg data; if no memory is available
245 * then leave the buffer on the list and re-try later */
246 rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
247 if (!rb->mbuf)
248 return -ENOMEM;
249
250 rc = __iucv_message_receive(priv->path, &rb->msg, 0,
251 rb->mbuf, rb->msg.length, NULL);
252 switch (rc) {
253 case 0: /* Successful */
254 break;
255 case 2: /* No message found */
256 case 9: /* Message purged */
257 break;
258 default:
259 written = -EIO;
260 }
261 /* remove buffer if an error has occurred or received data
262 * is not correct */
263 if (rc || (rb->mbuf->version != MSG_VERSION) ||
264 (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
265 goto out_remove_buffer;
266 }
267
268 switch (rb->mbuf->type) {
269 case MSG_TYPE_DATA:
270 written = min_t(int, rb->mbuf->datalen - rb->offset, count);
271 memcpy(buf, rb->mbuf->data + rb->offset, written);
272 if (written < (rb->mbuf->datalen - rb->offset)) {
273 rb->offset += written;
274 *has_more_data = 1;
275 goto out_written;
276 }
277 break;
278
279 case MSG_TYPE_WINSIZE:
280 if (rb->mbuf->datalen != sizeof(struct winsize))
281 break;
282 /* The caller must ensure that the hvc is locked, which
283 * is the case when called from hvc_iucv_get_chars() */
284 __hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
285 break;
286
287 case MSG_TYPE_ERROR: /* ignored ... */
288 case MSG_TYPE_TERMENV: /* ignored ... */
289 case MSG_TYPE_TERMIOS: /* ignored ... */
290 break;
291 }
292
293out_remove_buffer:
294 list_del(&rb->list);
295 destroy_tty_buffer(rb);
296 *has_more_data = !list_empty(&priv->tty_inqueue);
297
298out_written:
299 return written;
300}
301
302/**
303 * hvc_iucv_get_chars() - HVC get_chars operation.
304 * @vtermno: HVC virtual terminal number.
305 * @buf: Pointer to a buffer to store data
306 * @count: Size of buffer available for writing
307 *
308 * The HVC thread calls this method to read characters from the back-end.
309 * If an IUCV communication path has been established, pending IUCV messages
310 * are received and data is copied into buffer @buf up to @count bytes.
311 *
312 * Locking: The routine gets called under an irqsave() spinlock; and
313 * the routine locks the struct hvc_iucv_private->lock to call
314 * helper functions.
315 */
316static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
317{
318 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
319 int written;
320 int has_more_data;
321
322 if (count <= 0)
323 return 0;
324
325 if (!priv)
326 return -ENODEV;
327
328 spin_lock(&priv->lock);
329 has_more_data = 0;
330 written = hvc_iucv_write(priv, buf, count, &has_more_data);
331 spin_unlock(&priv->lock);
332
333 /* if there are still messages on the queue... schedule another run */
334 if (has_more_data)
335 hvc_kick();
336
337 return written;
338}
339
340/**
341 * hvc_iucv_queue() - Buffer terminal data for sending.
342 * @priv: Pointer to struct hvc_iucv_private instance.
343 * @buf: Buffer containing data to send.
344 * @count: Size of buffer and amount of data to send.
345 *
346 * The function queues data for sending. To actually send the buffered data,
347 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
348 * The function returns the number of data bytes that has been buffered.
349 *
350 * If the device is not connected, data is ignored and the function returns
351 * @count.
352 * If the buffer is full, the function returns 0.
353 * If an existing IUCV communicaton path has been severed, -EPIPE is returned
354 * (that can be passed to HVC layer to cause a tty hangup).
355 */
356static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
357 int count)
358{
359 size_t len;
360
361 if (priv->iucv_state == IUCV_DISCONN)
362 return count; /* ignore data */
363
364 if (priv->iucv_state == IUCV_SEVERED)
365 return -EPIPE;
366
367 len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
368 if (!len)
369 return 0;
370
371 memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
372 priv->sndbuf_len += len;
373
374 if (priv->iucv_state == IUCV_CONNECTED)
375 schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
376
377 return len;
378}
379
380/**
381 * hvc_iucv_send() - Send an IUCV message containing terminal data.
382 * @priv: Pointer to struct hvc_iucv_private instance.
383 *
384 * If an IUCV communication path has been established, the buffered output data
385 * is sent via an IUCV message and the number of bytes sent is returned.
386 * Returns 0 if there is no established IUCV communication path or
387 * -EPIPE if an existing IUCV communicaton path has been severed.
388 */
389static int hvc_iucv_send(struct hvc_iucv_private *priv)
390{
391 struct iucv_tty_buffer *sb;
392 int rc, len;
393
394 if (priv->iucv_state == IUCV_SEVERED)
395 return -EPIPE;
396
397 if (priv->iucv_state == IUCV_DISCONN)
398 return -EIO;
399
400 if (!priv->sndbuf_len)
401 return 0;
402
403 /* allocate internal buffer to store msg data and also compute total
404 * message length */
405 sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
406 if (!sb)
407 return -ENOMEM;
408
409 memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
410 sb->mbuf->datalen = (u16) priv->sndbuf_len;
411 sb->msg.length = MSG_SIZE(sb->mbuf->datalen);
412
413 list_add_tail(&sb->list, &priv->tty_outqueue);
414
415 rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
416 (void *) sb->mbuf, sb->msg.length);
417 if (rc) {
418 /* drop the message here; however we might want to handle
419 * 0x03 (msg limit reached) by trying again... */
420 list_del(&sb->list);
421 destroy_tty_buffer(sb);
422 }
423 len = priv->sndbuf_len;
424 priv->sndbuf_len = 0;
425
426 return len;
427}
428
429/**
430 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
431 * @work: Work structure.
432 *
433 * This work queue function sends buffered output data over IUCV and,
434 * if not all buffered data could be sent, reschedules itself.
435 */
436static void hvc_iucv_sndbuf_work(struct work_struct *work)
437{
438 struct hvc_iucv_private *priv;
439
440 priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
441 if (!priv)
442 return;
443
444 spin_lock_bh(&priv->lock);
445 hvc_iucv_send(priv);
446 spin_unlock_bh(&priv->lock);
447}
448
449/**
450 * hvc_iucv_put_chars() - HVC put_chars operation.
451 * @vtermno: HVC virtual terminal number.
452 * @buf: Pointer to a buffer to read data from
453 * @count: Size of buffer available for reading
454 *
455 * The HVC thread calls this method to write characters to the back-end.
456 * The function calls hvc_iucv_queue() to queue terminal data for sending.
457 *
458 * Locking: The method gets called under an irqsave() spinlock and
459 * locks struct hvc_iucv_private->lock.
460 */
461static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
462{
463 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
464 int queued;
465
466 if (count <= 0)
467 return 0;
468
469 if (!priv)
470 return -ENODEV;
471
472 spin_lock(&priv->lock);
473 queued = hvc_iucv_queue(priv, buf, count);
474 spin_unlock(&priv->lock);
475
476 return queued;
477}
478
479/**
480 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
481 * @hp: Pointer to the HVC device (struct hvc_struct)
482 * @id: Additional data (originally passed to hvc_alloc): the index of a struct
483 * hvc_iucv_private instance.
484 *
485 * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
486 * instance that is derived from @id. Always returns 0.
487 *
488 * Locking: struct hvc_iucv_private->lock, spin_lock_bh
489 */
490static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
491{
492 struct hvc_iucv_private *priv;
493
494 priv = hvc_iucv_get_private(id);
495 if (!priv)
496 return 0;
497
498 spin_lock_bh(&priv->lock);
499 priv->tty_state = TTY_OPENED;
500 spin_unlock_bh(&priv->lock);
501
502 return 0;
503}
504
505/**
506 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
507 * @priv: Pointer to the struct hvc_iucv_private instance.
508 */
509static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
510{
511 destroy_tty_buffer_list(&priv->tty_outqueue);
512 destroy_tty_buffer_list(&priv->tty_inqueue);
513
514 priv->tty_state = TTY_CLOSED;
515 priv->iucv_state = IUCV_DISCONN;
516
517 priv->sndbuf_len = 0;
518}
519
520/**
521 * tty_outqueue_empty() - Test if the tty_outqueue list is empty
522 * @priv: Pointer to struct hvc_iucv_private instance.
523 */
524static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
525{
526 int rc;
527
528 spin_lock_bh(&priv->lock);
529 rc = list_empty(&priv->tty_outqueue);
530 spin_unlock_bh(&priv->lock);
531
532 return rc;
533}
534
535/**
536 * flush_sndbuf_sync() - Flush send buffer and wait for completion
537 * @priv: Pointer to struct hvc_iucv_private instance.
538 *
539 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
540 * to flush any buffered terminal output data and waits for completion.
541 */
542static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
543{
544 int sync_wait;
545
546 cancel_delayed_work_sync(&priv->sndbuf_work);
547
548 spin_lock_bh(&priv->lock);
549 hvc_iucv_send(priv); /* force sending buffered data */
550 sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
551 spin_unlock_bh(&priv->lock);
552
553 if (sync_wait)
554 wait_event_timeout(priv->sndbuf_waitq,
555 tty_outqueue_empty(priv), HZ/10);
556}
557
558/**
559 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
560 * @priv: Pointer to hvc_iucv_private structure
561 *
562 * This routine severs an existing IUCV communication path and hangs
563 * up the underlying HVC terminal device.
564 * The hang-up occurs only if an IUCV communication path is established;
565 * otherwise there is no need to hang up the terminal device.
566 *
567 * The IUCV HVC hang-up is separated into two steps:
568 * 1. After the IUCV path has been severed, the iucv_state is set to
569 * IUCV_SEVERED.
570 * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
571 * IUCV_SEVERED state causes the tty hang-up in the HVC layer.
572 *
573 * If the tty has not yet been opened, clean up the hvc_iucv_private
574 * structure to allow re-connects.
575 * If the tty has been opened, get_chars() returns -EPIPE to signal the
576 * HVC layer to hang up the tty; in that case, the HVC thread is woken
577 * up to call get_chars().
578 *
579 * Special notes on hanging up an HVC terminal instantiated as console:
580 * Hang-up: 1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
581 * 2. do_tty_hangup() calls tty->ops->close() for console_filp
582 * => no hangup notifier is called by HVC (default)
583 * 3. hvc_close() returns because of tty_hung_up_p(filp)
584 * => no delete notifier is called!
585 * Finally, the back-end is not notified; thus, the tty session is
586 * kept active (TTY_OPENED) to be ready for re-connects.
587 *
588 * Locking: spin_lock(&priv->lock) w/o disabling bh
589 */
590static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
591{
592 struct iucv_path *path;
593
594 path = NULL;
595 spin_lock(&priv->lock);
596 if (priv->iucv_state == IUCV_CONNECTED) {
597 path = priv->path;
598 priv->path = NULL;
599 priv->iucv_state = IUCV_SEVERED;
600 if (priv->tty_state == TTY_CLOSED)
601 hvc_iucv_cleanup(priv);
602 else
603 /* console is special (see above) */
604 if (priv->is_console) {
605 hvc_iucv_cleanup(priv);
606 priv->tty_state = TTY_OPENED;
607 } else
608 hvc_kick();
609 }
610 spin_unlock(&priv->lock);
611
612 /* finally sever path (outside of priv->lock due to lock ordering) */
613 if (path) {
614 iucv_path_sever(path, NULL);
615 iucv_path_free(path);
616 }
617}
618
619/**
620 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
621 * @hp: Pointer to the HVC device (struct hvc_struct)
622 * @id: Additional data (originally passed to hvc_alloc):
623 * the index of a struct hvc_iucv_private instance.
624 *
625 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
626 * virtual or otherwise) has occurred.
627 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
628 * to keep an existing IUCV communication path established.
629 * (Background: vhangup() is called from user space (by getty or login) to
630 * disable writing to the tty by other applications).
631 * If the tty has been opened and an established IUCV path has been severed
632 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
633 *
634 * Locking: struct hvc_iucv_private->lock
635 */
636static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
637{
638 struct hvc_iucv_private *priv;
639
640 priv = hvc_iucv_get_private(id);
641 if (!priv)
642 return;
643
644 flush_sndbuf_sync(priv);
645
646 spin_lock_bh(&priv->lock);
647 /* NOTE: If the hangup was scheduled by ourselves (from the iucv
648 * path_severed callback [IUCV_SEVERED]), we have to clean up
649 * our structure and set the state to TTY_CLOSED.
650 * If the tty was hung up otherwise (e.g. vhangup()), then we
651 * ignore this hangup and keep the established IUCV path open
652 * (the reason is that we are not able to connect back to the
653 * client if we disconnect on hang-up). */
654 priv->tty_state = TTY_CLOSED;
655
656 if (priv->iucv_state == IUCV_SEVERED)
657 hvc_iucv_cleanup(priv);
658 spin_unlock_bh(&priv->lock);
659}
660
661/**
662 * hvc_iucv_dtr_rts() - HVC notifier for handling DTR/RTS
663 * @hp: Pointer to the HVC device (struct hvc_struct)
664 * @raise: Non-zero to raise or zero to lower DTR/RTS lines
665 *
666 * This routine notifies the HVC back-end to raise or lower DTR/RTS
667 * lines. Raising DTR/RTS is ignored. Lowering DTR/RTS indicates that
668 * the IUCV connection should be dropped (similar to hanging up a modem).
669 */
670static void hvc_iucv_dtr_rts(struct hvc_struct *hp, int raise)
671{
672 struct hvc_iucv_private *priv;
673 struct iucv_path *path;
674
675 /* Raising the DTR/RTS lines is ignored as IUCV connections can be
676 * established at any time.
677 */
678 if (raise)
679 return;
680
681 priv = hvc_iucv_get_private(hp->vtermno);
682 if (!priv)
683 return;
684
685 /* Lowering the DTR/RTS lines disconnects an established IUCV
686 * connection.
687 */
688 flush_sndbuf_sync(priv);
689
690 spin_lock_bh(&priv->lock);
691 path = priv->path; /* save reference to IUCV path */
692 priv->path = NULL;
693 priv->iucv_state = IUCV_DISCONN;
694 spin_unlock_bh(&priv->lock);
695
696 /* Sever IUCV path outside of priv->lock due to lock ordering of:
697 * priv->lock <--> iucv_table_lock */
698 if (path) {
699 iucv_path_sever(path, NULL);
700 iucv_path_free(path);
701 }
702}
703
704/**
705 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
706 * @hp: Pointer to the HVC device (struct hvc_struct)
707 * @id: Additional data (originally passed to hvc_alloc):
708 * the index of a struct hvc_iucv_private instance.
709 *
710 * This routine notifies the HVC back-end that the last tty device fd has been
711 * closed. The function cleans up tty resources. The clean-up of the IUCV
712 * connection is done in hvc_iucv_dtr_rts() and depends on the HUPCL termios
713 * control setting.
714 *
715 * Locking: struct hvc_iucv_private->lock
716 */
717static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
718{
719 struct hvc_iucv_private *priv;
720
721 priv = hvc_iucv_get_private(id);
722 if (!priv)
723 return;
724
725 flush_sndbuf_sync(priv);
726
727 spin_lock_bh(&priv->lock);
728 destroy_tty_buffer_list(&priv->tty_outqueue);
729 destroy_tty_buffer_list(&priv->tty_inqueue);
730 priv->tty_state = TTY_CLOSED;
731 priv->sndbuf_len = 0;
732 spin_unlock_bh(&priv->lock);
733}
734
735/**
736 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
737 * @ipvmid: Originating z/VM user ID (right padded with blanks)
738 *
739 * Returns 0 if the z/VM user ID that is specified with @ipvmid is permitted to
740 * connect, otherwise non-zero.
741 */
742static int hvc_iucv_filter_connreq(u8 ipvmid[8])
743{
744 const char *wildcard, *filter_entry;
745 size_t i, len;
746
747 /* Note: default policy is ACCEPT if no filter is set */
748 if (!hvc_iucv_filter_size)
749 return 0;
750
751 for (i = 0; i < hvc_iucv_filter_size; i++) {
752 filter_entry = hvc_iucv_filter + (8 * i);
753
754 /* If a filter entry contains the filter wildcard character,
755 * reduce the length to match the leading portion of the user
756 * ID only (wildcard match). Characters following the wildcard
757 * are ignored.
758 */
759 wildcard = strnchr(filter_entry, 8, FILTER_WILDCARD_CHAR);
760 len = (wildcard) ? wildcard - filter_entry : 8;
761 if (0 == memcmp(ipvmid, filter_entry, len))
762 return 0;
763 }
764 return 1;
765}
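
/*
 * Worked example of the wildcard matching above (the z/VM user IDs are made
 * up and '*' is assumed to be FILTER_WILDCARD_CHAR):
 *
 *	filter entry "LNXT*   "  ->  len = 4, prefix "LNXT"
 *	originator   "LNXT01  "  ->  memcmp() over 4 bytes matches -> accept
 *	originator   "LINUX01 "  ->  prefix differs                -> reject
 *
 * An entry without a wildcard, e.g. "LNXSYS01", is compared over all
 * 8 bytes and therefore accepts only that exact z/VM user ID.
 */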
766
767/**
768 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
769 * @path: Pending path (struct iucv_path)
770 * @ipvmid: z/VM system identifier of originator
771 * @ipuser: User specified data for this path
772 * (AF_IUCV: port/service name and originator port)
773 *
774 * The function uses the @ipuser data to determine if the pending path belongs
775 * to a terminal managed by this device driver.
776 * If the path belongs to this driver, ensure that the terminal is not accessed
777 * multiple times (only one connection to a terminal is allowed).
778 * If the terminal is not yet connected, the pending path is accepted and is
779 * associated with the appropriate struct hvc_iucv_private instance.
780 *
781 * Returns 0 if @path belongs to a terminal managed by this device driver;
782 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
783 *
784 * Locking: struct hvc_iucv_private->lock
785 */
786static int hvc_iucv_path_pending(struct iucv_path *path, u8 *ipvmid,
787 u8 *ipuser)
788{
789 struct hvc_iucv_private *priv, *tmp;
790 u8 wildcard[9] = "lnxhvc  "; /* wildcard service name, blank-padded to 8 chars */
791 int i, rc, find_unused;
792 u8 nuser_data[16];
793 u8 vm_user_id[9];
794
795 ASCEBC(wildcard, sizeof(wildcard));
796 find_unused = !memcmp(wildcard, ipuser, 8);
797
798 /* First, check if the pending path request is managed by this
799 * IUCV handler:
800 * - find a disconnected device if ipuser contains the wildcard
801 * - find the device that matches the terminal ID in ipuser
802 */
803 priv = NULL;
804 for (i = 0; i < hvc_iucv_devices; i++) {
805 tmp = hvc_iucv_table[i];
806 if (!tmp)
807 continue;
808
809 if (find_unused) {
810 spin_lock(&tmp->lock);
811 if (tmp->iucv_state == IUCV_DISCONN)
812 priv = tmp;
813 spin_unlock(&tmp->lock);
814
815 } else if (!memcmp(tmp->srv_name, ipuser, 8))
816 priv = tmp;
817 if (priv)
818 break;
819 }
820 if (!priv)
821 return -ENODEV;
822
823 /* Enforce that ipvmid is allowed to connect to us */
824 read_lock(&hvc_iucv_filter_lock);
825 rc = hvc_iucv_filter_connreq(ipvmid);
826 read_unlock(&hvc_iucv_filter_lock);
827 if (rc) {
828 iucv_path_sever(path, ipuser);
829 iucv_path_free(path);
830 memcpy(vm_user_id, ipvmid, 8);
831 vm_user_id[8] = 0;
832 pr_info("A connection request from z/VM user ID %s "
833 "was refused\n", vm_user_id);
834 return 0;
835 }
836
837 spin_lock(&priv->lock);
838
839 /* If the terminal is already connected or being severed, then sever
840 * this path to enforce that there is only ONE established communication
841 * path per terminal. */
842 if (priv->iucv_state != IUCV_DISCONN) {
843 iucv_path_sever(path, ipuser);
844 iucv_path_free(path);
845 goto out_path_handled;
846 }
847
848 /* accept path */
849 memcpy(nuser_data, ipuser + 8, 8); /* remote service (for af_iucv) */
850 memcpy(nuser_data + 8, ipuser, 8); /* local service (for af_iucv) */
851 path->msglim = 0xffff; /* IUCV MSGLIMIT */
852 path->flags &= ~IUCV_IPRMDATA; /* TODO: use IUCV_IPRMDATA */
853 rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
854 if (rc) {
855 iucv_path_sever(path, ipuser);
856 iucv_path_free(path);
857 goto out_path_handled;
858 }
859 priv->path = path;
860 priv->iucv_state = IUCV_CONNECTED;
861
862 /* store path information */
863 memcpy(priv->info_path, ipvmid, 8);
864 memcpy(priv->info_path + 8, ipuser + 8, 8);
865
866 /* flush buffered output data... */
867 schedule_delayed_work(&priv->sndbuf_work, 5);
868
869out_path_handled:
870 spin_unlock(&priv->lock);
871 return 0;
872}
873
874/**
875 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
876 * @path: Severed path (struct iucv_path)
877 * @ipuser: User specified data for this path
878 * (AF_IUCV: port/service name and originator port)
879 *
880 * This function calls the hvc_iucv_hangup() function for the
881 * respective IUCV HVC terminal.
882 *
883 * Locking: struct hvc_iucv_private->lock
884 */
885static void hvc_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
886{
887 struct hvc_iucv_private *priv = path->private;
888
889 hvc_iucv_hangup(priv);
890}
891
892/**
893 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
894 * @path: IUCV path on which the message arrived (struct iucv_path)
895 * @msg: Pointer to the IUCV message
896 *
897 * The function puts an incoming message on the input queue for later
898 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
899 * If the tty has not yet been opened, the message is rejected.
900 *
901 * Locking: struct hvc_iucv_private->lock
902 */
903static void hvc_iucv_msg_pending(struct iucv_path *path,
904 struct iucv_message *msg)
905{
906 struct hvc_iucv_private *priv = path->private;
907 struct iucv_tty_buffer *rb;
908
909 /* reject messages that exceed max size of iucv_tty_msg->datalen */
910 if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
911 iucv_message_reject(path, msg);
912 return;
913 }
914
915 spin_lock(&priv->lock);
916
917 /* reject messages if tty has not yet been opened */
918 if (priv->tty_state == TTY_CLOSED) {
919 iucv_message_reject(path, msg);
920 goto unlock_return;
921 }
922
923 /* allocate tty buffer to save iucv msg only */
924 rb = alloc_tty_buffer(0, GFP_ATOMIC);
925 if (!rb) {
926 iucv_message_reject(path, msg);
927 goto unlock_return; /* -ENOMEM */
928 }
929 rb->msg = *msg;
930
931 list_add_tail(&rb->list, &priv->tty_inqueue);
932
933 hvc_kick(); /* wake up hvc thread */
934
935unlock_return:
936 spin_unlock(&priv->lock);
937}
938
939/**
940 * hvc_iucv_msg_complete() - IUCV handler to process message completion
941 * @path: IUCV path of the completed message (struct iucv_path)
942 * @msg: Pointer to the IUCV message
943 *
944 * The function is called upon completion of message delivery to remove the
945 * message from the outqueue. Additional delivery information can be found
946 * in msg->audit: rejected messages (0x040000 (IPADRJCT)), and
947 * purged messages (0x010000 (IPADPGNR)).
948 *
949 * Locking: struct hvc_iucv_private->lock
950 */
951static void hvc_iucv_msg_complete(struct iucv_path *path,
952 struct iucv_message *msg)
953{
954 struct hvc_iucv_private *priv = path->private;
955 struct iucv_tty_buffer *ent, *next;
956 LIST_HEAD(list_remove);
957
958 spin_lock(&priv->lock);
959 list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
960 if (ent->msg.id == msg->id) {
961 list_move(&ent->list, &list_remove);
962 break;
963 }
964 wake_up(&priv->sndbuf_waitq);
965 spin_unlock(&priv->lock);
966 destroy_tty_buffer_list(&list_remove);
967}
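
/*
 * Illustrative sketch (hypothetical, not part of this driver): the delivery
 * information mentioned above could be evaluated before the buffer is
 * destroyed, for example to maintain statistics.  The bit values are the
 * ones listed in the comment above; the counters are made-up fields.
 *
 *	(IPADRJCT, 0x040000: message was rejected)
 *	if (msg->audit & 0x040000)
 *		priv->stat_rejected++;
 *	(IPADPGNR, 0x010000: message was purged)
 *	if (msg->audit & 0x010000)
 *		priv->stat_purged++;
 */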
968
969/**
970 * hvc_iucv_pm_freeze() - Freeze PM callback
971 * @dev: IUCV HVC terminal device
972 *
973 * Sever an established IUCV communication path and
974 * trigger a hang-up of the underlying HVC terminal.
975 */
976static int hvc_iucv_pm_freeze(struct device *dev)
977{
978 struct hvc_iucv_private *priv = dev_get_drvdata(dev);
979
980 local_bh_disable();
981 hvc_iucv_hangup(priv);
982 local_bh_enable();
983
984 return 0;
985}
986
987/**
988 * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
989 * @dev: IUCV HVC terminal device
990 *
991 * Wake up the HVC thread to trigger hang-up and respective
992 * HVC back-end notifier invocations.
993 */
994static int hvc_iucv_pm_restore_thaw(struct device *dev)
995{
996 hvc_kick();
997 return 0;
998}
999
1000static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
1001 struct device_attribute *attr,
1002 char *buf)
1003{
1004 struct hvc_iucv_private *priv = dev_get_drvdata(dev);
1005 size_t len;
1006
1007 len = sizeof(priv->srv_name);
1008 memcpy(buf, priv->srv_name, len);
1009 EBCASC(buf, len);
1010 buf[len++] = '\n';
1011 return len;
1012}
1013
1014static ssize_t hvc_iucv_dev_state_show(struct device *dev,
1015 struct device_attribute *attr,
1016 char *buf)
1017{
1018 struct hvc_iucv_private *priv = dev_get_drvdata(dev);
1019 return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state);
1020}
1021
1022static ssize_t hvc_iucv_dev_peer_show(struct device *dev,
1023 struct device_attribute *attr,
1024 char *buf)
1025{
1026 struct hvc_iucv_private *priv = dev_get_drvdata(dev);
1027 char vmid[9], ipuser[9];
1028
1029 memset(vmid, 0, sizeof(vmid));
1030 memset(ipuser, 0, sizeof(ipuser));
1031
1032 spin_lock_bh(&priv->lock);
1033 if (priv->iucv_state == IUCV_CONNECTED) {
1034 memcpy(vmid, priv->info_path, 8);
1035 memcpy(ipuser, priv->info_path + 8, 8);
1036 }
1037 spin_unlock_bh(&priv->lock);
1038 EBCASC(ipuser, 8);
1039
1040 return sprintf(buf, "%s:%s\n", vmid, ipuser);
1041}
1042
1043
1044/* HVC operations */
1045static const struct hv_ops hvc_iucv_ops = {
1046 .get_chars = hvc_iucv_get_chars,
1047 .put_chars = hvc_iucv_put_chars,
1048 .notifier_add = hvc_iucv_notifier_add,
1049 .notifier_del = hvc_iucv_notifier_del,
1050 .notifier_hangup = hvc_iucv_notifier_hangup,
1051 .dtr_rts = hvc_iucv_dtr_rts,
1052};
1053
1054/* Suspend / resume device operations */
1055static const struct dev_pm_ops hvc_iucv_pm_ops = {
1056 .freeze = hvc_iucv_pm_freeze,
1057 .thaw = hvc_iucv_pm_restore_thaw,
1058 .restore = hvc_iucv_pm_restore_thaw,
1059};
1060
1061/* IUCV HVC device driver */
1062static struct device_driver hvc_iucv_driver = {
1063 .name = KMSG_COMPONENT,
1064 .bus = &iucv_bus,
1065 .pm = &hvc_iucv_pm_ops,
1066};
1067
1068/* IUCV HVC device attributes */
1069static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
1070static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
1071static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);
1072static struct attribute *hvc_iucv_dev_attrs[] = {
1073 &dev_attr_termid.attr,
1074 &dev_attr_state.attr,
1075 &dev_attr_peer.attr,
1076 NULL,
1077};
1078static struct attribute_group hvc_iucv_dev_attr_group = {
1079 .attrs = hvc_iucv_dev_attrs,
1080};
1081static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
1082 &hvc_iucv_dev_attr_group,
1083 NULL,
1084};
1085
1086
1087/**
1088 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
1089 * @id: hvc_iucv_table index
1090 * @is_console: Flag indicating whether the instance is used as Linux console
1091 *
1092 * This function allocates a new hvc_iucv_private structure and stores
1093 * the instance in hvc_iucv_table at index @id.
1094 * Returns 0 on success; otherwise non-zero.
1095 */
1096static int __init hvc_iucv_alloc(int id, unsigned int is_console)
1097{
1098 struct hvc_iucv_private *priv;
1099 char name[9];
1100 int rc;
1101
1102 priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
1103 if (!priv)
1104 return -ENOMEM;
1105
1106 spin_lock_init(&priv->lock);
1107 INIT_LIST_HEAD(&priv->tty_outqueue);
1108 INIT_LIST_HEAD(&priv->tty_inqueue);
1109 INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
1110 init_waitqueue_head(&priv->sndbuf_waitq);
1111
1112 priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
1113 if (!priv->sndbuf) {
1114 kfree(priv);
1115 return -ENOMEM;
1116 }
1117
1118 /* set console flag */
1119 priv->is_console = is_console;
1120
1121 /* allocate hvc device */
1122 priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */
1123 HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
1124 if (IS_ERR(priv->hvc)) {
1125 rc = PTR_ERR(priv->hvc);
1126 goto out_error_hvc;
1127 }
1128
1129 /* notify HVC thread instead of using polling */
1130 priv->hvc->irq_requested = 1;
1131
1132 /* setup iucv related information */
1133 snprintf(name, 9, "lnxhvc%-2d", id);
1134 memcpy(priv->srv_name, name, 8);
1135 ASCEBC(priv->srv_name, 8);
1136
1137 /* create and setup device */
1138 priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
1139 if (!priv->dev) {
1140 rc = -ENOMEM;
1141 goto out_error_dev;
1142 }
1143 dev_set_name(priv->dev, "hvc_iucv%d", id);
1144 dev_set_drvdata(priv->dev, priv);
1145 priv->dev->bus = &iucv_bus;
1146 priv->dev->parent = iucv_root;
1147 priv->dev->driver = &hvc_iucv_driver;
1148 priv->dev->groups = hvc_iucv_dev_attr_groups;
1149 priv->dev->release = (void (*)(struct device *)) kfree;
1150 rc = device_register(priv->dev);
1151 if (rc) {
1152 put_device(priv->dev);
1153 goto out_error_dev;
1154 }
1155
1156 hvc_iucv_table[id] = priv;
1157 return 0;
1158
1159out_error_dev:
1160 hvc_remove(priv->hvc);
1161out_error_hvc:
1162 free_page((unsigned long) priv->sndbuf);
1163 kfree(priv);
1164
1165 return rc;
1166}
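
/*
 * Example (for illustration only): with the naming scheme above,
 *
 *	id =  0  ->  srv_name "lnxhvc0 "  device "hvc_iucv0"
 *	id = 12  ->  srv_name "lnxhvc12"  device "hvc_iucv12"
 *
 * where srv_name is blank-padded to 8 characters and converted to EBCDIC,
 * and the hvc terminal number is HVC_IUCV_MAGIC + id.
 */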
1167
1168/**
1169 * hvc_iucv_destroy() - Destroy and free a hvc_iucv_private instance
1170 */
1171static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
1172{
1173 hvc_remove(priv->hvc);
1174 device_unregister(priv->dev);
1175 free_page((unsigned long) priv->sndbuf);
1176 kfree(priv);
1177}
1178
1179/**
1180 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
1181 * @filter: String containing a comma-separated list of z/VM user IDs
1182 * @dest: Location to store the parsed z/VM user ID
1183 */
1184static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
1185{
1186 const char *nextdelim, *residual;
1187 size_t len;
1188
1189 nextdelim = strchr(filter, ',');
1190 if (nextdelim) {
1191 len = nextdelim - filter;
1192 residual = nextdelim + 1;
1193 } else {
1194 len = strlen(filter);
1195 residual = filter + len;
1196 }
1197
1198 if (len == 0)
1199 return ERR_PTR(-EINVAL);
1200
1201 /* check for '\n' (if called from sysfs) */
1202 if (filter[len - 1] == '\n')
1203 len--;
1204
1205 /* prohibit filter entries containing the wildcard character only */
1206 if (len == 1 && *filter == FILTER_WILDCARD_CHAR)
1207 return ERR_PTR(-EINVAL);
1208
1209 if (len > 8)
1210 return ERR_PTR(-EINVAL);
1211
1212 /* pad with blanks and save upper case version of user ID */
1213 memset(dest, ' ', 8);
1214 while (len--)
1215 dest[len] = toupper(filter[len]);
1216 return residual;
1217}
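
/*
 * Example (for illustration, with made-up z/VM user IDs): parsing the
 * string "lnxsys01,lnxt*\n" entry by entry produces the two 8-byte,
 * blank-padded, upper-case filter entries
 *
 *	"LNXSYS01"
 *	"LNXT*   "
 *
 * and the returned pointer is advanced past the consumed entry (behind the
 * comma, or to the end of the string for the last entry).
 */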
1218
1219/**
1220 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
1221 * @filter: String consisting of a comma-separated list of z/VM user IDs
1222 *
1223 * The function parses the @filter string and creates an array containing
1224 * the list of z/VM user ID filter entries.
1225 * Return code 0 means success, -EINVAL if the filter is syntactically
1226 * incorrect, -ENOMEM if there was not enough memory to allocate the
1227 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
1228 */
1229static int hvc_iucv_setup_filter(const char *val)
1230{
1231 const char *residual;
1232 int err;
1233 size_t size, count;
1234 void *array, *old_filter;
1235
1236 count = strlen(val);
1237 if (count == 0 || (count == 1 && val[0] == '\n')) {
1238 size = 0;
1239 array = NULL;
1240 goto out_replace_filter; /* clear filter */
1241 }
1242
1243 /* count user IDs in order to allocate sufficient memory */
1244 size = 1;
1245 residual = val;
1246 while ((residual = strchr(residual, ',')) != NULL) {
1247 residual++;
1248 size++;
1249 }
1250
1251 /* check if the specified list exceeds the filter limit */
1252 if (size > MAX_VMID_FILTER)
1253 return -ENOSPC;
1254
1255 array = kzalloc(size * 8, GFP_KERNEL);
1256 if (!array)
1257 return -ENOMEM;
1258
1259 count = size;
1260 residual = val;
1261 while (*residual && count) {
1262 residual = hvc_iucv_parse_filter(residual,
1263 array + ((size - count) * 8));
1264 if (IS_ERR(residual)) {
1265 err = PTR_ERR(residual);
1266 kfree(array);
1267 goto out_err;
1268 }
1269 count--;
1270 }
1271
1272out_replace_filter:
1273 write_lock_bh(&hvc_iucv_filter_lock);
1274 old_filter = hvc_iucv_filter;
1275 hvc_iucv_filter_size = size;
1276 hvc_iucv_filter = array;
1277 write_unlock_bh(&hvc_iucv_filter_lock);
1278 kfree(old_filter);
1279
1280 err = 0;
1281out_err:
1282 return err;
1283}
1284
1285/**
1286 * param_set_vmidfilter() - Set z/VM user ID filter parameter
1287 * @val: String consisting of a comma-separated list of z/VM user IDs
1288 * @kp: Kernel parameter pointing to hvc_iucv_filter array
1289 *
1290 * The function sets up the z/VM user ID filter specified as comma-separated
1291 * list of user IDs in @val.
1292 * Note: If it is called early in the boot process, @val is stored and
1293 * parsed later in hvc_iucv_init().
1294 */
1295static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
1296{
1297 int rc;
1298
1299 if (!MACHINE_IS_VM || !hvc_iucv_devices)
1300 return -ENODEV;
1301
1302 if (!val)
1303 return -EINVAL;
1304
1305 rc = 0;
1306 if (slab_is_available())
1307 rc = hvc_iucv_setup_filter(val);
1308 else
1309 hvc_iucv_filter_string = val; /* defer... */
1310 return rc;
1311}
1312
1313/**
1314 * param_get_vmidfilter() - Get z/VM user ID filter
1315 * @buffer: Buffer to store the z/VM user ID filter
1316 * (the buffer size is assumed to be PAGE_SIZE)
1317 * @kp: Kernel parameter pointing to the hvc_iucv_filter array
1318 *
1319 * The function stores the filter as a comma-separated list of z/VM user IDs
1320 * in @buffer. Typically, sysfs routines call this function for attr show.
1321 */
1322static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
1323{
1324 int rc;
1325 size_t index, len;
1326 void *start, *end;
1327
1328 if (!MACHINE_IS_VM || !hvc_iucv_devices)
1329 return -ENODEV;
1330
1331 rc = 0;
1332 read_lock_bh(&hvc_iucv_filter_lock);
1333 for (index = 0; index < hvc_iucv_filter_size; index++) {
1334 start = hvc_iucv_filter + (8 * index);
1335 end = memchr(start, ' ', 8);
1336 len = (end) ? end - start : 8;
1337 memcpy(buffer + rc, start, len);
1338 rc += len;
1339 buffer[rc++] = ',';
1340 }
1341 read_unlock_bh(&hvc_iucv_filter_lock);
1342 if (rc)
1343 buffer[--rc] = '\0'; /* replace last comma and update rc */
1344 return rc;
1345}
1346
1347#define param_check_vmidfilter(name, p) __param_check(name, p, void)
1348
1349static const struct kernel_param_ops param_ops_vmidfilter = {
1350 .set = param_set_vmidfilter,
1351 .get = param_get_vmidfilter,
1352};
1353
1354/**
1355 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
1356 */
1357static int __init hvc_iucv_init(void)
1358{
1359 int rc;
1360 unsigned int i;
1361
1362 if (!hvc_iucv_devices)
1363 return -ENODEV;
1364
1365 if (!MACHINE_IS_VM) {
1366 pr_notice("The z/VM IUCV HVC device driver cannot "
1367 "be used without z/VM\n");
1368 rc = -ENODEV;
1369 goto out_error;
1370 }
1371
1372 if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
1373 pr_err("%lu is not a valid value for the hvc_iucv= "
1374 "kernel parameter\n", hvc_iucv_devices);
1375 rc = -EINVAL;
1376 goto out_error;
1377 }
1378
1379 /* register IUCV HVC device driver */
1380 rc = driver_register(&hvc_iucv_driver);
1381 if (rc)
1382 goto out_error;
1383
1384 /* parse hvc_iucv_allow string and create z/VM user ID filter list */
1385 if (hvc_iucv_filter_string) {
1386 rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
1387 switch (rc) {
1388 case 0:
1389 break;
1390 case -ENOMEM:
1391 pr_err("Allocating memory failed with "
1392 "reason code=%d\n", 3);
1393 goto out_error;
1394 case -EINVAL:
1395 pr_err("hvc_iucv_allow= does not specify a valid "
1396 "z/VM user ID list\n");
1397 goto out_error;
1398 case -ENOSPC:
1399 pr_err("hvc_iucv_allow= specifies too many "
1400 "z/VM user IDs\n");
1401 goto out_error;
1402 default:
1403 goto out_error;
1404 }
1405 }
1406
1407 hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
1408 sizeof(struct iucv_tty_buffer),
1409 0, 0, NULL);
1410 if (!hvc_iucv_buffer_cache) {
1411 pr_err("Allocating memory failed with reason code=%d\n", 1);
1412 rc = -ENOMEM;
1413 goto out_error;
1414 }
1415
1416 hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
1417 hvc_iucv_buffer_cache);
1418 if (!hvc_iucv_mempool) {
1419 pr_err("Allocating memory failed with reason code=%d\n", 2);
1420 kmem_cache_destroy(hvc_iucv_buffer_cache);
1421 rc = -ENOMEM;
1422 goto out_error;
1423 }
1424
1425 /* register the first terminal device as console
1426 * (must be done before allocating hvc terminal devices) */
1427 rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
1428 if (rc) {
1429 pr_err("Registering HVC terminal device as "
1430 "Linux console failed\n");
1431 goto out_error_memory;
1432 }
1433
1434 /* allocate hvc_iucv_private structs */
1435 for (i = 0; i < hvc_iucv_devices; i++) {
1436 rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
1437 if (rc) {
1438 pr_err("Creating a new HVC terminal device "
1439 "failed with error code=%d\n", rc);
1440 goto out_error_hvc;
1441 }
1442 }
1443
1444 /* register IUCV callback handler */
1445 rc = iucv_register(&hvc_iucv_handler, 0);
1446 if (rc) {
1447 pr_err("Registering IUCV handlers failed with error code=%d\n",
1448 rc);
1449 goto out_error_hvc;
1450 }
1451
1452 return 0;
1453
1454out_error_hvc:
1455 for (i = 0; i < hvc_iucv_devices; i++)
1456 if (hvc_iucv_table[i])
1457 hvc_iucv_destroy(hvc_iucv_table[i]);
1458out_error_memory:
1459 mempool_destroy(hvc_iucv_mempool);
1460 kmem_cache_destroy(hvc_iucv_buffer_cache);
1461out_error:
1462 kfree(hvc_iucv_filter);
1463 hvc_iucv_devices = 0; /* ensure that we do not provide any device */
1464 return rc;
1465}
1466
1467/**
1468 * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
1469 * @val: Parameter value (numeric)
1470 */
1471static int __init hvc_iucv_config(char *val)
1472{
1473 return kstrtoul(val, 10, &hvc_iucv_devices);
1474}
1475
1476
1477device_initcall(hvc_iucv_init);
1478__setup("hvc_iucv=", hvc_iucv_config);
1479core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
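
/*
 * Usage example (kernel command line; the z/VM user IDs are made up and the
 * '*' wildcard assumes FILTER_WILDCARD_CHAR is '*'):
 *
 *	hvc_iucv=4 hvc_iucv_allow=lxtcp01,lxtcp02,lnxt*
 *
 * This provides four IUCV HVC terminal devices and permits connections only
 * from the z/VM user IDs LXTCP01 and LXTCP02 and from any user ID that
 * starts with LNXT.
 */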