Linux Audio

Check our new training course

Loading...
v3.1
 
  1/*
  2 * Driver for s390 chsc subchannels
  3 *
  4 * Copyright IBM Corp. 2008, 2009
  5 *
  6 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  7 *
  8 */
  9
 10#include <linux/slab.h>
 
 11#include <linux/device.h>
 12#include <linux/module.h>
 13#include <linux/uaccess.h>
 14#include <linux/miscdevice.h>
 
 15
 16#include <asm/compat.h>
 17#include <asm/cio.h>
 18#include <asm/chsc.h>
 19#include <asm/isc.h>
 20
 21#include "cio.h"
 22#include "cio_debug.h"
 23#include "css.h"
 24#include "chsc_sch.h"
 25#include "ioasm.h"
 26
 27static debug_info_t *chsc_debug_msg_id;
 28static debug_info_t *chsc_debug_log_id;
 29
 
 
 
 
/*
 * CHSC_MSG() - emit a formatted message into the "chsc_msg" s390 debug
 * feature at importance level @imp (lower value = more important).
 */
#define CHSC_MSG(imp, args...) do {					\
		debug_sprintf_event(chsc_debug_msg_id, imp , ##args);	\
	} while (0)

/*
 * CHSC_LOG() - record the fixed text @txt in the "chsc_log" hex/ascii
 * debug feature at importance level @imp.
 */
#define CHSC_LOG(imp, txt) do {					\
		debug_text_event(chsc_debug_log_id, imp , txt);	\
	} while (0)
 37
 38static void CHSC_LOG_HEX(int level, void *data, int length)
 39{
 40	while (length > 0) {
 41		debug_event(chsc_debug_log_id, level, data, length);
 42		length -= chsc_debug_log_id->buf_size;
 43		data += chsc_debug_log_id->buf_size;
 44	}
 45}
 46
 47MODULE_AUTHOR("IBM Corporation");
 48MODULE_DESCRIPTION("driver for s390 chsc subchannels");
 49MODULE_LICENSE("GPL");
 50
/*
 * Interrupt handler for chsc subchannels.
 *
 * Copies the lowcore IRB into the pending request (if any), marks the
 * request complete and drops the device reference that was taken when
 * the request was started in chsc_async().
 */
static void chsc_subchannel_irq(struct subchannel *sch)
{
	struct chsc_private *private = dev_get_drvdata(&sch->dev);
	struct chsc_request *request = private->request;
	struct irb *irb = (struct irb *)&S390_lowcore.irb;

	CHSC_LOG(4, "irb");
	CHSC_LOG_HEX(4, irb, sizeof(*irb));
	/* Copy irb to provided request and set done. */
	if (!request) {
		/* Spurious interrupt: nobody is waiting on this subchannel. */
		CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
			 sch->schid.ssid, sch->schid.sch_no);
		return;
	}
	/* Detach the request first so a new one can be started. */
	private->request = NULL;
	memcpy(&request->irb, irb, sizeof(*irb));
	cio_update_schib(sch);
	complete(&request->completion);
	put_device(&sch->dev);	/* reference taken in chsc_async() */
}
 71
 72static int chsc_subchannel_probe(struct subchannel *sch)
 73{
 74	struct chsc_private *private;
 75	int ret;
 76
 77	CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
 78		 sch->schid.ssid, sch->schid.sch_no);
 79	sch->isc = CHSC_SCH_ISC;
 80	private = kzalloc(sizeof(*private), GFP_KERNEL);
 81	if (!private)
 82		return -ENOMEM;
 83	dev_set_drvdata(&sch->dev, private);
 84	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
 85	if (ret) {
 86		CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
 87			 sch->schid.ssid, sch->schid.sch_no, ret);
 88		dev_set_drvdata(&sch->dev, NULL);
 89		kfree(private);
 90	} else {
 91		if (dev_get_uevent_suppress(&sch->dev)) {
 92			dev_set_uevent_suppress(&sch->dev, 0);
 93			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
 94		}
 95	}
 96	return ret;
 97}
 98
/*
 * Tear down a chsc subchannel: disable it, complete any still-pending
 * request so the waiter in chsc_ioctl_start() is not left sleeping,
 * and free the per-device private data.
 */
static int chsc_subchannel_remove(struct subchannel *sch)
{
	struct chsc_private *private;

	cio_disable_subchannel(sch);
	private = dev_get_drvdata(&sch->dev);
	dev_set_drvdata(&sch->dev, NULL);
	if (private->request) {
		/* Wake the waiter; drop the reference chsc_async() took. */
		complete(&private->request->completion);
		put_device(&sch->dev);
	}
	kfree(private);
	return 0;
}
113
/* Quiesce the subchannel on system shutdown/reboot. */
static void chsc_subchannel_shutdown(struct subchannel *sch)
{
	cio_disable_subchannel(sch);
}
118
/*
 * Suspend "prepare" callback: return -EAGAIN while the subchannel still
 * reports status pending so that suspend is retried later.
 */
static int chsc_subchannel_prepare(struct subchannel *sch)
{
	int cc;
	struct schib schib;
	/*
	 * Don't allow suspend while the subchannel is not idle
	 * since we don't have a way to clear the subchannel and
	 * cannot disable it with a request running.
	 */
	cc = stsch_err(sch->schid, &schib);
	if (!cc && scsw_stctl(&schib.scsw))
		return -EAGAIN;
	return 0;
}
133
/* Suspend: disable the subchannel; it is re-enabled on thaw/restore. */
static int chsc_subchannel_freeze(struct subchannel *sch)
{
	return cio_disable_subchannel(sch);
}
138
/* Resume: re-enable the subchannel (also used as the thaw callback). */
static int chsc_subchannel_restore(struct subchannel *sch)
{
	return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
}
143
144static struct css_device_id chsc_subchannel_ids[] = {
145	{ .match_flags = 0x1, .type =SUBCHANNEL_TYPE_CHSC, },
146	{ /* end of list */ },
147};
148MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
149
/* css bus driver glue: callbacks for CHSC-type subchannels. */
static struct css_driver chsc_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "chsc_subchannel",
	},
	.subchannel_type = chsc_subchannel_ids,
	.irq = chsc_subchannel_irq,
	.probe = chsc_subchannel_probe,
	.remove = chsc_subchannel_remove,
	.shutdown = chsc_subchannel_shutdown,
	.prepare = chsc_subchannel_prepare,
	.freeze = chsc_subchannel_freeze,
	/* thaw and restore both just re-enable the subchannel */
	.thaw = chsc_subchannel_restore,
	.restore = chsc_subchannel_restore,
};
165
166static int __init chsc_init_dbfs(void)
167{
168	chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
169					   16 * sizeof(long));
170	if (!chsc_debug_msg_id)
171		goto out;
172	debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
173	debug_set_level(chsc_debug_msg_id, 2);
174	chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
175	if (!chsc_debug_log_id)
176		goto out;
177	debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
178	debug_set_level(chsc_debug_log_id, 2);
179	return 0;
180out:
181	if (chsc_debug_msg_id)
182		debug_unregister(chsc_debug_msg_id);
183	return -ENOMEM;
184}
185
/* Tear down the debug features registered in chsc_init_dbfs(). */
static void chsc_remove_dbfs(void)
{
	debug_unregister(chsc_debug_log_id);
	debug_unregister(chsc_debug_msg_id);
}
191
/* Register this module's driver with the css bus. */
static int __init chsc_init_sch_driver(void)
{
	return css_driver_register(&chsc_subchannel_driver);
}
196
/* Unregister this module's driver from the css bus. */
static void chsc_cleanup_sch_driver(void)
{
	css_driver_unregister(&chsc_subchannel_driver);
}
201
202static DEFINE_SPINLOCK(chsc_lock);
203
/*
 * driver_find_device() match callback: select an enabled chsc
 * subchannel with no function in progress, i.e. one that is free to
 * start a new request.
 */
static int chsc_subchannel_match_next_free(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);

	return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
}
210
/*
 * Return the next free chsc subchannel after @sch, or the first free
 * one when @sch is NULL.  The returned subchannel carries a device
 * reference taken by driver_find_device(); the caller must drop it
 * with put_device().  Returns NULL when no free subchannel exists.
 */
static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
{
	struct device *dev;

	dev = driver_find_device(&chsc_subchannel_driver.drv,
				 sch ? &sch->dev : NULL, NULL,
				 chsc_subchannel_match_next_free);
	return dev ? to_subchannel(dev) : NULL;
}
220
221/**
222 * chsc_async() - try to start a chsc request asynchronously
223 * @chsc_area: request to be started
224 * @request: request structure to associate
225 *
226 * Tries to start a chsc request on one of the existing chsc subchannels.
227 * Returns:
228 *  %0 if the request was performed synchronously
229 *  %-EINPROGRESS if the request was successfully started
230 *  %-EBUSY if all chsc subchannels are busy
231 *  %-ENODEV if no chsc subchannels are available
232 * Context:
233 *  interrupts disabled, chsc_lock held
234 */
235static int chsc_async(struct chsc_async_area *chsc_area,
236		      struct chsc_request *request)
237{
238	int cc;
239	struct chsc_private *private;
240	struct subchannel *sch = NULL;
241	int ret = -ENODEV;
242	char dbf[10];
243
244	chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
245	while ((sch = chsc_get_next_subchannel(sch))) {
246		spin_lock(sch->lock);
247		private = dev_get_drvdata(&sch->dev);
248		if (private->request) {
249			spin_unlock(sch->lock);
250			ret = -EBUSY;
251			continue;
252		}
253		chsc_area->header.sid = sch->schid;
254		CHSC_LOG(2, "schid");
255		CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
256		cc = chsc(chsc_area);
257		sprintf(dbf, "cc:%d", cc);
258		CHSC_LOG(2, dbf);
259		switch (cc) {
260		case 0:
261			ret = 0;
262			break;
263		case 1:
264			sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
265			ret = -EINPROGRESS;
266			private->request = request;
267			break;
268		case 2:
269			ret = -EBUSY;
270			break;
271		default:
272			ret = -ENODEV;
273		}
274		spin_unlock(sch->lock);
275		CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
276			 sch->schid.ssid, sch->schid.sch_no, cc);
277		if (ret == -EINPROGRESS)
278			return -EINPROGRESS;
279		put_device(&sch->dev);
280		if (ret == 0)
281			return 0;
282	}
283	return ret;
284}
285
286static void chsc_log_command(struct chsc_async_area *chsc_area)
287{
288	char dbf[10];
289
290	sprintf(dbf, "CHSC:%x", chsc_area->header.code);
291	CHSC_LOG(0, dbf);
292	CHSC_LOG_HEX(0, chsc_area, 32);
293}
294
/*
 * Translate the channel status stored in the request's IRB into an
 * errno.  Returns -EIO when no status is pending.  A chain check is
 * cleared from the status; the remaining subchannel-status bits are
 * only treated as errors when a chain check ("backed up") was also
 * present: then program/protection checks map to -EIO/-EPERM and
 * channel data/control checks to -EAGAIN (retryable).
 */
static int chsc_examine_irb(struct chsc_request *request)
{
	int backed_up;

	if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
		return -EIO;
	backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
	request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
	if (scsw_cstat(&request->irb.scsw) == 0)
		return 0;
	if (!backed_up)
		return 0;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
		return -EIO;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
		return -EPERM;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
		return -EAGAIN;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
		return -EAGAIN;
	return -EIO;
}
317
318static int chsc_ioctl_start(void __user *user_area)
319{
320	struct chsc_request *request;
321	struct chsc_async_area *chsc_area;
322	int ret;
323	char dbf[10];
324
325	if (!css_general_characteristics.dynio)
326		/* It makes no sense to try. */
327		return -EOPNOTSUPP;
328	chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
329	if (!chsc_area)
330		return -ENOMEM;
331	request = kzalloc(sizeof(*request), GFP_KERNEL);
332	if (!request) {
333		ret = -ENOMEM;
334		goto out_free;
335	}
336	init_completion(&request->completion);
337	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
338		ret = -EFAULT;
339		goto out_free;
340	}
341	chsc_log_command(chsc_area);
342	spin_lock_irq(&chsc_lock);
343	ret = chsc_async(chsc_area, request);
344	spin_unlock_irq(&chsc_lock);
345	if (ret == -EINPROGRESS) {
346		wait_for_completion(&request->completion);
347		ret = chsc_examine_irb(request);
348	}
349	/* copy area back to user */
350	if (!ret)
351		if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
352			ret = -EFAULT;
353out_free:
354	sprintf(dbf, "ret:%d", ret);
355	CHSC_LOG(0, dbf);
356	kfree(request);
357	free_page((unsigned long)chsc_area);
358	return ret;
359}
360
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * CHSC_INFO_CHANNEL_PATH ioctl: run a store-channel-path-description
 * CHSC (command code 0x0028) for the channel path given by the user
 * and copy the response block back to user space.
 */
static int chsc_ioctl_info_channel_path(void __user *user_cd)
{
	struct chsc_chp_cd *cd;
	int ret, ccode;
	/* On-the-wire layout of the scpcd request/response page. */
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scpcd_area;

	/* One zeroed page; GFP_DMA presumably for hardware addressing
	 * constraints of the chsc instruction - see other callers. */
	scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpcd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scpcd_area->request.length = 0x0010;
	scpcd_area->request.code = 0x0028;
	scpcd_area->m = cd->m;
	scpcd_area->fmt1 = cd->fmt;
	scpcd_area->cssid = cd->chpid.cssid;
	/* Query exactly one channel path: first == last. */
	scpcd_area->first_chpid = cd->chpid.id;
	scpcd_area->last_chpid = cd->chpid.id;

	ccode = chsc(scpcd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scpcd_area->response.code != 0x0001) {
		/* 0x0001 is the only success response code. */
		ret = -EIO;
		CHSC_MSG(0, "scpcd: response code=%x\n",
			 scpcd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scpcd_area);
	return ret;
}
422
/*
 * CHSC_INFO_CU ioctl: run a store-control-unit-description CHSC
 * (command code 0x0028 with control-unit addressing) for the control
 * unit given by the user and copy the response back to user space.
 */
static int chsc_ioctl_info_cu(void __user *user_cd)
{
	struct chsc_cu_cd *cd;
	int ret, ccode;
	/* On-the-wire layout of the scucd request/response page. */
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_cun : 8;
		u32 : 24;
		u32 last_cun : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scucd_area;

	scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scucd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scucd_area->request.length = 0x0010;
	scucd_area->request.code = 0x0028;
	scucd_area->m = cd->m;
	scucd_area->fmt1 = cd->fmt;
	scucd_area->cssid = cd->cssid;
	/* Query exactly one control unit: first == last. */
	scucd_area->first_cun = cd->cun;
	scucd_area->last_cun = cd->cun;

	ccode = chsc(scucd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scucd_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "scucd: response code=%x\n",
			 scucd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scucd_area);
	return ret;
}
484
/*
 * CHSC_INFO_SCH_CU ioctl: run a store-subchannel-control-unit-data
 * CHSC (command code 0x0006) for the subchannel given by the user and
 * copy the response back to user space.
 */
static int chsc_ioctl_info_sch_cu(void __user *user_cud)
{
	struct chsc_sch_cud *cud;
	int ret, ccode;
	/* On-the-wire layout of the sscud request/response page. */
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 5;
		u32 fmt1 : 4;
		u32 : 2;
		u32 ssid : 2;
		u32 first_sch : 16;
		u32 : 8;
		u32 cssid : 8;
		u32 last_sch : 16;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sscud_area;

	sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sscud_area)
		return -ENOMEM;
	cud = kzalloc(sizeof(*cud), GFP_KERNEL);
	if (!cud) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cud, user_cud, sizeof(*cud))) {
		ret = -EFAULT;
		goto out_free;
	}
	sscud_area->request.length = 0x0010;
	sscud_area->request.code = 0x0006;
	sscud_area->m = cud->schid.m;
	sscud_area->fmt1 = cud->fmt;
	sscud_area->ssid = cud->schid.ssid;
	/* Query exactly one subchannel: first == last. */
	sscud_area->first_sch = cud->schid.sch_no;
	sscud_area->cssid = cud->schid.cssid;
	sscud_area->last_sch = cud->schid.sch_no;

	ccode = chsc(sscud_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sscud_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sscud: response code=%x\n",
			 sscud_area->response.code);
		goto out_free;
	}
	memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
	if (copy_to_user(user_cud, cud, sizeof(*cud)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cud);
	free_page((unsigned long)sscud_area);
	return ret;
}
548
/*
 * CHSC_INFO_CI ioctl: run a store-configuration-information CHSC
 * (command code 0x0012) for the css image given by the user and copy
 * the response back to user space.
 */
static int chsc_ioctl_conf_info(void __user *user_ci)
{
	struct chsc_conf_info *ci;
	int ret, ccode;
	/* On-the-wire layout of the sci request/response page. */
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 6;
		u32 ssid : 2;
		u32 : 8;
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sci_area;

	sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sci_area)
		return -ENOMEM;
	ci = kzalloc(sizeof(*ci), GFP_KERNEL);
	if (!ci) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ci, user_ci, sizeof(*ci))) {
		ret = -EFAULT;
		goto out_free;
	}
	sci_area->request.length = 0x0010;
	sci_area->request.code = 0x0012;
	sci_area->m = ci->id.m;
	sci_area->fmt1 = ci->fmt;
	sci_area->cssid = ci->id.cssid;
	sci_area->ssid = ci->id.ssid;

	ccode = chsc(sci_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sci_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sci: response code=%x\n",
			 sci_area->response.code);
		goto out_free;
	}
	memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
	if (copy_to_user(user_ci, ci, sizeof(*ci)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ci);
	free_page((unsigned long)sci_area);
	return ret;
}
608
/*
 * CHSC_INFO_CCL ioctl: run a store-configuration-component-list CHSC
 * (command code 0x0030).  The list parameter encoding depends on the
 * requested component type (chpid-based or cssid-range-based); types
 * not listed in the switch leave list_parm zeroed.
 */
static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
{
	struct chsc_comp_list *ccl;
	int ret, ccode;
	/* On-the-wire layout of the sccl request/response page. */
	struct {
		struct chsc_header request;
		u32 ctype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u64 : 64;
		u32 list_parm[2];
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sccl_area;
	/* Overlay for list_parm when addressing by channel path. */
	struct {
		u32 m : 1;
		u32 : 31;
		u32 cssid : 8;
		u32 : 16;
		u32 chpid : 8;
	} __attribute__ ((packed)) *chpid_parm;
	/* Overlay for list_parm when addressing by cssid range. */
	struct {
		u32 f_cssid : 8;
		u32 l_cssid : 8;
		u32 : 16;
		u32 res;
	} __attribute__ ((packed)) *cssids_parm;

	sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccl_area)
		return -ENOMEM;
	ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
	if (!ccl) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
		ret = -EFAULT;
		goto out_free;
	}
	sccl_area->request.length = 0x0020;
	sccl_area->request.code = 0x0030;
	sccl_area->fmt = ccl->req.fmt;
	sccl_area->ctype = ccl->req.ctype;
	switch (sccl_area->ctype) {
	case CCL_CU_ON_CHP:
	case CCL_IOP_CHP:
		chpid_parm = (void *)&sccl_area->list_parm;
		chpid_parm->m = ccl->req.chpid.m;
		chpid_parm->cssid = ccl->req.chpid.chp.cssid;
		chpid_parm->chpid = ccl->req.chpid.chp.id;
		break;
	case CCL_CSS_IMG:
	case CCL_CSS_IMG_CONF_CHAR:
		cssids_parm = (void *)&sccl_area->list_parm;
		cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
		cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
		break;
	}
	ccode = chsc(sccl_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sccl_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sccl: response code=%x\n",
			 sccl_area->response.code);
		goto out_free;
	}
	memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
	if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ccl);
	free_page((unsigned long)sccl_area);
	return ret;
}
691
/*
 * CHSC_INFO_CPD ioctl: obtain a channel-path description via the
 * common chsc_determine_channel_path_desc() helper and copy it back
 * to user space.
 */
static int chsc_ioctl_chpd(void __user *user_chpd)
{
	struct chsc_scpd *scpd_area;
	struct chsc_cpd_info *chpd;
	int ret;

	/* Both allocations happen before the combined check, so the
	 * shared out_free path can free whichever one succeeded. */
	chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area || !chpd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
		ret = -EFAULT;
		goto out_free;
	}
	ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
					       chpd->rfmt, chpd->c, chpd->m,
					       scpd_area);
	if (ret)
		goto out_free;
	memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
	if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
		ret = -EFAULT;
out_free:
	kfree(chpd);
	free_page((unsigned long)scpd_area);
	return ret;
}
721
/*
 * CHSC_INFO_DCAL ioctl: run a store-domain-configuration-and-access-
 * list CHSC (command code 0x0034) and copy the response back to user
 * space.
 */
static int chsc_ioctl_dcal(void __user *user_dcal)
{
	struct chsc_dcal *dcal;
	int ret, ccode;
	/* On-the-wire layout of the sdcal request/response page. */
	struct {
		struct chsc_header request;
		u32 atype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u32 res0[2];
		u32 list_parm[2];
		u32 res1[2];
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sdcal_area;

	sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sdcal_area)
		return -ENOMEM;
	dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
	if (!dcal) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
		ret = -EFAULT;
		goto out_free;
	}
	sdcal_area->request.length = 0x0020;
	sdcal_area->request.code = 0x0034;
	sdcal_area->atype = dcal->req.atype;
	sdcal_area->fmt = dcal->req.fmt;
	/* The list parameter is passed through opaquely from the user. */
	memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
	       sizeof(sdcal_area->list_parm));

	ccode = chsc(sdcal_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sdcal_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sdcal: response code=%x\n",
			 sdcal_area->response.code);
		goto out_free;
	}
	memcpy(&dcal->sdcal, &sdcal_area->response,
	       sdcal_area->response.length);
	if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(dcal);
	free_page((unsigned long)sdcal_area);
	return ret;
}
780
/*
 * ioctl dispatcher for /dev/chsc.  Serves both the native and the
 * compat entry points; 31-bit compat callers have their argument
 * pointer converted via compat_ptr().
 */
static long chsc_ioctl(struct file *filp, unsigned int cmd,
		       unsigned long arg)
{
	void __user *argp;

	CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
	if (is_compat_task())
		argp = compat_ptr(arg);
	else
		argp = (void __user *)arg;
	switch (cmd) {
	case CHSC_START:
		return chsc_ioctl_start(argp);
	case CHSC_INFO_CHANNEL_PATH:
		return chsc_ioctl_info_channel_path(argp);
	case CHSC_INFO_CU:
		return chsc_ioctl_info_cu(argp);
	case CHSC_INFO_SCH_CU:
		return chsc_ioctl_info_sch_cu(argp);
	case CHSC_INFO_CI:
		return chsc_ioctl_conf_info(argp);
	case CHSC_INFO_CCL:
		return chsc_ioctl_conf_comp_list(argp);
	case CHSC_INFO_CPD:
		return chsc_ioctl_chpd(argp);
	case CHSC_INFO_DCAL:
		return chsc_ioctl_dcal(argp);
	default: /* unknown ioctl number */
		return -ENOIOCTLCMD;
	}
}
812
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* File operations for /dev/chsc; one handler serves native and compat
 * ioctls (pointer fixup happens inside chsc_ioctl()). */
static const struct file_operations chsc_fops = {
	.owner = THIS_MODULE,
	.open = nonseekable_open,
	.unlocked_ioctl = chsc_ioctl,
	.compat_ioctl = chsc_ioctl,
	.llseek = no_llseek,
};
820
/* Misc character device /dev/chsc with a dynamically assigned minor. */
static struct miscdevice chsc_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "chsc",
	.fops = &chsc_fops,
};
826
/* Register the /dev/chsc misc device. */
static int __init chsc_misc_init(void)
{
	return misc_register(&chsc_misc_device);
}
831
/* Remove the /dev/chsc misc device. */
static void chsc_misc_cleanup(void)
{
	misc_deregister(&chsc_misc_device);
}
836
/*
 * Module init: set up the debug features, register the interruption
 * subclass, the css driver and the misc device - rolling everything
 * back in reverse order if a later step fails.
 */
static int __init chsc_sch_init(void)
{
	int ret;

	ret = chsc_init_dbfs();
	if (ret)
		return ret;
	isc_register(CHSC_SCH_ISC);
	ret = chsc_init_sch_driver();
	if (ret)
		goto out_dbf;
	ret = chsc_misc_init();
	if (ret)
		goto out_driver;
	return ret;
out_driver:
	chsc_cleanup_sch_driver();
out_dbf:
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
	return ret;
}
859
/* Module exit: undo chsc_sch_init() in reverse order. */
static void __exit chsc_sch_exit(void)
{
	chsc_misc_cleanup();
	chsc_cleanup_sch_driver();
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
}
867
868module_init(chsc_sch_init);
869module_exit(chsc_sch_exit);
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Driver for s390 chsc subchannels
   4 *
   5 * Copyright IBM Corp. 2008, 2011
   6 *
   7 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
   8 *
   9 */
  10
  11#include <linux/slab.h>
  12#include <linux/compat.h>
  13#include <linux/device.h>
  14#include <linux/module.h>
  15#include <linux/uaccess.h>
  16#include <linux/miscdevice.h>
  17#include <linux/kernel_stat.h>
  18
 
  19#include <asm/cio.h>
  20#include <asm/chsc.h>
  21#include <asm/isc.h>
  22
  23#include "cio.h"
  24#include "cio_debug.h"
  25#include "css.h"
  26#include "chsc_sch.h"
  27#include "ioasm.h"
  28
  29static debug_info_t *chsc_debug_msg_id;
  30static debug_info_t *chsc_debug_log_id;
  31
  32static struct chsc_request *on_close_request;
  33static struct chsc_async_area *on_close_chsc_area;
  34static DEFINE_MUTEX(on_close_mutex);
  35
  36#define CHSC_MSG(imp, args...) do {					\
  37		debug_sprintf_event(chsc_debug_msg_id, imp , ##args);	\
  38	} while (0)
  39
  40#define CHSC_LOG(imp, txt) do {					\
  41		debug_text_event(chsc_debug_log_id, imp , txt);	\
  42	} while (0)
  43
/*
 * Dump @length bytes at @data into the chsc log debug feature as a
 * single debug event.  NOTE(review): unlike the older version of this
 * helper, no manual chunking is done here - presumably the debug layer
 * in this kernel version handles oversized entries; confirm.
 */
static void CHSC_LOG_HEX(int level, void *data, int length)
{
	debug_event(chsc_debug_log_id, level, data, length);
}
  48
  49MODULE_AUTHOR("IBM Corporation");
  50MODULE_DESCRIPTION("driver for s390 chsc subchannels");
  51MODULE_LICENSE("GPL");
  52
/*
 * Interrupt handler for chsc subchannels (v5.4 variant: per-cpu IRB,
 * IRQ accounting via inc_irq_stat).  Copies the IRB into the pending
 * request, completes it and drops the device reference taken when the
 * request was started in chsc_async().
 */
static void chsc_subchannel_irq(struct subchannel *sch)
{
	struct chsc_private *private = dev_get_drvdata(&sch->dev);
	struct chsc_request *request = private->request;
	struct irb *irb = this_cpu_ptr(&cio_irb);

	CHSC_LOG(4, "irb");
	CHSC_LOG_HEX(4, irb, sizeof(*irb));
	inc_irq_stat(IRQIO_CSC);

	/* Copy irb to provided request and set done. */
	if (!request) {
		/* Spurious interrupt: nobody is waiting on this subchannel. */
		CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
			 sch->schid.ssid, sch->schid.sch_no);
		return;
	}
	/* Detach the request first so a new one can be started. */
	private->request = NULL;
	memcpy(&request->irb, irb, sizeof(*irb));
	cio_update_schib(sch);
	complete(&request->completion);
	put_device(&sch->dev);	/* reference taken in chsc_async() */
}
  75
  76static int chsc_subchannel_probe(struct subchannel *sch)
  77{
  78	struct chsc_private *private;
  79	int ret;
  80
  81	CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
  82		 sch->schid.ssid, sch->schid.sch_no);
  83	sch->isc = CHSC_SCH_ISC;
  84	private = kzalloc(sizeof(*private), GFP_KERNEL);
  85	if (!private)
  86		return -ENOMEM;
  87	dev_set_drvdata(&sch->dev, private);
  88	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
  89	if (ret) {
  90		CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
  91			 sch->schid.ssid, sch->schid.sch_no, ret);
  92		dev_set_drvdata(&sch->dev, NULL);
  93		kfree(private);
  94	} else {
  95		if (dev_get_uevent_suppress(&sch->dev)) {
  96			dev_set_uevent_suppress(&sch->dev, 0);
  97			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
  98		}
  99	}
 100	return ret;
 101}
 102
 103static int chsc_subchannel_remove(struct subchannel *sch)
 104{
 105	struct chsc_private *private;
 106
 107	cio_disable_subchannel(sch);
 108	private = dev_get_drvdata(&sch->dev);
 109	dev_set_drvdata(&sch->dev, NULL);
 110	if (private->request) {
 111		complete(&private->request->completion);
 112		put_device(&sch->dev);
 113	}
 114	kfree(private);
 115	return 0;
 116}
 117
 118static void chsc_subchannel_shutdown(struct subchannel *sch)
 119{
 120	cio_disable_subchannel(sch);
 121}
 122
 123static int chsc_subchannel_prepare(struct subchannel *sch)
 124{
 125	int cc;
 126	struct schib schib;
 127	/*
 128	 * Don't allow suspend while the subchannel is not idle
 129	 * since we don't have a way to clear the subchannel and
 130	 * cannot disable it with a request running.
 131	 */
 132	cc = stsch(sch->schid, &schib);
 133	if (!cc && scsw_stctl(&schib.scsw))
 134		return -EAGAIN;
 135	return 0;
 136}
 137
 138static int chsc_subchannel_freeze(struct subchannel *sch)
 139{
 140	return cio_disable_subchannel(sch);
 141}
 142
 143static int chsc_subchannel_restore(struct subchannel *sch)
 144{
 145	return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
 146}
 147
 148static struct css_device_id chsc_subchannel_ids[] = {
 149	{ .match_flags = 0x1, .type =SUBCHANNEL_TYPE_CHSC, },
 150	{ /* end of list */ },
 151};
 152MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
 153
 154static struct css_driver chsc_subchannel_driver = {
 155	.drv = {
 156		.owner = THIS_MODULE,
 157		.name = "chsc_subchannel",
 158	},
 159	.subchannel_type = chsc_subchannel_ids,
 160	.irq = chsc_subchannel_irq,
 161	.probe = chsc_subchannel_probe,
 162	.remove = chsc_subchannel_remove,
 163	.shutdown = chsc_subchannel_shutdown,
 164	.prepare = chsc_subchannel_prepare,
 165	.freeze = chsc_subchannel_freeze,
 166	.thaw = chsc_subchannel_restore,
 167	.restore = chsc_subchannel_restore,
 168};
 169
/*
 * Register the "chsc_msg" and "chsc_log" s390 debug features at debug
 * level 2.  Returns 0 on success, -ENOMEM if either registration
 * fails.  On the failure path debug_unregister() is called without a
 * NULL check - presumably NULL-safe in this kernel version; confirm.
 */
static int __init chsc_init_dbfs(void)
{
	chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long));
	if (!chsc_debug_msg_id)
		goto out;
	debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
	debug_set_level(chsc_debug_msg_id, 2);
	chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
	if (!chsc_debug_log_id)
		goto out;
	debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
	debug_set_level(chsc_debug_log_id, 2);
	return 0;
out:
	debug_unregister(chsc_debug_msg_id);
	return -ENOMEM;
}
 187
 188static void chsc_remove_dbfs(void)
 189{
 190	debug_unregister(chsc_debug_log_id);
 191	debug_unregister(chsc_debug_msg_id);
 192}
 193
 194static int __init chsc_init_sch_driver(void)
 195{
 196	return css_driver_register(&chsc_subchannel_driver);
 197}
 198
 199static void chsc_cleanup_sch_driver(void)
 200{
 201	css_driver_unregister(&chsc_subchannel_driver);
 202}
 203
 204static DEFINE_SPINLOCK(chsc_lock);
 205
 206static int chsc_subchannel_match_next_free(struct device *dev, const void *data)
 207{
 208	struct subchannel *sch = to_subchannel(dev);
 209
 210	return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
 211}
 212
 213static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
 214{
 215	struct device *dev;
 216
 217	dev = driver_find_device(&chsc_subchannel_driver.drv,
 218				 sch ? &sch->dev : NULL, NULL,
 219				 chsc_subchannel_match_next_free);
 220	return dev ? to_subchannel(dev) : NULL;
 221}
 222
 223/**
 224 * chsc_async() - try to start a chsc request asynchronously
 225 * @chsc_area: request to be started
 226 * @request: request structure to associate
 227 *
 228 * Tries to start a chsc request on one of the existing chsc subchannels.
 229 * Returns:
 230 *  %0 if the request was performed synchronously
 231 *  %-EINPROGRESS if the request was successfully started
 232 *  %-EBUSY if all chsc subchannels are busy
 233 *  %-ENODEV if no chsc subchannels are available
 234 * Context:
 235 *  interrupts disabled, chsc_lock held
 236 */
 237static int chsc_async(struct chsc_async_area *chsc_area,
 238		      struct chsc_request *request)
 239{
 240	int cc;
 241	struct chsc_private *private;
 242	struct subchannel *sch = NULL;
 243	int ret = -ENODEV;
 244	char dbf[10];
 245
 246	chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
 247	while ((sch = chsc_get_next_subchannel(sch))) {
 248		spin_lock(sch->lock);
 249		private = dev_get_drvdata(&sch->dev);
 250		if (private->request) {
 251			spin_unlock(sch->lock);
 252			ret = -EBUSY;
 253			continue;
 254		}
 255		chsc_area->header.sid = sch->schid;
 256		CHSC_LOG(2, "schid");
 257		CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
 258		cc = chsc(chsc_area);
 259		snprintf(dbf, sizeof(dbf), "cc:%d", cc);
 260		CHSC_LOG(2, dbf);
 261		switch (cc) {
 262		case 0:
 263			ret = 0;
 264			break;
 265		case 1:
 266			sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
 267			ret = -EINPROGRESS;
 268			private->request = request;
 269			break;
 270		case 2:
 271			ret = -EBUSY;
 272			break;
 273		default:
 274			ret = -ENODEV;
 275		}
 276		spin_unlock(sch->lock);
 277		CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
 278			 sch->schid.ssid, sch->schid.sch_no, cc);
 279		if (ret == -EINPROGRESS)
 280			return -EINPROGRESS;
 281		put_device(&sch->dev);
 282		if (ret == 0)
 283			return 0;
 284	}
 285	return ret;
 286}
 287
/*
 * Log the CHSC command code (the second halfword of the area, i.e. the
 * header's code field) and the first 32 bytes of the request.
 */
static void chsc_log_command(void *chsc_area)
{
	char dbf[10];

	snprintf(dbf, sizeof(dbf), "CHSC:%x", ((uint16_t *)chsc_area)[1]);
	CHSC_LOG(0, dbf);
	CHSC_LOG_HEX(0, chsc_area, 32);
}
 296
 297static int chsc_examine_irb(struct chsc_request *request)
 298{
 299	int backed_up;
 300
 301	if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
 302		return -EIO;
 303	backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
 304	request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
 305	if (scsw_cstat(&request->irb.scsw) == 0)
 306		return 0;
 307	if (!backed_up)
 308		return 0;
 309	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
 310		return -EIO;
 311	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
 312		return -EPERM;
 313	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
 314		return -EAGAIN;
 315	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
 316		return -EAGAIN;
 317	return -EIO;
 318}
 319
/*
 * Handle the CHSC_START ioctl: issue a user-supplied chsc command via a
 * chsc subchannel and copy the one-page result area back to user space.
 */
static int chsc_ioctl_start(void __user *user_area)
{
	struct chsc_request *request;
	struct chsc_async_area *chsc_area;
	int ret;
	char dbf[10];

	if (!css_general_characteristics.dynio)
		/* It makes no sense to try. */
		return -EOPNOTSUPP;
	chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
	if (!chsc_area)
		return -ENOMEM;
	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request) {
		ret = -ENOMEM;
		goto out_free;
	}
	init_completion(&request->completion);
	/* The command area is exactly one page. */
	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
		ret = -EFAULT;
		goto out_free;
	}
	chsc_log_command(chsc_area);
	spin_lock_irq(&chsc_lock);
	ret = chsc_async(chsc_area, request);
	spin_unlock_irq(&chsc_lock);
	if (ret == -EINPROGRESS) {
		/* Command was started; wait for the interrupt handler to
		 * complete the request, then inspect the resulting irb. */
		wait_for_completion(&request->completion);
		ret = chsc_examine_irb(request);
	}
	/* copy area back to user */
	if (!ret)
		if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
			ret = -EFAULT;
out_free:
	snprintf(dbf, sizeof(dbf), "ret:%d", ret);
	CHSC_LOG(0, dbf);
	kfree(request);
	free_page((unsigned long)chsc_area);
	return ret;
}
 362
/*
 * Handle CHSC_ON_CLOSE_SET: stash a one-page chsc command that will be
 * issued automatically when the device is released (see chsc_release()).
 * Only one such command can be registered at a time.
 */
static int chsc_ioctl_on_close_set(void __user *user_area)
{
	char dbf[13];
	int ret;

	mutex_lock(&on_close_mutex);
	if (on_close_chsc_area) {
		/* A command is already registered. */
		ret = -EBUSY;
		goto out_unlock;
	}
	on_close_request = kzalloc(sizeof(*on_close_request), GFP_KERNEL);
	if (!on_close_request) {
		ret = -ENOMEM;
		goto out_unlock;
	}
	on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
	if (!on_close_chsc_area) {
		ret = -ENOMEM;
		goto out_free_request;
	}
	if (copy_from_user(on_close_chsc_area, user_area, PAGE_SIZE)) {
		ret = -EFAULT;
		goto out_free_chsc;
	}
	ret = 0;
	goto out_unlock;

out_free_chsc:
	free_page((unsigned long)on_close_chsc_area);
	on_close_chsc_area = NULL;
out_free_request:
	kfree(on_close_request);
	on_close_request = NULL;
out_unlock:
	mutex_unlock(&on_close_mutex);
	snprintf(dbf, sizeof(dbf), "ocsret:%d", ret);
	CHSC_LOG(0, dbf);
	return ret;
}
 402
/*
 * Handle CHSC_ON_CLOSE_REMOVE: discard a previously registered
 * "on close" chsc command without issuing it.
 */
static int chsc_ioctl_on_close_remove(void)
{
	char dbf[13];
	int ret;

	mutex_lock(&on_close_mutex);
	if (!on_close_chsc_area) {
		/* Nothing was registered. */
		ret = -ENOENT;
		goto out_unlock;
	}
	free_page((unsigned long)on_close_chsc_area);
	on_close_chsc_area = NULL;
	kfree(on_close_request);
	on_close_request = NULL;
	ret = 0;
out_unlock:
	mutex_unlock(&on_close_mutex);
	snprintf(dbf, sizeof(dbf), "ocrret:%d", ret);
	CHSC_LOG(0, dbf);
	return ret;
}
 424
/*
 * Handle CHSC_START_SYNC: issue a user-supplied chsc command
 * synchronously, i.e. without going through a chsc subchannel.
 */
static int chsc_ioctl_start_sync(void __user *user_area)
{
	struct chsc_sync_area *chsc_area;
	int ret, ccode;

	chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!chsc_area)
		return -ENOMEM;
	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
		ret = -EFAULT;
		goto out_free;
	}
	/* NOTE(review): command codes with bit 0x4000 set appear to be
	 * asynchronous variants and are rejected on this synchronous
	 * path — confirm against the chsc command code definitions. */
	if (chsc_area->header.code & 0x4000) {
		ret = -EINVAL;
		goto out_free;
	}
	chsc_log_command(chsc_area);
	ccode = chsc(chsc_area);
	if (ccode != 0) {
		/* Nonzero condition code from the chsc instruction. */
		ret = -EIO;
		goto out_free;
	}
	if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	free_page((unsigned long)chsc_area);
	return ret;
}
 455
/*
 * Handle CHSC_INFO_CHANNEL_PATH: issue chsc command 0x0028 for the
 * chpid supplied by the user and copy the description back.
 */
static int chsc_ioctl_info_channel_path(void __user *user_cd)
{
	struct chsc_chp_cd *cd;
	int ret, ccode;
	/* Request/response layout of the command; fills one page. */
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scpcd_area;

	scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpcd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scpcd_area->request.length = 0x0010;
	scpcd_area->request.code = 0x0028;
	scpcd_area->m = cd->m;
	scpcd_area->fmt1 = cd->fmt;
	scpcd_area->cssid = cd->chpid.cssid;
	/* Query a single chpid: first == last. */
	scpcd_area->first_chpid = cd->chpid.id;
	scpcd_area->last_chpid = cd->chpid.id;

	ccode = chsc(scpcd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scpcd_area->response.code != 0x0001) {
		/* Anything but 0x0001 is treated as failure. */
		ret = -EIO;
		CHSC_MSG(0, "scpcd: response code=%x\n",
			 scpcd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scpcd_area);
	return ret;
}
 517
/*
 * Handle CHSC_INFO_CU: issue chsc command 0x0026 for the control unit
 * given by cssid/cun and copy the description back to user space.
 */
static int chsc_ioctl_info_cu(void __user *user_cd)
{
	struct chsc_cu_cd *cd;
	int ret, ccode;
	/* Request/response layout of the command; fills one page. */
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_cun : 8;
		u32 : 24;
		u32 last_cun : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scucd_area;

	scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scucd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scucd_area->request.length = 0x0010;
	scucd_area->request.code = 0x0026;
	scucd_area->m = cd->m;
	scucd_area->fmt1 = cd->fmt;
	scucd_area->cssid = cd->cssid;
	/* Query a single control unit: first == last. */
	scucd_area->first_cun = cd->cun;
	scucd_area->last_cun = cd->cun;

	ccode = chsc(scucd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scucd_area->response.code != 0x0001) {
		/* Anything but 0x0001 is treated as failure. */
		ret = -EIO;
		CHSC_MSG(0, "scucd: response code=%x\n",
			 scucd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scucd_area);
	return ret;
}
 579
/*
 * Handle CHSC_INFO_SCH_CU: issue chsc command 0x0006 for the subchannel
 * given by the user and copy the control-unit data back.
 */
static int chsc_ioctl_info_sch_cu(void __user *user_cud)
{
	struct chsc_sch_cud *cud;
	int ret, ccode;
	/* Request/response layout of the command; fills one page. */
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 5;
		u32 fmt1 : 4;
		u32 : 2;
		u32 ssid : 2;
		u32 first_sch : 16;
		u32 : 8;
		u32 cssid : 8;
		u32 last_sch : 16;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sscud_area;

	sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sscud_area)
		return -ENOMEM;
	cud = kzalloc(sizeof(*cud), GFP_KERNEL);
	if (!cud) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cud, user_cud, sizeof(*cud))) {
		ret = -EFAULT;
		goto out_free;
	}
	sscud_area->request.length = 0x0010;
	sscud_area->request.code = 0x0006;
	sscud_area->m = cud->schid.m;
	sscud_area->fmt1 = cud->fmt;
	sscud_area->ssid = cud->schid.ssid;
	/* Query a single subchannel: first == last. */
	sscud_area->first_sch = cud->schid.sch_no;
	sscud_area->cssid = cud->schid.cssid;
	sscud_area->last_sch = cud->schid.sch_no;

	ccode = chsc(sscud_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sscud_area->response.code != 0x0001) {
		/* Anything but 0x0001 is treated as failure. */
		ret = -EIO;
		CHSC_MSG(0, "sscud: response code=%x\n",
			 sscud_area->response.code);
		goto out_free;
	}
	memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
	if (copy_to_user(user_cud, cud, sizeof(*cud)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cud);
	free_page((unsigned long)sscud_area);
	return ret;
}
 643
/*
 * Handle CHSC_INFO_CI: issue chsc command 0x0012 for the css image
 * identified by the user and copy the configuration data back.
 */
static int chsc_ioctl_conf_info(void __user *user_ci)
{
	struct chsc_conf_info *ci;
	int ret, ccode;
	/* Request/response layout of the command; fills one page. */
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 6;
		u32 ssid : 2;
		u32 : 8;
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sci_area;

	sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sci_area)
		return -ENOMEM;
	ci = kzalloc(sizeof(*ci), GFP_KERNEL);
	if (!ci) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ci, user_ci, sizeof(*ci))) {
		ret = -EFAULT;
		goto out_free;
	}
	sci_area->request.length = 0x0010;
	sci_area->request.code = 0x0012;
	sci_area->m = ci->id.m;
	sci_area->fmt1 = ci->fmt;
	sci_area->cssid = ci->id.cssid;
	sci_area->ssid = ci->id.ssid;

	ccode = chsc(sci_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sci_area->response.code != 0x0001) {
		/* Anything but 0x0001 is treated as failure. */
		ret = -EIO;
		CHSC_MSG(0, "sci: response code=%x\n",
			 sci_area->response.code);
		goto out_free;
	}
	memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
	if (copy_to_user(user_ci, ci, sizeof(*ci)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ci);
	free_page((unsigned long)sci_area);
	return ret;
}
 703
/*
 * Handle CHSC_INFO_CCL: issue chsc command 0x0030. The interpretation
 * of list_parm depends on the requested component type (ctype).
 */
static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
{
	struct chsc_comp_list *ccl;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 ctype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u64 : 64;
		u32 list_parm[2];
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sccl_area;
	/* list_parm layout for chpid-based component types. */
	struct {
		u32 m : 1;
		u32 : 31;
		u32 cssid : 8;
		u32 : 16;
		u32 chpid : 8;
	} __attribute__ ((packed)) *chpid_parm;
	/* list_parm layout for css-image-based component types. */
	struct {
		u32 f_cssid : 8;
		u32 l_cssid : 8;
		u32 : 16;
		u32 res;
	} __attribute__ ((packed)) *cssids_parm;

	sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccl_area)
		return -ENOMEM;
	ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
	if (!ccl) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
		ret = -EFAULT;
		goto out_free;
	}
	sccl_area->request.length = 0x0020;
	sccl_area->request.code = 0x0030;
	sccl_area->fmt = ccl->req.fmt;
	sccl_area->ctype = ccl->req.ctype;
	/* Fill list_parm according to ctype; other types leave it zeroed. */
	switch (sccl_area->ctype) {
	case CCL_CU_ON_CHP:
	case CCL_IOP_CHP:
		chpid_parm = (void *)&sccl_area->list_parm;
		chpid_parm->m = ccl->req.chpid.m;
		chpid_parm->cssid = ccl->req.chpid.chp.cssid;
		chpid_parm->chpid = ccl->req.chpid.chp.id;
		break;
	case CCL_CSS_IMG:
	case CCL_CSS_IMG_CONF_CHAR:
		cssids_parm = (void *)&sccl_area->list_parm;
		cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
		cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
		break;
	}
	ccode = chsc(sccl_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sccl_area->response.code != 0x0001) {
		/* Anything but 0x0001 is treated as failure. */
		ret = -EIO;
		CHSC_MSG(0, "sccl: response code=%x\n",
			 sccl_area->response.code);
		goto out_free;
	}
	memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
	if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ccl);
	free_page((unsigned long)sccl_area);
	return ret;
}
 786
/*
 * Handle CHSC_INFO_CPD: obtain a channel-path description via the
 * common chsc_determine_channel_path_desc() helper.
 */
static int chsc_ioctl_chpd(void __user *user_chpd)
{
	struct chsc_scpd *scpd_area;
	struct chsc_cpd_info *chpd;
	int ret;

	chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area || !chpd) {
		/* kfree(NULL) and free_page(0) are no-ops, so a partial
		 * allocation is safely handled by the common exit path. */
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
		ret = -EFAULT;
		goto out_free;
	}
	ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
					       chpd->rfmt, chpd->c, chpd->m,
					       scpd_area);
	if (ret)
		goto out_free;
	memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
	if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
		ret = -EFAULT;
out_free:
	kfree(chpd);
	free_page((unsigned long)scpd_area);
	return ret;
}
 816
/*
 * Handle CHSC_INFO_DCAL: issue chsc command 0x0034 (dcal) with the
 * user-supplied access type and list parameters and copy the result back.
 */
static int chsc_ioctl_dcal(void __user *user_dcal)
{
	struct chsc_dcal *dcal;
	int ret, ccode;
	/* Request/response layout of the command; fills one page. */
	struct {
		struct chsc_header request;
		u32 atype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u32 res0[2];
		u32 list_parm[2];
		u32 res1[2];
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sdcal_area;

	sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sdcal_area)
		return -ENOMEM;
	dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
	if (!dcal) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
		ret = -EFAULT;
		goto out_free;
	}
	sdcal_area->request.length = 0x0020;
	sdcal_area->request.code = 0x0034;
	sdcal_area->atype = dcal->req.atype;
	sdcal_area->fmt = dcal->req.fmt;
	memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
	       sizeof(sdcal_area->list_parm));

	ccode = chsc(sdcal_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sdcal_area->response.code != 0x0001) {
		/* Anything but 0x0001 is treated as failure. */
		ret = -EIO;
		CHSC_MSG(0, "sdcal: response code=%x\n",
			 sdcal_area->response.code);
		goto out_free;
	}
	memcpy(&dcal->sdcal, &sdcal_area->response,
	       sdcal_area->response.length);
	if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(dcal);
	free_page((unsigned long)sdcal_area);
	return ret;
}
 875
/*
 * ioctl dispatcher for the chsc misc device. Converts the argument
 * pointer with compat_ptr() for compat tasks and forwards to the
 * per-command handlers.
 */
static long chsc_ioctl(struct file *filp, unsigned int cmd,
		       unsigned long arg)
{
	void __user *argp;

	CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
	if (is_compat_task())
		argp = compat_ptr(arg);
	else
		argp = (void __user *)arg;
	switch (cmd) {
	case CHSC_START:
		return chsc_ioctl_start(argp);
	case CHSC_START_SYNC:
		return chsc_ioctl_start_sync(argp);
	case CHSC_INFO_CHANNEL_PATH:
		return chsc_ioctl_info_channel_path(argp);
	case CHSC_INFO_CU:
		return chsc_ioctl_info_cu(argp);
	case CHSC_INFO_SCH_CU:
		return chsc_ioctl_info_sch_cu(argp);
	case CHSC_INFO_CI:
		return chsc_ioctl_conf_info(argp);
	case CHSC_INFO_CCL:
		return chsc_ioctl_conf_comp_list(argp);
	case CHSC_INFO_CPD:
		return chsc_ioctl_chpd(argp);
	case CHSC_INFO_DCAL:
		return chsc_ioctl_dcal(argp);
	case CHSC_ON_CLOSE_SET:
		return chsc_ioctl_on_close_set(argp);
	case CHSC_ON_CLOSE_REMOVE:
		return chsc_ioctl_on_close_remove();
	default: /* unknown ioctl number */
		return -ENOIOCTLCMD;
	}
}
 913
/* 1 while the device is unused; chsc_open() claims it by decrementing to 0. */
static atomic_t chsc_ready_for_use = ATOMIC_INIT(1);
 915
 916static int chsc_open(struct inode *inode, struct file *file)
 917{
 918	if (!atomic_dec_and_test(&chsc_ready_for_use)) {
 919		atomic_inc(&chsc_ready_for_use);
 920		return -EBUSY;
 921	}
 922	return nonseekable_open(inode, file);
 923}
 924
/*
 * Release the chsc device. If an "on close" chsc command was registered
 * via CHSC_ON_CLOSE_SET, issue it now, then drop the stored command and
 * request and mark the device available again. Always returns 0.
 */
static int chsc_release(struct inode *inode, struct file *filp)
{
	char dbf[13];
	int ret;

	mutex_lock(&on_close_mutex);
	if (!on_close_chsc_area)
		goto out_unlock;
	init_completion(&on_close_request->completion);
	CHSC_LOG(0, "on_close");
	chsc_log_command(on_close_chsc_area);
	spin_lock_irq(&chsc_lock);
	ret = chsc_async(on_close_chsc_area, on_close_request);
	spin_unlock_irq(&chsc_lock);
	if (ret == -EINPROGRESS) {
		/* Wait for the interrupt handler to complete the request. */
		wait_for_completion(&on_close_request->completion);
		ret = chsc_examine_irb(on_close_request);
	}
	/* The outcome is only logged; release itself cannot fail. */
	snprintf(dbf, sizeof(dbf), "relret:%d", ret);
	CHSC_LOG(0, dbf);
	free_page((unsigned long)on_close_chsc_area);
	on_close_chsc_area = NULL;
	kfree(on_close_request);
	on_close_request = NULL;
out_unlock:
	mutex_unlock(&on_close_mutex);
	/* Allow the next open of the single-user device. */
	atomic_inc(&chsc_ready_for_use);
	return 0;
}
 954
/* File operations for the chsc misc device; the same ioctl handler
 * serves both native and compat calls. */
static const struct file_operations chsc_fops = {
	.owner = THIS_MODULE,
	.open = chsc_open,
	.release = chsc_release,
	.unlocked_ioctl = chsc_ioctl,
	.compat_ioctl = chsc_ioctl,
	.llseek = no_llseek,
};
 963
/* Dynamic-minor misc device providing the /dev/chsc interface. */
static struct miscdevice chsc_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "chsc",
	.fops = &chsc_fops,
};
 969
/* Register the chsc misc device. */
static int __init chsc_misc_init(void)
{
	return misc_register(&chsc_misc_device);
}
 974
/* Unregister the chsc misc device. */
static void chsc_misc_cleanup(void)
{
	misc_deregister(&chsc_misc_device);
}
 979
/*
 * Module initialization: set up the debug feature, register the chsc
 * interruption subclass, the subchannel driver and the misc device.
 * Tears everything down again in reverse order on failure.
 */
static int __init chsc_sch_init(void)
{
	int ret;

	ret = chsc_init_dbfs();
	if (ret)
		return ret;
	isc_register(CHSC_SCH_ISC);
	ret = chsc_init_sch_driver();
	if (ret)
		goto out_dbf;
	ret = chsc_misc_init();
	if (ret)
		goto out_driver;
	return ret;
out_driver:
	chsc_cleanup_sch_driver();
out_dbf:
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
	return ret;
}
1002
/* Module cleanup: undo chsc_sch_init() in reverse order. */
static void __exit chsc_sch_exit(void)
{
	chsc_misc_cleanup();
	chsc_cleanup_sch_driver();
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
}
1010
1011module_init(chsc_sch_init);
1012module_exit(chsc_sch_exit);