/*
 *  drivers/s390/cio/device.c
 *  bus driver for ccw devices
 *
 *    Copyright IBM Corp. 2002,2008
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/timer.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>

#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "chsc.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel subsystem per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match (struct device * dev, struct device_driver * drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}
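
/*
 * Worked example (hypothetical IDs, added for illustration): for
 * cu_type 0x3990, cu_model 0xE9, dev_type 0x3390 and dev_model 0x0C
 * the function above produces "ccw:t3990mE9dt3390dm0C"; with
 * dev_type == 0 it produces "ccw:t3990mE9dtdm".
 */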

/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS=  */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}
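
/*
 * For the same hypothetical device as above (cu 3990/E9, device
 * 3390/0C), the uevent environment set up here would read:
 *
 *	CU_TYPE=3990
 *	CU_MODEL=E9
 *	DEV_TYPE=3390
 *	DEV_MODEL=0C
 *	MODALIAS=ccw:t3990mE9dt3390dm0C
 */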

static struct bus_type ccw_bus_type;

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(unsigned long data);
wait_queue_head_t ccw_device_init_wq;
atomic_t ccw_device_init_count;

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, io_subchannel_ids);

static int io_subchannel_prepare(struct subchannel *sch)
{
	struct ccw_device *cdev;
	/*
	 * Don't allow suspend while a ccw device registration
	 * is still outstanding.
	 */
	cdev = sch_get_cdev(sch);
	if (cdev && !device_is_registered(&cdev->dev))
		return -EAGAIN;
	return 0;
}

static int io_subchannel_settle(void)
{
	int ret;

	ret = wait_event_interruptible(ccw_device_init_wq,
				atomic_read(&ccw_device_init_count) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}

static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.prepare = io_subchannel_prepare,
	.settle = io_subchannel_settle,
};

int __init io_subchannel_init(void)
{
	int ret;

	init_waitqueue_head(&ccw_device_init_wq);
	atomic_set(&ccw_device_init_count, 0);
	setup_timer(&recovery_timer, recovery_func, 0);

	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}


/************************ device handling **************************/

/*
 * A ccw_device has some interfaces in sysfs in addition to the
 * standard ones.
 * The following entries are designed to export the information which
 * resided in 2.4 in /proc/subchannels. Subchannel and device number
 * are obvious, so they don't have an entry :)
 * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
 */
static ssize_t
chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int chp;
	int mask;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf (buf+ret, "\n");
	return min((ssize_t)PAGE_SIZE, ret);
}
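
/*
 * Example (hypothetical values and path): reading the attribute
 * defined above yields one entry per channel path, with "00" for
 * unused slots:
 *
 *	$ cat /sys/devices/css0/0.0.0001/chpids
 *	10 11 12 13 00 00 00 00
 */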

static ssize_t
pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf (buf, "%02x %02x %02x\n",
			pmcw->pim, pmcw->pam, pmcw->pom);
}

static ssize_t
devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
				id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}

static void io_subchannel_quiesce(struct subchannel *);

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	cdev->online = 0;
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warning("%s: The device entered boxed state while "
			   "being set offline\n", dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warning("%s: The device stopped operating while "
			   "being set offline\n", dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}

/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warning("%s: Setting the device online failed "
				   "because it is boxed\n",
				   dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warning("%s: Setting the device online failed "
				   "because it is not operational\n",
				   dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;
	cdev->online = 1;
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
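
/*
 * Illustrative note (added): both functions above must be called with
 * the ccw device lock not held, as documented.  A hypothetical driver
 * that auto-enables its devices could call ccw_device_set_online(cdev)
 * from its probe() callback; conversely, a "0" write to the online
 * attribute ends up in ccw_device_set_offline() (see online_store()
 * below).
 */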

static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
		return 0;
	}
	if (cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return -EINVAL;
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}

static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}

static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out_onoff;
	}
	/* Prevent conflict between pending work and on-/offline processing. */
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out_onoff;
	}

	if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) {
		ret = -EINVAL;
		goto out_onoff;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = strict_strtoul(buf, 16, &i);
	}
	if (ret)
		goto out;
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
out:
	if (cdev->drv)
		module_put(cdev->drv->driver.owner);
out_onoff:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
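
/*
 * Usage sketch (user space, illustrative device address): the
 * attribute defined above is driven from sysfs, e.g.
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.1234/online
 *	echo 0 > /sys/bus/ccw/devices/0.0.1234/online
 *	echo force > /sys/bus/ccw/devices/0.0.1234/online
 *
 * "force" additionally tries to break the reserve of a boxed device
 * via ccw_device_stlck() before setting it online.
 */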

static ssize_t
available_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}

static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int rc;

	rc = chsc_siosl(sch->schid);
	if (rc < 0) {
		pr_warning("Logging for subchannel 0.%x.%04x failed with "
			   "errno=%d\n",
			   sch->schid.ssid, sch->schid.sch_no, rc);
		return rc;
	}
	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
		  sch->schid.ssid, sch->schid.sch_no);
	return count;
}
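
/*
 * Usage sketch (illustrative path): any write to this attribute
 * triggers log collection for the subchannel, e.g.
 *
 *	echo 1 > /sys/devices/css0/0.0.0001/logging
 */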

static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);

static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	&dev_attr_logging.attr,
	NULL,
};

static struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

/* this is a simple abstraction for device_register that sets the
 * correct bus type and adds the bus specific files */
static int ccw_device_register(struct ccw_device *cdev)
{
	struct device *dev = &cdev->dev;
	int ret;

	dev->bus = &ccw_bus_type;
	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
			   cdev->private->dev_id.devno);
	if (ret)
		return ret;
	return device_add(dev);
}

static int match_dev_id(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}

static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		WARN_ON(ret == -ENODEV);
	}
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}

static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (cdev) {
		cdev->private = kzalloc(sizeof(struct ccw_device_private),
					GFP_KERNEL | GFP_DMA);
		if (cdev->private)
			return cdev;
	}
	kfree(cdev);
	return ERR_PTR(-ENOMEM);
}

static void ccw_device_todo(struct work_struct *work);

static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	cdev->private->cdev = cdev;
	atomic_set(&cdev->private->onoff, 0);
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	if (!get_device(&sch->dev)) {
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	cdev->private->flags.initialized = 1;
	return 0;
}

static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret)
			cdev = ERR_PTR(ret);
	}
	return cdev;
}

static void io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}

/*
 * Register recognized device.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		adjust_init_count = 0;
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	dev_set_uevent_suppress(&sch->dev, 0);
	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	/* make it known to the system */
	ret = ccw_device_register(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}

/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}

static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	struct ccw_device_private *priv;

	cdev->ccwlock = sch->lock;

	/* Init private data. */
	priv = cdev->private;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;
	priv->schid = sch->schid;
	priv->state = DEV_STATE_NOT_OPER;
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	init_timer(&priv->timer);

	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}

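/*
 * Overview (added for clarity): ccw_device_move_to_sch() below first
 * disables the old subchannel, then reparents the ccw device with
 * device_move().  On failure the old subchannel is re-enabled; on
 * success the old subchannel is cleaned up and scheduled for
 * re-evaluation, and the device is wired up to its new subchannel.
 */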
static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
			spin_unlock_irq(old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->private->schid = sch->schid;
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}

static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}

static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(6, "IRQ");
	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}

void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}

static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}

/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Throw the delayed uevent for the subchannel, register
		 * the ccw_device and exit.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		cdev = sch_get_cdev(sch);
		cdev->dev.groups = ccwdev_attr_groups;
		device_initialize(&cdev->dev);
		cdev->private->flags.initialized = 1;
		ccw_device_register(cdev);
		/*
		 * Check if the device is already online. If it is
		 * the reference count needs to be corrected since we
		 * didn't obtain a reference in ccw_device_set_online.
		 */
		if (cdev->private->state != DEV_STATE_NOT_OPER &&
		    cdev->private->state != DEV_STATE_OFFLINE &&
		    cdev->private->state != DEV_STATE_BOXED)
			get_device(&cdev->dev);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}

static int
io_subchannel_remove (struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = to_io_private(sch);
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;
	io_subchannel_quiesce(sch);
	/* Set ccw device to not operational and drop reference. */
	spin_lock_irq(cdev->ccwlock);
	sch_set_cdev(sch, NULL);
	set_io_private(sch, NULL);
	cdev->private->state = DEV_STATE_NOT_OPER;
	spin_unlock_irq(cdev->ccwlock);
	ccw_device_unregister(cdev);
out_free:
	kfree(io_priv);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}

static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}

static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask;

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	}
	return 0;
}

static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}

static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}

static int device_is_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}

static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(4, "recovery: end\n");
}

static DECLARE_WORK(recovery_work, recovery_work_func);

static void recovery_func(unsigned long data)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}

static void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(4, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}
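
/*
 * Timing note (added for clarity): a scheduled recovery starts in
 * phase 0; as long as disconnected devices remain,
 * recovery_work_func() re-arms the timer with the delays from
 * recovery_delay[], i.e. after 3, then 30, then repeatedly 300
 * seconds.
 */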

static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE) &&
	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		atomic_set(&cdev->private->onoff, 0);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

/**
 * ccw_purge_blacklisted - purge unused, blacklisted devices
 *
 * Unregister all ccw devices that are offline and on the blacklist.
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
	return 0;
}

void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}

enum io_sch_action {
	IO_SCH_UNREG,
	IO_SCH_ORPH_UNREG,
	IO_SCH_ATTACH,
	IO_SCH_UNREG_ATTACH,
	IO_SCH_ORPH_ATTACH,
	IO_SCH_REPROBE,
	IO_SCH_VERIFY,
	IO_SCH_DISC,
	IO_SCH_NOP,
};
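
/*
 * Added for clarity: each action encodes what to do with both the
 * subchannel and an attached ccw device, e.g. IO_SCH_ORPH_ATTACH means
 * "move the current device to the orphanage, then attach the device
 * matching this subchannel's device number" (see
 * io_subchannel_sch_event() below).
 */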

static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online)
		return IO_SCH_VERIFY;
	return IO_SCH_NOP;
}

/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		if (cdev->private->flags.resuming == 1) {
			if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) {
				ccw_device_set_notoper(cdev);
				break;
			}
		}
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (!cdev)
			break;
		if (cdev->private->state == DEV_STATE_SENSE_ID) {
			/*
			 * Note: delayed work triggered by this event
			 * and repeated calls to sch_event are synchronized
			 * by the above check for work_pending(cdev).
			 */
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_ATTACH:
		if (cdev->private->flags.resuming) {
			/* Device will be handled later. */
			rc = 0;
			goto out;
		}
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		if (!cdev || !cdev->private->flags.resuming)
			css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	return 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
out:
	return rc;
}

#ifdef CONFIG_CCW_CONSOLE
static struct ccw_device console_cdev;
static struct ccw_device_private console_private;
static int console_cdev_in_use;

static DEFINE_SPINLOCK(ccw_console_lock);

spinlock_t * cio_get_console_lock(void)
{
	return &ccw_console_lock;
}

static int ccw_device_console_enable(struct ccw_device *cdev,
				     struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = cio_get_console_priv();
	int rc;

	/* Attach subchannel private data. */
	memset(io_priv, 0, sizeof(*io_priv));
	set_io_private(sch, io_priv);
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	/* Initialize the ccw_device structure. */
	cdev->dev.parent = &sch->dev;
	sch_set_cdev(sch, cdev);
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		wait_cons_dev();
	rc = -EIO;
	if (cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;
	ccw_device_online(cdev);
	while (!dev_fsm_final_state(cdev))
		wait_cons_dev();
	if (cdev->private->state != DEV_STATE_ONLINE)
		goto out_unlock;
	rc = 0;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	return rc;
}

struct ccw_device *
ccw_device_probe_console(void)
{
	struct subchannel *sch;
	int ret;

	if (xchg(&console_cdev_in_use, 1) != 0)
		return ERR_PTR(-EBUSY);
	sch = cio_probe_console();
	if (IS_ERR(sch)) {
		console_cdev_in_use = 0;
		return (void *) sch;
	}
	memset(&console_cdev, 0, sizeof(struct ccw_device));
	memset(&console_private, 0, sizeof(struct ccw_device_private));
	console_cdev.private = &console_private;
	console_private.cdev = &console_cdev;
	ret = ccw_device_console_enable(&console_cdev, sch);
	if (ret) {
		cio_release_console();
		console_cdev_in_use = 0;
		return ERR_PTR(ret);
	}
	console_cdev.online = 1;
	return &console_cdev;
}

static int ccw_device_pm_restore(struct device *dev);

int ccw_device_force_console(void)
{
	if (!console_cdev_in_use)
		return -ENODEV;
	return ccw_device_pm_restore(&console_cdev.dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
#endif

/*
 * get ccw_device matching the busid, but only if owned by cdrv
 */
static int
__ccwdev_check_busid(struct device *dev, void *id)
{
	char *bus_id;

	bus_id = id;

	return (strcmp(bus_id, dev_name(dev)) == 0);
}


/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *  If a match is found, the reference count of the found device is increased
 *  and it is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;
	struct device_driver *drv;

	drv = get_driver(&cdrv->driver);
	if (!drv)
		return NULL;

	dev = driver_find_device(drv, NULL, (void *)bus_id,
				 __ccwdev_check_busid);
	put_driver(drv);

	return dev ? to_ccwdev(dev) : NULL;
}
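
/*
 * Usage sketch (hypothetical caller): the returned device carries an
 * extra reference that the caller must drop again:
 *
 *	cdev = get_ccwdev_by_busid(&sample_driver, "0.0.1234");
 *	if (cdev) {
 *		...
 *		put_device(&cdev->dev);
 *	}
 */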

/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
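
/*
 * Illustrative sketch (hypothetical driver, not part of this file):
 * a minimal ccw driver supplies an id table for matching plus the
 * callbacks it needs, roughly:
 *
 *	static struct ccw_device_id sample_ids[] = {
 *		{ CCW_DEVICE(0x3990, 0xe9) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(ccw, sample_ids);
 *
 *	static struct ccw_driver sample_driver = {
 *		.driver = {
 *			.name	= "sample",
 *			.owner	= THIS_MODULE,
 *		},
 *		.ids	     = sample_ids,
 *		.probe	     = sample_probe,
 *		.remove	     = sample_remove,
 *		.set_online  = sample_set_online,
 *		.set_offline = sample_set_offline,
 *	};
 */
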
static int
ccw_device_probe (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */

	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;

	if (ret) {
		cdev->drv = NULL;
		return ret;
	}

	return 0;
}

static int
ccw_device_remove (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);
	if (cdev->online) {
		cdev->online = 0;
		spin_lock_irq(cdev->ccwlock);
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	return 0;
}

static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	disable_cmf(cdev);
}

static int ccw_device_pm_prepare(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (work_pending(&cdev->private->todo_work))
		return -EAGAIN;
	/* Fail while device is being set online/offline. */
	if (atomic_read(&cdev->private->onoff))
		return -EAGAIN;

	if (cdev->online && cdev->drv && cdev->drv->prepare)
		return cdev->drv->prepare(cdev);

	return 0;
}

static void ccw_device_pm_complete(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (cdev->online && cdev->drv && cdev->drv->complete)
		cdev->drv->complete(cdev);
}

static int ccw_device_pm_freeze(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	/* Fail suspend while device is in transitional state. */
	if (!dev_fsm_final_state(cdev))
		return -EAGAIN;
	if (!cdev->online)
		return 0;
	if (cdev->drv && cdev->drv->freeze) {
		ret = cdev->drv->freeze(cdev);
		if (ret)
			return ret;
	}

	spin_lock_irq(sch->lock);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (cm_enabled) {
		/* Don't have the css write on memory. */
		ret = ccw_set_cmf(cdev, 0);
		if (ret)
			return ret;
	}
	/* From here on, disallow device driver I/O. */
	spin_lock_irq(sch->lock);
	ret = cio_disable_subchannel(sch);
	spin_unlock_irq(sch->lock);

	return ret;
}

static int ccw_device_pm_thaw(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	if (!cdev->online)
		return 0;

	spin_lock_irq(sch->lock);
	/* Allow device driver I/O again. */
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;

	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 1);
		if (ret)
			return ret;
	}

	if (cdev->drv && cdev->drv->thaw)
		ret = cdev->drv->thaw(cdev);

	return ret;
}

static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid)) {
		cio_enable_subchannel(sch, (u32)(addr_t)sch);
		goto out_unlock;
	}
	/*
	 * While we were sleeping, devices may have gone or become
	 * available again. Kick re-detection.
	 */
	cdev->private->flags.resuming = 1;
	cdev->private->path_new_mask = LPM_ANYPATH;
	css_schedule_eval(sch->schid);
	spin_unlock_irq(sch->lock);
	css_complete_work();

	/* cdev may have been moved to a different subchannel. */
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;

	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED);
	spin_lock_irq(sch->lock);

out_unlock:
	cdev->private->flags.resuming = 0;
	spin_unlock_irq(sch->lock);
}

static int resume_handle_boxed(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_BOXED;
	if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}

static int resume_handle_disc(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}

static int ccw_device_pm_restore(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int ret = 0;

	__ccw_device_pm_restore(cdev);
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid))
		goto out_restore;

	/* check recognition results */
	switch (cdev->private->state) {
	case DEV_STATE_OFFLINE:
	case DEV_STATE_ONLINE:
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_BOXED:
		ret = resume_handle_boxed(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	default:
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	/* check if the device type has changed */
	if (!ccw_device_test_sense_data(cdev)) {
		ccw_device_update_sense_data(cdev);
		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		ret = -ENODEV;
		goto out_unlock;
	}
	if (!cdev->online)
		goto out_unlock;

	if (ccw_device_online(cdev)) {
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	spin_lock_irq(sch->lock);

	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		ret = -ENODEV;
		goto out_unlock;
	}

	/* reenable cmf, if needed */
	if (cdev->private->cmb) {
		spin_unlock_irq(sch->lock);
		ret = ccw_set_cmf(cdev, 1);
		spin_lock_irq(sch->lock);
		if (ret) {
			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
				      "(rc=%d)\n", cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			ret = 0;
		}
	}

out_restore:
	spin_unlock_irq(sch->lock);
	if (cdev->online && cdev->drv && cdev->drv->restore)
		ret = cdev->drv->restore(cdev);
	return ret;

out_unlock:
	spin_unlock_irq(sch->lock);
	return ret;
}

static const struct dev_pm_ops ccw_pm_ops = {
	.prepare = ccw_device_pm_prepare,
	.complete = ccw_device_pm_complete,
	.freeze = ccw_device_pm_freeze,
	.thaw = ccw_device_pm_thaw,
	.restore = ccw_device_pm_restore,
};
1984
1985static struct bus_type ccw_bus_type = {
1986	.name   = "ccw",
1987	.match  = ccw_bus_match,
1988	.uevent = ccw_uevent,
1989	.probe  = ccw_device_probe,
1990	.remove = ccw_device_remove,
1991	.shutdown = ccw_device_shutdown,
1992	.pm = &ccw_pm_ops,
1993};
1994
1995/**
1996 * ccw_driver_register() - register a ccw driver
1997 * @cdriver: driver to be registered
1998 *
1999 * This function is mainly a wrapper around driver_register().
2000 * Returns:
2001 *   %0 on success and a negative error value on failure.
2002 */
2003int ccw_driver_register(struct ccw_driver *cdriver)
2004{
2005	struct device_driver *drv = &cdriver->driver;
2006
2007	drv->bus = &ccw_bus_type;
2008
2009	return driver_register(drv);
2010}
2011
2012/**
2013 * ccw_driver_unregister() - deregister a ccw driver
2014 * @cdriver: driver to be deregistered
2015 *
2016 * This function is mainly a wrapper around driver_unregister().
2017 */
2018void ccw_driver_unregister(struct ccw_driver *cdriver)
2019{
2020	driver_unregister(&cdriver->driver);
2021}
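
To illustrate how a driver hooks into this bus, here is a minimal, hypothetical skeleton built around ccw_driver_register()/ccw_driver_unregister(); the "foo" names and the device IDs are placeholders, not taken from this file:

/* Hypothetical sketch of a ccw driver module (names and IDs are made up). */
static struct ccw_device_id foo_ids[] = {
	{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0) },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ccw, foo_ids);

static struct ccw_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
	.ids = foo_ids,
};

static int __init foo_init(void)
{
	return ccw_driver_register(&foo_driver);
}

static void __exit foo_exit(void)
{
	ccw_driver_unregister(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);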
2022
2023/* Helper func for qdio. */
2024struct subchannel_id
2025ccw_device_get_subchannel_id(struct ccw_device *cdev)
2026{
2027	struct subchannel *sch;
2028
2029	sch = to_subchannel(cdev->dev.parent);
2030	return sch->schid;
2031}
2032
2033static void ccw_device_todo(struct work_struct *work)
2034{
2035	struct ccw_device_private *priv;
2036	struct ccw_device *cdev;
2037	struct subchannel *sch;
2038	enum cdev_todo todo;
2039
2040	priv = container_of(work, struct ccw_device_private, todo_work);
2041	cdev = priv->cdev;
2042	sch = to_subchannel(cdev->dev.parent);
2043	/* Find out todo. */
2044	spin_lock_irq(cdev->ccwlock);
2045	todo = priv->todo;
2046	priv->todo = CDEV_TODO_NOTHING;
2047	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
2048		      priv->dev_id.ssid, priv->dev_id.devno, todo);
2049	spin_unlock_irq(cdev->ccwlock);
2050	/* Perform todo. */
2051	switch (todo) {
2052	case CDEV_TODO_ENABLE_CMF:
2053		cmf_reenable(cdev);
2054		break;
2055	case CDEV_TODO_REBIND:
2056		ccw_device_do_unbind_bind(cdev);
2057		break;
2058	case CDEV_TODO_REGISTER:
2059		io_subchannel_register(cdev);
2060		break;
2061	case CDEV_TODO_UNREG_EVAL:
2062		if (!sch_is_pseudo_sch(sch))
2063			css_schedule_eval(sch->schid);
2064		/* fall-through */
2065	case CDEV_TODO_UNREG:
2066		if (sch_is_pseudo_sch(sch))
2067			ccw_device_unregister(cdev);
2068		else
2069			ccw_device_call_sch_unregister(cdev);
2070		break;
2071	default:
2072		break;
2073	}
2074	/* Release workqueue ref. */
2075	put_device(&cdev->dev);
2076}
2077
2078/**
2079 * ccw_device_sched_todo - schedule ccw device operation
2080 * @cdev: ccw device
2081 * @todo: todo
2082 *
2083 * Schedule the operation identified by @todo to be performed on the slow path
2084 * workqueue. Do nothing if another operation with higher priority is already
2085 * scheduled. Needs to be called with ccwdev lock held.
2086 */
2087void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
2088{
2089	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
2090		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
2091		      todo);
2092	if (cdev->private->todo >= todo)
2093		return;
2094	cdev->private->todo = todo;
2095	/* Get workqueue ref. */
2096	if (!get_device(&cdev->dev))
2097		return;
2098	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
2099		/* Already queued, release workqueue ref. */
2100		put_device(&cdev->dev);
2101	}
2102}
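
A typical call site honors the locking rule from the comment above; as a sketch (the surrounding function context is assumed):

unsigned long flags;

/* Sketch: schedule unregistration; the ccwdev lock must be held. */
spin_lock_irqsave(cdev->ccwlock, flags);
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
spin_unlock_irqrestore(cdev->ccwlock, flags);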
2103
2104/**
2105 * ccw_device_siosl() - initiate logging
2106 * @cdev: ccw device
2107 *
2108 * This function is used to invoke model-dependent logging within the channel
2109 * subsystem.
2110 */
2111int ccw_device_siosl(struct ccw_device *cdev)
2112{
2113	struct subchannel *sch = to_subchannel(cdev->dev.parent);
2114
2115	return chsc_siosl(sch->schid);
2116}
2117EXPORT_SYMBOL_GPL(ccw_device_siosl);
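
Since the symbol is exported, a device driver could trigger such logging from an error path, for example (sketch only; the surrounding error-handling context is assumed):

/* Sketch: request model-dependent logging for this device's subchannel. */
if (ccw_device_siosl(cdev))
	dev_warn(&cdev->dev, "could not initiate logging\n");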
2118
2119MODULE_LICENSE("GPL");
2120EXPORT_SYMBOL(ccw_device_set_online);
2121EXPORT_SYMBOL(ccw_device_set_offline);
2122EXPORT_SYMBOL(ccw_driver_register);
2123EXPORT_SYMBOL(ccw_driver_unregister);
2124EXPORT_SYMBOL(get_ccwdev_by_busid);
2125EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
v6.9.4
   1// SPDX-License-Identifier: GPL-1.0+
   2/*
   3 *  bus driver for ccw devices
   4 *
   5 *    Copyright IBM Corp. 2002, 2008
   6 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
   7 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
   8 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
   9 */
  10
  11#define KMSG_COMPONENT "cio"
  12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  13
  14#include <linux/export.h>
  15#include <linux/init.h>
  16#include <linux/spinlock.h>
  17#include <linux/errno.h>
  18#include <linux/err.h>
  19#include <linux/slab.h>
  20#include <linux/list.h>
  21#include <linux/device.h>
  22#include <linux/workqueue.h>
  23#include <linux/delay.h>
  24#include <linux/timer.h>
  25#include <linux/kernel_stat.h>
  26#include <linux/sched/signal.h>
  27#include <linux/dma-mapping.h>
  28
  29#include <asm/ccwdev.h>
  30#include <asm/cio.h>
  31#include <asm/param.h>		/* HZ */
  32#include <asm/cmb.h>
  33#include <asm/isc.h>
  34
  35#include "chp.h"
  36#include "cio.h"
  37#include "cio_debug.h"
  38#include "css.h"
  39#include "device.h"
  40#include "ioasm.h"
  41#include "io_sch.h"
  42#include "blacklist.h"
  43#include "chsc.h"
  44
  45static struct timer_list recovery_timer;
  46static DEFINE_SPINLOCK(recovery_lock);
  47static int recovery_phase;
  48static const unsigned long recovery_delay[] = { 3, 30, 300 };
  49
  50static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
  51static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
  52static const struct bus_type ccw_bus_type;
  53
  54/******************* bus type handling ***********************/
  55
  56/* The Linux driver model distinguishes between a bus type and
  57 * the bus itself. Of course we only have one channel
  58 * subsystem driver and one channel system per machine, but
  59 * we still use the abstraction. T.R. says it's a good idea. */
  60static int
  61ccw_bus_match (struct device * dev, struct device_driver * drv)
  62{
  63	struct ccw_device *cdev = to_ccwdev(dev);
  64	struct ccw_driver *cdrv = to_ccwdrv(drv);
  65	const struct ccw_device_id *ids = cdrv->ids, *found;
  66
  67	if (!ids)
  68		return 0;
  69
  70	found = ccw_device_id_match(ids, &cdev->id);
  71	if (!found)
  72		return 0;
  73
  74	cdev->id.driver_info = found->driver_info;
  75
  76	return 1;
  77}
  78
  79/* Store modalias string delimited by prefix/suffix string into buffer with
  80 * specified size. Return length of resulting string (excluding trailing '\0')
  81 * even if string doesn't fit buffer (snprintf semantics). */
  82static int snprint_alias(char *buf, size_t size,
  83			 const struct ccw_device_id *id, const char *suffix)
  84{
  85	int len;
  86
  87	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
  88	if (len > size)
  89		return len;
  90	buf += len;
  91	size -= len;
  92
  93	if (id->dev_type != 0)
  94		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
  95				id->dev_model, suffix);
  96	else
  97		len += snprintf(buf, size, "dtdm%s", suffix);
  98
  99	return len;
 100}
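
As a worked example of the alias format assembled here (the ID values are illustrative only): for cu_type 0x3990, cu_model 0x00, dev_type 0x3390 and dev_model 0x0C, the call below yields "ccw:t3990m00dt3390dm0C":

char buf[30];

/* Sketch: build the modalias string with an empty suffix. */
snprint_alias(buf, sizeof(buf), &cdev->id, "");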
 101
 102/* Set up environment variables for ccw device uevent. Return 0 on success,
 103 * non-zero otherwise. */
 104static int ccw_uevent(const struct device *dev, struct kobj_uevent_env *env)
 105{
 106	const struct ccw_device *cdev = to_ccwdev(dev);
 107	const struct ccw_device_id *id = &(cdev->id);
 108	int ret;
 109	char modalias_buf[30];
 110
 111	/* CU_TYPE= */
 112	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
 113	if (ret)
 114		return ret;
 115
 116	/* CU_MODEL= */
 117	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
 118	if (ret)
 119		return ret;
 120
 121	/* The next two can be zero, that's ok for us */
 122	/* DEV_TYPE= */
 123	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
 124	if (ret)
 125		return ret;
 126
 127	/* DEV_MODEL= */
 128	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
 129	if (ret)
 130		return ret;
 131
 132	/* MODALIAS=  */
 133	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
 134	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
 135	return ret;
 136}
 137
 138static void io_subchannel_irq(struct subchannel *);
 139static int io_subchannel_probe(struct subchannel *);
 140static void io_subchannel_remove(struct subchannel *);
 141static void io_subchannel_shutdown(struct subchannel *);
 142static int io_subchannel_sch_event(struct subchannel *, int);
 143static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
 144				   int);
 145static void recovery_func(struct timer_list *unused);
 146
 147static struct css_device_id io_subchannel_ids[] = {
 148	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 149	{ /* end of list */ },
 150};
 151
 152static int io_subchannel_settle(void)
 153{
 154	int ret;
 155
 156	ret = wait_event_interruptible(ccw_device_init_wq,
 157				atomic_read(&ccw_device_init_count) == 0);
 158	if (ret)
 159		return -EINTR;
 160	flush_workqueue(cio_work_q);
 161	return 0;
 162}
 163
 164static struct css_driver io_subchannel_driver = {
 165	.drv = {
 166		.owner = THIS_MODULE,
 167		.name = "io_subchannel",
 168	},
 169	.subchannel_type = io_subchannel_ids,
 170	.irq = io_subchannel_irq,
 171	.sch_event = io_subchannel_sch_event,
 172	.chp_event = io_subchannel_chp_event,
 173	.probe = io_subchannel_probe,
 174	.remove = io_subchannel_remove,
 175	.shutdown = io_subchannel_shutdown,
 176	.settle = io_subchannel_settle,
 177};
 178
 179int __init io_subchannel_init(void)
 180{
 181	int ret;
 182
 183	timer_setup(&recovery_timer, recovery_func, 0);
 184	ret = bus_register(&ccw_bus_type);
 185	if (ret)
 186		return ret;
 187	ret = css_driver_register(&io_subchannel_driver);
 188	if (ret)
 189		bus_unregister(&ccw_bus_type);
 190
 191	return ret;
 192}
 193
 194
 195/************************ device handling **************************/
 196
 197static ssize_t
 198devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
 199{
 200	struct ccw_device *cdev = to_ccwdev(dev);
 201	struct ccw_device_id *id = &(cdev->id);
 202
 203	if (id->dev_type != 0)
 204		return sprintf(buf, "%04x/%02x\n",
 205				id->dev_type, id->dev_model);
 206	else
 207		return sprintf(buf, "n/a\n");
 208}
 209
 210static ssize_t
 211cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
 212{
 213	struct ccw_device *cdev = to_ccwdev(dev);
 214	struct ccw_device_id *id = &(cdev->id);
 215
 216	return sprintf(buf, "%04x/%02x\n",
 217		       id->cu_type, id->cu_model);
 218}
 219
 220static ssize_t
 221modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
 222{
 223	struct ccw_device *cdev = to_ccwdev(dev);
 224	struct ccw_device_id *id = &(cdev->id);
 225	int len;
 226
 227	len = snprint_alias(buf, PAGE_SIZE, id, "\n");
 228
 229	return len > PAGE_SIZE ? PAGE_SIZE : len;
 230}
 231
 232static ssize_t
 233online_show (struct device *dev, struct device_attribute *attr, char *buf)
 234{
 235	struct ccw_device *cdev = to_ccwdev(dev);
 236
 237	return sprintf(buf, cdev->online ? "1\n" : "0\n");
 238}
 239
 240int ccw_device_is_orphan(struct ccw_device *cdev)
 241{
 242	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
 243}
 244
 245static void ccw_device_unregister(struct ccw_device *cdev)
 246{
 247	mutex_lock(&cdev->reg_mutex);
 248	if (device_is_registered(&cdev->dev)) {
 249		/* Undo device_add(). */
 250		device_del(&cdev->dev);
 251	}
 252	mutex_unlock(&cdev->reg_mutex);
 253
 254	if (cdev->private->flags.initialized) {
 255		cdev->private->flags.initialized = 0;
 256		/* Release reference from device_initialize(). */
 257		put_device(&cdev->dev);
 258	}
 259}
 260
 261static void io_subchannel_quiesce(struct subchannel *);
 262
 263/**
 264 * ccw_device_set_offline() - disable a ccw device for I/O
 265 * @cdev: target ccw device
 266 *
 267 * This function calls the driver's set_offline() function for @cdev, if
 268 * given, and then disables @cdev.
 269 * Returns:
 270 *   %0 on success and a negative error value on failure.
 271 * Context:
 272 *  enabled, ccw device lock not held
 273 */
 274int ccw_device_set_offline(struct ccw_device *cdev)
 275{
 276	struct subchannel *sch;
 277	int ret, state;
 278
 279	if (!cdev)
 280		return -ENODEV;
 281	if (!cdev->online || !cdev->drv)
 282		return -EINVAL;
 283
 284	if (cdev->drv->set_offline) {
 285		ret = cdev->drv->set_offline(cdev);
 286		if (ret != 0)
 287			return ret;
 288	}
 289	spin_lock_irq(cdev->ccwlock);
 290	sch = to_subchannel(cdev->dev.parent);
 291	cdev->online = 0;
 292	/* Wait until a final state or DISCONNECTED is reached */
 293	while (!dev_fsm_final_state(cdev) &&
 294	       cdev->private->state != DEV_STATE_DISCONNECTED) {
 295		spin_unlock_irq(cdev->ccwlock);
 296		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
 297			   cdev->private->state == DEV_STATE_DISCONNECTED));
 298		spin_lock_irq(cdev->ccwlock);
 299	}
 300	do {
 301		ret = ccw_device_offline(cdev);
 302		if (!ret)
 303			break;
 304		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
 305			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
 306			      cdev->private->dev_id.devno);
 307		if (ret != -EBUSY)
 308			goto error;
 309		state = cdev->private->state;
 310		spin_unlock_irq(cdev->ccwlock);
 311		io_subchannel_quiesce(sch);
 312		spin_lock_irq(cdev->ccwlock);
 313		cdev->private->state = state;
 314	} while (ret == -EBUSY);
 315	spin_unlock_irq(cdev->ccwlock);
 316	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
 317		   cdev->private->state == DEV_STATE_DISCONNECTED));
 318	/* Inform the user if set offline failed. */
 319	if (cdev->private->state == DEV_STATE_BOXED) {
 320		pr_warn("%s: The device entered boxed state while being set offline\n",
 321			dev_name(&cdev->dev));
 322	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
 323		pr_warn("%s: The device stopped operating while being set offline\n",
 324			dev_name(&cdev->dev));
 325	}
 326	/* Give up reference from ccw_device_set_online(). */
 327	put_device(&cdev->dev);
 328	return 0;
 329
 330error:
 331	cdev->private->state = DEV_STATE_OFFLINE;
 332	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
 333	spin_unlock_irq(cdev->ccwlock);
 334	/* Give up reference from ccw_device_set_online(). */
 335	put_device(&cdev->dev);
 336	return -ENODEV;
 337}
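
A caller-side sketch of taking a device offline (hypothetical context; the device must be online and bound to a driver, as checked above):

int ret;

/* Sketch: disable the device and report failure. */
ret = ccw_device_set_offline(cdev);
if (ret)
	dev_warn(&cdev->dev, "set offline failed: %d\n", ret);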
 338
 339/**
 340 * ccw_device_set_online() - enable a ccw device for I/O
 341 * @cdev: target ccw device
 342 *
 343 * This function first enables @cdev and then calls the driver's set_online()
 344 * function for @cdev, if given. If set_online() returns an error, @cdev is
 345 * disabled again.
 346 * Returns:
 347 *   %0 on success and a negative error value on failure.
 348 * Context:
 349 *  enabled, ccw device lock not held
 350 */
 351int ccw_device_set_online(struct ccw_device *cdev)
 352{
 353	int ret;
 354	int ret2;
 355
 356	if (!cdev)
 357		return -ENODEV;
 358	if (cdev->online || !cdev->drv)
 359		return -EINVAL;
 360	/* Hold on to an extra reference while device is online. */
 361	if (!get_device(&cdev->dev))
 362		return -ENODEV;
 363
 364	spin_lock_irq(cdev->ccwlock);
 365	ret = ccw_device_online(cdev);
 366	if (ret) {
 367		spin_unlock_irq(cdev->ccwlock);
 368		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
 369			      "device 0.%x.%04x\n",
 370			      ret, cdev->private->dev_id.ssid,
 371			      cdev->private->dev_id.devno);
 372		/* Give up online reference since onlining failed. */
 373		put_device(&cdev->dev);
 374		return ret;
 375	}
 376	/* Wait until a final state is reached */
 377	while (!dev_fsm_final_state(cdev)) {
 378		spin_unlock_irq(cdev->ccwlock);
 379		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
 380		spin_lock_irq(cdev->ccwlock);
 381	}
 382	/* Check if online processing was successful */
 383	if ((cdev->private->state != DEV_STATE_ONLINE) &&
 384	    (cdev->private->state != DEV_STATE_W4SENSE)) {
 385		spin_unlock_irq(cdev->ccwlock);
 386		/* Inform the user that set online failed. */
 387		if (cdev->private->state == DEV_STATE_BOXED) {
 388			pr_warn("%s: Setting the device online failed because it is boxed\n",
 389				dev_name(&cdev->dev));
 390		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
 391			pr_warn("%s: Setting the device online failed because it is not operational\n",
 392				dev_name(&cdev->dev));
 393		}
 394		/* Give up online reference since onlining failed. */
 395		put_device(&cdev->dev);
 396		return -ENODEV;
 397	}
 398	spin_unlock_irq(cdev->ccwlock);
 399	if (cdev->drv->set_online)
 400		ret = cdev->drv->set_online(cdev);
 401	if (ret)
 402		goto rollback;
 403
 404	spin_lock_irq(cdev->ccwlock);
 405	cdev->online = 1;
 406	spin_unlock_irq(cdev->ccwlock);
 407	return 0;
 408
 409rollback:
 410	spin_lock_irq(cdev->ccwlock);
 411	/* Wait until a final state or DISCONNECTED is reached */
 412	while (!dev_fsm_final_state(cdev) &&
 413	       cdev->private->state != DEV_STATE_DISCONNECTED) {
 414		spin_unlock_irq(cdev->ccwlock);
 415		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
 416			   cdev->private->state == DEV_STATE_DISCONNECTED));
 417		spin_lock_irq(cdev->ccwlock);
 418	}
 419	ret2 = ccw_device_offline(cdev);
 420	if (ret2)
 421		goto error;
 422	spin_unlock_irq(cdev->ccwlock);
 423	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
 424		   cdev->private->state == DEV_STATE_DISCONNECTED));
 425	/* Give up online reference since onlining failed. */
 426	put_device(&cdev->dev);
 427	return ret;
 428
 429error:
 430	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
 431		      "device 0.%x.%04x\n",
 432		      ret2, cdev->private->dev_id.ssid,
 433		      cdev->private->dev_id.devno);
 434	cdev->private->state = DEV_STATE_OFFLINE;
 435	spin_unlock_irq(cdev->ccwlock);
 436	/* Give up online reference since onlining failed. */
 437	put_device(&cdev->dev);
 438	return ret;
 439}
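
The onlining counterpart, again as a hedged sketch (assumes @cdev is bound to a driver and currently offline):

int ret;

/* Sketch: enable the device; on failure the function has already cleaned up. */
ret = ccw_device_set_online(cdev);
if (ret)
	dev_warn(&cdev->dev, "set online failed: %d\n", ret);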
 440
 441static int online_store_handle_offline(struct ccw_device *cdev)
 442{
 443	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
 444		spin_lock_irq(cdev->ccwlock);
 445		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
 446		spin_unlock_irq(cdev->ccwlock);
 447		return 0;
 448	}
 449	if (cdev->drv && cdev->drv->set_offline)
 450		return ccw_device_set_offline(cdev);
 451	return -EINVAL;
 452}
 453
 454static int online_store_recog_and_online(struct ccw_device *cdev)
 455{
 456	/* Do device recognition, if needed. */
 457	if (cdev->private->state == DEV_STATE_BOXED) {
 458		spin_lock_irq(cdev->ccwlock);
 459		ccw_device_recognition(cdev);
 460		spin_unlock_irq(cdev->ccwlock);
 461		wait_event(cdev->private->wait_q,
 462			   cdev->private->flags.recog_done);
 463		if (cdev->private->state != DEV_STATE_OFFLINE)
 464			/* recognition failed */
 465			return -EAGAIN;
 466	}
 467	if (cdev->drv && cdev->drv->set_online)
 468		return ccw_device_set_online(cdev);
 469	return -EINVAL;
 470}
 471
 472static int online_store_handle_online(struct ccw_device *cdev, int force)
 473{
 474	int ret;
 475
 476	ret = online_store_recog_and_online(cdev);
 477	if (ret && !force)
 478		return ret;
 479	if (force && cdev->private->state == DEV_STATE_BOXED) {
 480		ret = ccw_device_stlck(cdev);
 481		if (ret)
 482			return ret;
 483		if (cdev->id.cu_type == 0)
 484			cdev->private->state = DEV_STATE_NOT_OPER;
 485		ret = online_store_recog_and_online(cdev);
 486		if (ret)
 487			return ret;
 488	}
 489	return 0;
 490}
 491
 492static ssize_t online_store (struct device *dev, struct device_attribute *attr,
 493			     const char *buf, size_t count)
 494{
 495	struct ccw_device *cdev = to_ccwdev(dev);
 496	int force, ret;
 497	unsigned long i;
 498
 499	/* Prevent conflict between multiple on-/offline processing requests. */
 500	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
 501		return -EAGAIN;
 502	/* Prevent conflict between internal I/Os and on-/offline processing. */
 503	if (!dev_fsm_final_state(cdev) &&
 504	    cdev->private->state != DEV_STATE_DISCONNECTED) {
 505		ret = -EAGAIN;
 506		goto out;
 507	}
 508	/* Prevent conflict between pending work and on-/offline processing.*/
 509	if (work_pending(&cdev->private->todo_work)) {
 510		ret = -EAGAIN;
 511		goto out;
 512	}
 513	if (!strncmp(buf, "force\n", count)) {
 514		force = 1;
 515		i = 1;
 516		ret = 0;
 517	} else {
 518		force = 0;
 519		ret = kstrtoul(buf, 16, &i);
 520	}
 521	if (ret)
 522		goto out;
 523
 524	device_lock(dev);
 525	switch (i) {
 526	case 0:
 527		ret = online_store_handle_offline(cdev);
 528		break;
 529	case 1:
 530		ret = online_store_handle_online(cdev, force);
 531		break;
 532	default:
 533		ret = -EINVAL;
 534	}
 535	device_unlock(dev);
 536
 537out:
 538	atomic_set(&cdev->private->onoff, 0);
 539	return (ret < 0) ? ret : count;
 540}
 541
 542static ssize_t
 543available_show (struct device *dev, struct device_attribute *attr, char *buf)
 544{
 545	struct ccw_device *cdev = to_ccwdev(dev);
 546	struct subchannel *sch;
 547
 548	if (ccw_device_is_orphan(cdev))
 549		return sprintf(buf, "no device\n");
 550	switch (cdev->private->state) {
 551	case DEV_STATE_BOXED:
 552		return sprintf(buf, "boxed\n");
 553	case DEV_STATE_DISCONNECTED:
 554	case DEV_STATE_DISCONNECTED_SENSE_ID:
 555	case DEV_STATE_NOT_OPER:
 556		sch = to_subchannel(dev->parent);
 557		if (!sch->lpm)
 558			return sprintf(buf, "no path\n");
 559		else
 560			return sprintf(buf, "no device\n");
 561	default:
 562		/* All other states considered fine. */
 563		return sprintf(buf, "good\n");
 564	}
 565}
 566
 567static ssize_t
 568initiate_logging(struct device *dev, struct device_attribute *attr,
 569		 const char *buf, size_t count)
 570{
 571	struct subchannel *sch = to_subchannel(dev);
 572	int rc;
 573
 574	rc = chsc_siosl(sch->schid);
 575	if (rc < 0) {
 576		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
 577			sch->schid.ssid, sch->schid.sch_no, rc);
 578		return rc;
 579	}
 580	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
 581		  sch->schid.ssid, sch->schid.sch_no);
 582	return count;
 583}
 584
 585static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
 586			char *buf)
 587{
 588	struct subchannel *sch = to_subchannel(dev);
 589
 590	return sprintf(buf, "%02x\n", sch->vpm);
 591}
 592
 593static DEVICE_ATTR_RO(devtype);
 594static DEVICE_ATTR_RO(cutype);
 595static DEVICE_ATTR_RO(modalias);
 596static DEVICE_ATTR_RW(online);
 597static DEVICE_ATTR(availability, 0444, available_show, NULL);
 598static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
 599static DEVICE_ATTR_RO(vpm);
 600
 601static struct attribute *io_subchannel_attrs[] = {
 602	&dev_attr_logging.attr,
 603	&dev_attr_vpm.attr,
 604	NULL,
 605};
 606
 607static const struct attribute_group io_subchannel_attr_group = {
 608	.attrs = io_subchannel_attrs,
 609};
 610
 611static struct attribute * ccwdev_attrs[] = {
 612	&dev_attr_devtype.attr,
 613	&dev_attr_cutype.attr,
 614	&dev_attr_modalias.attr,
 615	&dev_attr_online.attr,
 616	&dev_attr_cmb_enable.attr,
 617	&dev_attr_availability.attr,
 618	NULL,
 619};
 620
 621static const struct attribute_group ccwdev_attr_group = {
 622	.attrs = ccwdev_attrs,
 623};
 624
 625static const struct attribute_group *ccwdev_attr_groups[] = {
 626	&ccwdev_attr_group,
 627	NULL,
 628};
 629
 630static int match_dev_id(struct device *dev, const void *data)
 631{
 632	struct ccw_device *cdev = to_ccwdev(dev);
 633	struct ccw_dev_id *dev_id = (void *)data;
 634
 635	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
 636}
 637
 638/**
 639 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 640 * @dev_id: id of the device to be searched
 641 *
 642 * This function searches all devices attached to the ccw bus for a device
 643 * matching @dev_id.
 644 * Returns:
 645 *  If a device is found, its reference count is increased and it is
 646 *  returned; else %NULL is returned.
 647 */
 648struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
 649{
 650	struct device *dev;
 651
 652	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
 653
 654	return dev ? to_ccwdev(dev) : NULL;
 655}
 656EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
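
Callers own the returned reference and must drop it when done; a sketch (the ssid/devno values are made up):

struct ccw_dev_id dev_id = { .ssid = 0, .devno = 0x1234 };
struct ccw_device *cdev;

cdev = get_ccwdev_by_dev_id(&dev_id);
if (cdev) {
	/* ... use cdev ... */
	put_device(&cdev->dev);	/* release the lookup reference */
}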
 657
 658static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
 659{
 660	int ret;
 661
 662	mutex_lock(&cdev->reg_mutex);
 663	if (device_is_registered(&cdev->dev)) {
 664		device_release_driver(&cdev->dev);
 665		ret = device_attach(&cdev->dev);
 666		WARN_ON(ret == -ENODEV);
 667	}
 668	mutex_unlock(&cdev->reg_mutex);
 669}
 670
 671static void
 672ccw_device_release(struct device *dev)
 673{
 674	struct ccw_device *cdev;
 675
 676	cdev = to_ccwdev(dev);
 677	cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
 678			sizeof(*cdev->private->dma_area));
 679	cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
 680	/* Release reference of parent subchannel. */
 681	put_device(cdev->dev.parent);
 682	kfree(cdev->private);
 683	kfree(cdev);
 684}
 685
 686static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
 687{
 688	struct ccw_device *cdev;
 689	struct gen_pool *dma_pool;
 690	int ret;
 691
 692	cdev  = kzalloc(sizeof(*cdev), GFP_KERNEL);
 693	if (!cdev) {
 694		ret = -ENOMEM;
 695		goto err_cdev;
 696	}
 697	cdev->private = kzalloc(sizeof(struct ccw_device_private),
 698				GFP_KERNEL | GFP_DMA);
 699	if (!cdev->private) {
 700		ret = -ENOMEM;
 701		goto err_priv;
 702	}
 703
 704	cdev->dev.dma_mask = sch->dev.dma_mask;
 705	ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask);
 706	if (ret)
 707		goto err_coherent_mask;
 708
 709	dma_pool = cio_gp_dma_create(&cdev->dev, 1);
 710	if (!dma_pool) {
 711		ret = -ENOMEM;
 712		goto err_dma_pool;
 713	}
 714	cdev->private->dma_pool = dma_pool;
 715	cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
 716					sizeof(*cdev->private->dma_area));
 717	if (!cdev->private->dma_area) {
 718		ret = -ENOMEM;
 719		goto err_dma_area;
 720	}
 721	return cdev;
 722err_dma_area:
 723	cio_gp_dma_destroy(dma_pool, &cdev->dev);
 724err_dma_pool:
 725err_coherent_mask:
 726	kfree(cdev->private);
 727err_priv:
 728	kfree(cdev);
 729err_cdev:
 730	return ERR_PTR(ret);
 731}
 732
 733static void ccw_device_todo(struct work_struct *work);
 734
 735static int io_subchannel_initialize_dev(struct subchannel *sch,
 736					struct ccw_device *cdev)
 737{
 738	struct ccw_device_private *priv = cdev->private;
 739	int ret;
 740
 741	priv->cdev = cdev;
 742	priv->int_class = IRQIO_CIO;
 743	priv->state = DEV_STATE_NOT_OPER;
 744	priv->dev_id.devno = sch->schib.pmcw.dev;
 745	priv->dev_id.ssid = sch->schid.ssid;
 746
 747	INIT_WORK(&priv->todo_work, ccw_device_todo);
 748	INIT_LIST_HEAD(&priv->cmb_list);
 749	init_waitqueue_head(&priv->wait_q);
 750	timer_setup(&priv->timer, ccw_device_timeout, 0);
 751	mutex_init(&cdev->reg_mutex);
 752
 753	atomic_set(&priv->onoff, 0);
 754	cdev->ccwlock = &sch->lock;
 755	cdev->dev.parent = &sch->dev;
 756	cdev->dev.release = ccw_device_release;
 757	cdev->dev.bus = &ccw_bus_type;
 758	cdev->dev.groups = ccwdev_attr_groups;
 759	/* Do first half of device_register. */
 760	device_initialize(&cdev->dev);
 761	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
 762			   cdev->private->dev_id.devno);
 763	if (ret)
 764		goto out_put;
 765	if (!get_device(&sch->dev)) {
 766		ret = -ENODEV;
 767		goto out_put;
 768	}
 769	priv->flags.initialized = 1;
 770	spin_lock_irq(&sch->lock);
 771	sch_set_cdev(sch, cdev);
 772	spin_unlock_irq(&sch->lock);
 773	return 0;
 774
 775out_put:
 776	/* Release reference from device_initialize(). */
 777	put_device(&cdev->dev);
 778	return ret;
 779}
 780
 781static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
 782{
 783	struct ccw_device *cdev;
 784	int ret;
 785
 786	cdev = io_subchannel_allocate_dev(sch);
 787	if (!IS_ERR(cdev)) {
 788		ret = io_subchannel_initialize_dev(sch, cdev);
 789		if (ret)
 790			cdev = ERR_PTR(ret);
 791	}
 792	return cdev;
 793}
 794
 795static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
 796
 797static void sch_create_and_recog_new_device(struct subchannel *sch)
 798{
 799	struct ccw_device *cdev;
 800
 801	/* Need to allocate a new ccw device. */
 802	cdev = io_subchannel_create_ccwdev(sch);
 803	if (IS_ERR(cdev)) {
 804		/* OK, we did everything we could... */
 805		css_sch_device_unregister(sch);
 806		return;
 807	}
 808	/* Start recognition for the new ccw device. */
 809	io_subchannel_recog(cdev, sch);
 810}
 811
 812/*
 813 * Register recognized device.
 814 */
 815static void io_subchannel_register(struct ccw_device *cdev)
 816{
 817	struct subchannel *sch;
 818	int ret, adjust_init_count = 1;
 819	unsigned long flags;
 820
 821	sch = to_subchannel(cdev->dev.parent);
 822	/*
 823	 * Check if subchannel is still registered. It may have become
 824	 * unregistered if a machine check hit us after finishing
 825	 * device recognition but before the register work could be
 826	 * queued.
 827	 */
 828	if (!device_is_registered(&sch->dev))
 829		goto out_err;
 830	css_update_ssd_info(sch);
 831	/*
 832	 * io_subchannel_register() will also be called after device
 833	 * recognition has been done for a boxed device (which will already
 834	 * be registered). We need to reprobe since we may now have sense id
 835	 * information.
 836	 */
 837	mutex_lock(&cdev->reg_mutex);
 838	if (device_is_registered(&cdev->dev)) {
 839		if (!cdev->drv) {
 840			ret = device_reprobe(&cdev->dev);
 841			if (ret)
 842				/* We can't do much here. */
 843				CIO_MSG_EVENT(0, "device_reprobe() returned"
 844					      " %d for 0.%x.%04x\n", ret,
 845					      cdev->private->dev_id.ssid,
 846					      cdev->private->dev_id.devno);
 847		}
 848		adjust_init_count = 0;
 849		goto out;
 850	}
 851	/* make it known to the system */
 852	ret = device_add(&cdev->dev);
 853	if (ret) {
 854		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
 855			      cdev->private->dev_id.ssid,
 856			      cdev->private->dev_id.devno, ret);
 857		spin_lock_irqsave(&sch->lock, flags);
 858		sch_set_cdev(sch, NULL);
 859		spin_unlock_irqrestore(&sch->lock, flags);
 860		mutex_unlock(&cdev->reg_mutex);
 861		/* Release initial device reference. */
 862		put_device(&cdev->dev);
 863		goto out_err;
 864	}
 865out:
 866	cdev->private->flags.recog_done = 1;
 867	mutex_unlock(&cdev->reg_mutex);
 868	wake_up(&cdev->private->wait_q);
 869out_err:
 870	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
 871		wake_up(&ccw_device_init_wq);
 872}
 873
 874/*
 875 * subchannel recognition done. Called from the state machine.
 876 */
 877void
 878io_subchannel_recog_done(struct ccw_device *cdev)
 879{
 880	if (css_init_done == 0) {
 881		cdev->private->flags.recog_done = 1;
 882		return;
 883	}
 884	switch (cdev->private->state) {
 885	case DEV_STATE_BOXED:
 886		/* Device did not respond in time. */
 887	case DEV_STATE_NOT_OPER:
 888		cdev->private->flags.recog_done = 1;
 889		/* Remove device found not operational. */
 890		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 891		if (atomic_dec_and_test(&ccw_device_init_count))
 892			wake_up(&ccw_device_init_wq);
 893		break;
 894	case DEV_STATE_OFFLINE:
 895		/*
 896		 * We can't register the device in interrupt context so
 897		 * we schedule a work item.
 898		 */
 899		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
 900		break;
 901	}
 902}
 903
 904static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
 905{
 906	/* Increase counter of devices currently in recognition. */
 907	atomic_inc(&ccw_device_init_count);
 908
 909	/* Start async. device sensing. */
 910	spin_lock_irq(&sch->lock);
 911	ccw_device_recognition(cdev);
 912	spin_unlock_irq(&sch->lock);
 913}
 914
 915static int ccw_device_move_to_sch(struct ccw_device *cdev,
 916				  struct subchannel *sch)
 917{
 918	struct subchannel *old_sch;
 919	int rc, old_enabled = 0;
 920
 921	old_sch = to_subchannel(cdev->dev.parent);
 922	/* Obtain child reference for new parent. */
 923	if (!get_device(&sch->dev))
 924		return -ENODEV;
 925
 926	if (!sch_is_pseudo_sch(old_sch)) {
 927		spin_lock_irq(&old_sch->lock);
 928		old_enabled = old_sch->schib.pmcw.ena;
 929		rc = 0;
 930		if (old_enabled)
 931			rc = cio_disable_subchannel(old_sch);
 932		spin_unlock_irq(&old_sch->lock);
 933		if (rc == -EBUSY) {
 934			/* Release child reference for new parent. */
 935			put_device(&sch->dev);
 936			return rc;
 937		}
 938	}
 939
 940	mutex_lock(&sch->reg_mutex);
 941	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
 942	mutex_unlock(&sch->reg_mutex);
 943	if (rc) {
 944		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
 945			      cdev->private->dev_id.ssid,
 946			      cdev->private->dev_id.devno, sch->schid.ssid,
 947			      sch->schib.pmcw.dev, rc);
 948		if (old_enabled) {
 949			/* Try to re-enable the old subchannel. */
 950			spin_lock_irq(&old_sch->lock);
 951			cio_enable_subchannel(old_sch, (u32)virt_to_phys(old_sch));
 952			spin_unlock_irq(&old_sch->lock);
 953		}
 954		/* Release child reference for new parent. */
 955		put_device(&sch->dev);
 956		return rc;
 957	}
 958	/* Clean up old subchannel. */
 959	if (!sch_is_pseudo_sch(old_sch)) {
 960		spin_lock_irq(&old_sch->lock);
 961		sch_set_cdev(old_sch, NULL);
 962		spin_unlock_irq(&old_sch->lock);
 963		css_schedule_eval(old_sch->schid);
 964	}
 965	/* Release child reference for old parent. */
 966	put_device(&old_sch->dev);
 967	/* Initialize new subchannel. */
 968	spin_lock_irq(&sch->lock);
 969	cdev->ccwlock = &sch->lock;
 970	if (!sch_is_pseudo_sch(sch))
 971		sch_set_cdev(sch, cdev);
 972	spin_unlock_irq(&sch->lock);
 973	if (!sch_is_pseudo_sch(sch))
 974		css_update_ssd_info(sch);
 975	return 0;
 976}
 977
 978static int ccw_device_move_to_orph(struct ccw_device *cdev)
 979{
 980	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 981	struct channel_subsystem *css = to_css(sch->dev.parent);
 982
 983	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
 984}
 985
 986static void io_subchannel_irq(struct subchannel *sch)
 987{
 988	struct ccw_device *cdev;
 989
 990	cdev = sch_get_cdev(sch);
 991
 992	CIO_TRACE_EVENT(6, "IRQ");
 993	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
 994	if (cdev)
 995		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
 996	else
 997		inc_irq_stat(IRQIO_CIO);
 998}
 999
1000void io_subchannel_init_config(struct subchannel *sch)
1001{
1002	memset(&sch->config, 0, sizeof(sch->config));
1003	sch->config.csense = 1;
1004}
1005
1006static void io_subchannel_init_fields(struct subchannel *sch)
1007{
1008	if (cio_is_console(sch->schid))
1009		sch->opm = 0xff;
1010	else
1011		sch->opm = chp_get_sch_opm(sch);
1012	sch->lpm = sch->schib.pmcw.pam & sch->opm;
1013	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
1014
1015	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
1016		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
1017		      sch->schib.pmcw.dev, sch->schid.ssid,
1018		      sch->schid.sch_no, sch->schib.pmcw.pim,
1019		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);
1020
1021	io_subchannel_init_config(sch);
1022}
1023
1024/*
1025 * Note: We always return 0 so that we bind to the device even on error.
1026 * This is needed so that our remove function is called on unregister.
1027 */
1028static int io_subchannel_probe(struct subchannel *sch)
1029{
1030	struct io_subchannel_private *io_priv;
1031	struct ccw_device *cdev;
1032	int rc;
1033
1034	if (cio_is_console(sch->schid)) {
1035		rc = sysfs_create_group(&sch->dev.kobj,
1036					&io_subchannel_attr_group);
1037		if (rc)
1038			CIO_MSG_EVENT(0, "Failed to create io subchannel "
1039				      "attributes for subchannel "
1040				      "0.%x.%04x (rc=%d)\n",
1041				      sch->schid.ssid, sch->schid.sch_no, rc);
1042		/*
1043		 * The console subchannel already has an associated ccw_device.
1044		 * Register it and exit.
1045		 */
1046		cdev = sch_get_cdev(sch);
1047		rc = device_add(&cdev->dev);
1048		if (rc) {
1049			/* Release online reference. */
1050			put_device(&cdev->dev);
1051			goto out_schedule;
1052		}
1053		if (atomic_dec_and_test(&ccw_device_init_count))
1054			wake_up(&ccw_device_init_wq);
1055		return 0;
1056	}
1057	io_subchannel_init_fields(sch);
1058	rc = cio_commit_config(sch);
1059	if (rc)
1060		goto out_schedule;
1061	rc = sysfs_create_group(&sch->dev.kobj,
1062				&io_subchannel_attr_group);
1063	if (rc)
1064		goto out_schedule;
1065	/* Allocate I/O subchannel private data. */
1066	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
1067	if (!io_priv)
1068		goto out_schedule;
1069
1070	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
1071				sizeof(*io_priv->dma_area),
1072				&io_priv->dma_area_dma, GFP_KERNEL);
1073	if (!io_priv->dma_area) {
1074		kfree(io_priv);
1075		goto out_schedule;
1076	}
1077
1078	set_io_private(sch, io_priv);
1079	css_schedule_eval(sch->schid);
1080	return 0;
1081
1082out_schedule:
1083	spin_lock_irq(&sch->lock);
1084	css_sched_sch_todo(sch, SCH_TODO_UNREG);
1085	spin_unlock_irq(&sch->lock);
1086	return 0;
1087}
1088
1089static void io_subchannel_remove(struct subchannel *sch)
1090{
1091	struct io_subchannel_private *io_priv = to_io_private(sch);
1092	struct ccw_device *cdev;
1093
1094	cdev = sch_get_cdev(sch);
1095	if (!cdev)
1096		goto out_free;
1097
1098	ccw_device_unregister(cdev);
1099	spin_lock_irq(&sch->lock);
1100	sch_set_cdev(sch, NULL);
1101	set_io_private(sch, NULL);
1102	spin_unlock_irq(&sch->lock);
1103out_free:
1104	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
1105			  io_priv->dma_area, io_priv->dma_area_dma);
1106	kfree(io_priv);
1107	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1108}
1109
1110static void io_subchannel_verify(struct subchannel *sch)
1111{
1112	struct ccw_device *cdev;
1113
1114	cdev = sch_get_cdev(sch);
1115	if (cdev)
1116		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1117	else
1118		css_schedule_eval(sch->schid);
1119}
1120
1121static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1122{
1123	struct ccw_device *cdev;
1124
1125	cdev = sch_get_cdev(sch);
1126	if (!cdev)
1127		return;
1128	if (cio_update_schib(sch))
1129		goto err;
1130	/* Check for I/O on path. */
1131	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
1132		goto out;
1133	if (cdev->private->state == DEV_STATE_ONLINE) {
1134		ccw_device_kill_io(cdev);
1135		goto out;
1136	}
1137	if (cio_clear(sch))
1138		goto err;
1139out:
1140	/* Trigger path verification. */
1141	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1142	return;
1143
1144err:
1145	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
1146}
1147
1148static int io_subchannel_chp_event(struct subchannel *sch,
1149				   struct chp_link *link, int event)
1150{
1151	struct ccw_device *cdev = sch_get_cdev(sch);
1152	int mask, chpid, valid_bit;
1153	int path_event[8];
1154
1155	mask = chp_ssd_get_mask(&sch->ssd_info, link);
1156	if (!mask)
1157		return 0;
1158	switch (event) {
1159	case CHP_VARY_OFF:
1160		sch->opm &= ~mask;
1161		sch->lpm &= ~mask;
1162		if (cdev)
1163			cdev->private->path_gone_mask |= mask;
1164		io_subchannel_terminate_path(sch, mask);
1165		break;
1166	case CHP_VARY_ON:
1167		sch->opm |= mask;
1168		sch->lpm |= mask;
1169		if (cdev)
1170			cdev->private->path_new_mask |= mask;
1171		io_subchannel_verify(sch);
1172		break;
1173	case CHP_OFFLINE:
1174		if (cio_update_schib(sch))
1175			return -ENODEV;
1176		if (cdev)
1177			cdev->private->path_gone_mask |= mask;
1178		io_subchannel_terminate_path(sch, mask);
1179		break;
1180	case CHP_ONLINE:
1181		if (cio_update_schib(sch))
1182			return -ENODEV;
1183		sch->lpm |= mask & sch->opm;
1184		if (cdev)
1185			cdev->private->path_new_mask |= mask;
1186		io_subchannel_verify(sch);
1187		break;
1188	case CHP_FCES_EVENT:
1189		/* Forward Endpoint Security event */
1190		for (chpid = 0, valid_bit = 0x80; chpid < 8; chpid++,
1191				valid_bit >>= 1) {
1192			if (mask & valid_bit)
1193				path_event[chpid] = PE_PATH_FCES_EVENT;
1194			else
1195				path_event[chpid] = PE_NONE;
1196		}
1197		if (cdev && cdev->drv && cdev->drv->path_event)
1198			cdev->drv->path_event(cdev, path_event);
1199		break;
1200	}
1201	return 0;
1202}
1203
1204static void io_subchannel_quiesce(struct subchannel *sch)
1205{
1206	struct ccw_device *cdev;
1207	int ret;
1208
1209	spin_lock_irq(&sch->lock);
1210	cdev = sch_get_cdev(sch);
1211	if (cio_is_console(sch->schid))
1212		goto out_unlock;
1213	if (!sch->schib.pmcw.ena)
1214		goto out_unlock;
1215	ret = cio_disable_subchannel(sch);
1216	if (ret != -EBUSY)
1217		goto out_unlock;
1218	if (cdev->handler)
1219		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
1220	while (ret == -EBUSY) {
1221		cdev->private->state = DEV_STATE_QUIESCE;
1222		cdev->private->iretry = 255;
1223		ret = ccw_device_cancel_halt_clear(cdev);
1224		if (ret == -EBUSY) {
1225			ccw_device_set_timeout(cdev, HZ/10);
1226			spin_unlock_irq(&sch->lock);
1227			wait_event(cdev->private->wait_q,
1228				   cdev->private->state != DEV_STATE_QUIESCE);
1229			spin_lock_irq(&sch->lock);
1230		}
1231		ret = cio_disable_subchannel(sch);
1232	}
1233out_unlock:
1234	spin_unlock_irq(&sch->lock);
1235}
1236
1237static void io_subchannel_shutdown(struct subchannel *sch)
1238{
1239	io_subchannel_quiesce(sch);
1240}
1241
1242static int device_is_disconnected(struct ccw_device *cdev)
1243{
1244	if (!cdev)
1245		return 0;
1246	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
1247		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
1248}
1249
1250static int recovery_check(struct device *dev, void *data)
1251{
1252	struct ccw_device *cdev = to_ccwdev(dev);
1253	struct subchannel *sch;
1254	int *redo = data;
1255
1256	spin_lock_irq(cdev->ccwlock);
1257	switch (cdev->private->state) {
1258	case DEV_STATE_ONLINE:
1259		sch = to_subchannel(cdev->dev.parent);
1260		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
1261			break;
1262		fallthrough;
1263	case DEV_STATE_DISCONNECTED:
1264		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
1265			      cdev->private->dev_id.ssid,
1266			      cdev->private->dev_id.devno);
1267		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1268		*redo = 1;
1269		break;
1270	case DEV_STATE_DISCONNECTED_SENSE_ID:
1271		*redo = 1;
1272		break;
1273	}
1274	spin_unlock_irq(cdev->ccwlock);
1275
1276	return 0;
1277}
1278
1279static void recovery_work_func(struct work_struct *unused)
1280{
1281	int redo = 0;
1282
1283	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1284	if (redo) {
1285		spin_lock_irq(&recovery_lock);
1286		if (!timer_pending(&recovery_timer)) {
1287			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1288				recovery_phase++;
1289			mod_timer(&recovery_timer, jiffies +
1290				  recovery_delay[recovery_phase] * HZ);
1291		}
1292		spin_unlock_irq(&recovery_lock);
1293	} else
1294		CIO_MSG_EVENT(3, "recovery: end\n");
1295}
1296
1297static DECLARE_WORK(recovery_work, recovery_work_func);
1298
1299static void recovery_func(struct timer_list *unused)
1300{
1301	/*
1302	 * We can't do our recovery in softirq context and it's not
1303	 * performance critical, so we schedule it.
1304	 */
1305	schedule_work(&recovery_work);
1306}
1307
1308void ccw_device_schedule_recovery(void)
1309{
1310	unsigned long flags;
1311
1312	CIO_MSG_EVENT(3, "recovery: schedule\n");
1313	spin_lock_irqsave(&recovery_lock, flags);
1314	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1315		recovery_phase = 0;
1316		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1317	}
1318	spin_unlock_irqrestore(&recovery_lock, flags);
1319}
1320
1321static int purge_fn(struct device *dev, void *data)
1322{
1323	struct ccw_device *cdev = to_ccwdev(dev);
1324	struct ccw_dev_id *id = &cdev->private->dev_id;
1325	struct subchannel *sch = to_subchannel(cdev->dev.parent);
1326
1327	spin_lock_irq(cdev->ccwlock);
1328	if (is_blacklisted(id->ssid, id->devno) &&
1329	    (cdev->private->state == DEV_STATE_OFFLINE) &&
1330	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
1331		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
1332			      id->devno);
1333		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1334		css_sched_sch_todo(sch, SCH_TODO_UNREG);
1335		atomic_set(&cdev->private->onoff, 0);
1336	}
1337	spin_unlock_irq(cdev->ccwlock);
1338	/* Abort loop in case of pending signal. */
1339	if (signal_pending(current))
1340		return -EINTR;
1341
1342	return 0;
1343}
1344
1345/**
1346 * ccw_purge_blacklisted - purge unused, blacklisted devices
1347 *
1348 * Unregister all ccw devices that are offline and on the blacklist.
1349 */
1350int ccw_purge_blacklisted(void)
1351{
1352	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
1353	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
1354	return 0;
1355}
1356
1357void ccw_device_set_disconnected(struct ccw_device *cdev)
1358{
1359	if (!cdev)
1360		return;
1361	ccw_device_set_timeout(cdev, 0);
1362	cdev->private->flags.fake_irb = 0;
1363	cdev->private->state = DEV_STATE_DISCONNECTED;
1364	if (cdev->online)
1365		ccw_device_schedule_recovery();
1366}
1367
1368void ccw_device_set_notoper(struct ccw_device *cdev)
1369{
1370	struct subchannel *sch = to_subchannel(cdev->dev.parent);
1371
1372	CIO_TRACE_EVENT(2, "notoper");
1373	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
1374	ccw_device_set_timeout(cdev, 0);
1375	cio_disable_subchannel(sch);
1376	cdev->private->state = DEV_STATE_NOT_OPER;
1377}
1378
1379enum io_sch_action {
1380	IO_SCH_UNREG,
1381	IO_SCH_ORPH_UNREG,
1382	IO_SCH_UNREG_CDEV,
1383	IO_SCH_ATTACH,
1384	IO_SCH_UNREG_ATTACH,
1385	IO_SCH_ORPH_ATTACH,
1386	IO_SCH_REPROBE,
1387	IO_SCH_VERIFY,
1388	IO_SCH_DISC,
1389	IO_SCH_NOP,
1390};
1391
1392static enum io_sch_action sch_get_action(struct subchannel *sch)
1393{
1394	struct ccw_device *cdev;
1395
1396	cdev = sch_get_cdev(sch);
1397	if (cio_update_schib(sch)) {
1398		/* Not operational. */
1399		if (!cdev)
1400			return IO_SCH_UNREG;
1401		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
1402			return IO_SCH_UNREG;
1403		return IO_SCH_ORPH_UNREG;
1404	}
1405	/* Operational. */
1406	if (!cdev)
1407		return IO_SCH_ATTACH;
1408	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
1409		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
1410			return IO_SCH_UNREG_ATTACH;
1411		return IO_SCH_ORPH_ATTACH;
1412	}
1413	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
1414		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
1415			return IO_SCH_UNREG_CDEV;
1416		return IO_SCH_DISC;
1417	}
1418	if (device_is_disconnected(cdev))
1419		return IO_SCH_REPROBE;
1420	if (cdev->online)
1421		return IO_SCH_VERIFY;
1422	if (cdev->private->state == DEV_STATE_NOT_OPER)
1423		return IO_SCH_UNREG_ATTACH;
1424	return IO_SCH_NOP;
1425}
1426
1427/**
1428 * io_subchannel_sch_event - process subchannel event
1429 * @sch: subchannel
1430 * @process: non-zero if function is called in process context
1431 *
1432 * An unspecified event occurred for this subchannel. Adjust data according
1433 * to the current operational state of the subchannel and device. Return
1434 * zero when the event has been handled sufficiently or -EAGAIN when this
1435 * function should be called again in process context.
1436 */
1437static int io_subchannel_sch_event(struct subchannel *sch, int process)
1438{
1439	unsigned long flags;
1440	struct ccw_device *cdev;
1441	struct ccw_dev_id dev_id;
1442	enum io_sch_action action;
1443	int rc = -EAGAIN;
1444
1445	spin_lock_irqsave(&sch->lock, flags);
1446	if (!device_is_registered(&sch->dev))
1447		goto out_unlock;
1448	if (work_pending(&sch->todo_work))
1449		goto out_unlock;
1450	cdev = sch_get_cdev(sch);
1451	if (cdev && work_pending(&cdev->private->todo_work))
1452		goto out_unlock;
1453	action = sch_get_action(sch);
1454	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
1455		      sch->schid.ssid, sch->schid.sch_no, process,
1456		      action);
1457	/* Perform immediate actions while holding the lock. */
1458	switch (action) {
1459	case IO_SCH_REPROBE:
1460		/* Trigger device recognition. */
1461		ccw_device_trigger_reprobe(cdev);
1462		rc = 0;
1463		goto out_unlock;
1464	case IO_SCH_VERIFY:
1465		/* Trigger path verification. */
1466		io_subchannel_verify(sch);
1467		rc = 0;
1468		goto out_unlock;
1469	case IO_SCH_DISC:
1470		ccw_device_set_disconnected(cdev);
1471		rc = 0;
1472		goto out_unlock;
1473	case IO_SCH_ORPH_UNREG:
1474	case IO_SCH_ORPH_ATTACH:
1475		ccw_device_set_disconnected(cdev);
1476		break;
1477	case IO_SCH_UNREG_CDEV:
1478	case IO_SCH_UNREG_ATTACH:
1479	case IO_SCH_UNREG:
1480		if (!cdev)
1481			break;
1482		if (cdev->private->state == DEV_STATE_SENSE_ID) {
1483			/*
1484			 * Note: delayed work triggered by this event
1485			 * and repeated calls to sch_event are synchronized
1486			 * by the above check for work_pending(cdev).
1487			 */
1488			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
1489		} else
1490			ccw_device_set_notoper(cdev);
1491		break;
1492	case IO_SCH_NOP:
1493		rc = 0;
1494		goto out_unlock;
1495	default:
1496		break;
1497	}
1498	spin_unlock_irqrestore(&sch->lock, flags);
1499	/* All other actions require process context. */
1500	if (!process)
1501		goto out;
1502	/* Handle attached ccw device. */
1503	switch (action) {
1504	case IO_SCH_ORPH_UNREG:
1505	case IO_SCH_ORPH_ATTACH:
1506		/* Move ccw device to orphanage. */
1507		rc = ccw_device_move_to_orph(cdev);
1508		if (rc)
1509			goto out;
1510		break;
1511	case IO_SCH_UNREG_CDEV:
1512	case IO_SCH_UNREG_ATTACH:
1513		spin_lock_irqsave(&sch->lock, flags);
1514		sch_set_cdev(sch, NULL);
1515		spin_unlock_irqrestore(&sch->lock, flags);
1516		/* Unregister ccw device. */
1517		ccw_device_unregister(cdev);
1518		break;
1519	default:
1520		break;
1521	}
1522	/* Handle subchannel. */
1523	switch (action) {
1524	case IO_SCH_ORPH_UNREG:
1525	case IO_SCH_UNREG:
1526		css_sch_device_unregister(sch);
1527		break;
1528	case IO_SCH_ORPH_ATTACH:
1529	case IO_SCH_UNREG_ATTACH:
1530	case IO_SCH_ATTACH:
1531		dev_id.ssid = sch->schid.ssid;
1532		dev_id.devno = sch->schib.pmcw.dev;
1533		cdev = get_ccwdev_by_dev_id(&dev_id);
1534		if (!cdev) {
1535			sch_create_and_recog_new_device(sch);
1536			break;
1537		}
1538		rc = ccw_device_move_to_sch(cdev, sch);
1539		if (rc) {
1540			/* Release reference from get_ccwdev_by_dev_id() */
1541			put_device(&cdev->dev);
1542			goto out;
1543		}
1544		spin_lock_irqsave(&sch->lock, flags);
1545		ccw_device_trigger_reprobe(cdev);
1546		spin_unlock_irqrestore(&sch->lock, flags);
1547		/* Release reference from get_ccwdev_by_dev_id() */
1548		put_device(&cdev->dev);
1549		break;
1550	default:
1551		break;
1552	}
1553	return 0;
1554
1555out_unlock:
1556	spin_unlock_irqrestore(&sch->lock, flags);
1557out:
1558	return rc;
1559}
1560
1561static void ccw_device_set_int_class(struct ccw_device *cdev)
1562{
1563	struct ccw_driver *cdrv = cdev->drv;
1564
1565	/* Note: we interpret class 0 in this context as an uninitialized
1566	 * field since it translates to a non-I/O interrupt class. */
1567	if (cdrv->int_class != 0)
1568		cdev->private->int_class = cdrv->int_class;
1569	else
1570		cdev->private->int_class = IRQIO_CIO;
1571}
1572
1573#ifdef CONFIG_CCW_CONSOLE
1574int __init ccw_device_enable_console(struct ccw_device *cdev)
1575{
1576	struct subchannel *sch = to_subchannel(cdev->dev.parent);
1577	int rc;
1578
1579	if (!cdev->drv || !cdev->handler)
1580		return -EINVAL;
1581
1582	io_subchannel_init_fields(sch);
1583	rc = cio_commit_config(sch);
1584	if (rc)
1585		return rc;
1586	sch->driver = &io_subchannel_driver;
1587	io_subchannel_recog(cdev, sch);
1588	/* Now wait for the async. recognition to come to an end. */
1589	spin_lock_irq(cdev->ccwlock);
1590	while (!dev_fsm_final_state(cdev))
1591		ccw_device_wait_idle(cdev);
1592
1593	/* Hold on to an extra reference while device is online. */
1594	get_device(&cdev->dev);
1595	rc = ccw_device_online(cdev);
1596	if (rc)
1597		goto out_unlock;
1598
1599	while (!dev_fsm_final_state(cdev))
1600		ccw_device_wait_idle(cdev);
1601
1602	if (cdev->private->state == DEV_STATE_ONLINE)
1603		cdev->online = 1;
1604	else
1605		rc = -EIO;
1606out_unlock:
1607	spin_unlock_irq(cdev->ccwlock);
1608	if (rc) /* Give up online reference since onlining failed. */
1609		put_device(&cdev->dev);
1610	return rc;
1611}
1612
1613struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
1614{
1615	struct io_subchannel_private *io_priv;
1616	struct ccw_device *cdev;
1617	struct subchannel *sch;
1618
1619	sch = cio_probe_console();
1620	if (IS_ERR(sch))
1621		return ERR_CAST(sch);
1622
1623	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
1624	if (!io_priv)
1625		goto err_priv;
1626	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
1627				sizeof(*io_priv->dma_area),
1628				&io_priv->dma_area_dma, GFP_KERNEL);
1629	if (!io_priv->dma_area)
1630		goto err_dma_area;
1631	set_io_private(sch, io_priv);
1632	cdev = io_subchannel_create_ccwdev(sch);
1633	if (IS_ERR(cdev)) {
1634		dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
1635				  io_priv->dma_area, io_priv->dma_area_dma);
1636		set_io_private(sch, NULL);
1637		put_device(&sch->dev);
1638		kfree(io_priv);
1639		return cdev;
1640	}
1641	cdev->drv = drv;
1642	ccw_device_set_int_class(cdev);
1643	return cdev;
1644
1645err_dma_area:
1646	kfree(io_priv);
1647err_priv:
1648	put_device(&sch->dev);
1649	return ERR_PTR(-ENOMEM);
1650}
1651
1652void __init ccw_device_destroy_console(struct ccw_device *cdev)
1653{
1654	struct subchannel *sch = to_subchannel(cdev->dev.parent);
1655	struct io_subchannel_private *io_priv = to_io_private(sch);
1656
1657	set_io_private(sch, NULL);
1658	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
1659			  io_priv->dma_area, io_priv->dma_area_dma);
1660	put_device(&sch->dev);
1661	put_device(&cdev->dev);
1662	kfree(io_priv);
1663}
1664
1665/**
1666 * ccw_device_wait_idle() - busy wait for device to become idle
1667 * @cdev: ccw device
1668 *
1669 * Poll until activity control is zero, that is, no function or data
1670 * transfer is pending/active.
1671 * Called with device lock being held.
1672 */
1673void ccw_device_wait_idle(struct ccw_device *cdev)
1674{
1675	struct subchannel *sch = to_subchannel(cdev->dev.parent);
1676
1677	while (1) {
1678		cio_tsch(sch);
1679		if (sch->schib.scsw.cmd.actl == 0)
1680			break;
1681		udelay(100);
1682	}
1683}
1684#endif
1685
1686/**
1687 * get_ccwdev_by_busid() - obtain device from a bus id
1688 * @cdrv: driver the device is owned by
1689 * @bus_id: bus id of the device to be searched
1690 *
1691 * This function searches all devices owned by @cdrv for a device with a bus
1692 * id matching @bus_id.
1693 * Returns:
1694 *  If a match is found, the reference count of the found device is
1695 *  increased and the device is returned; else %NULL is returned.
1696 */
1697struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
1698				       const char *bus_id)
1699{
1700	struct device *dev;
1701
1702	dev = driver_find_device_by_name(&cdrv->driver, bus_id);
1703
1704	return dev ? to_ccwdev(dev) : NULL;
1705}
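/*
 * Illustrative sketch (hypothetical caller): look up one of
 * foo_driver's devices by its bus id and drop the acquired reference
 * when done.
 *
 *	struct ccw_device *cdev;
 *
 *	cdev = get_ccwdev_by_busid(&foo_driver, "0.0.1234");
 *	if (!cdev)
 *		return -ENODEV;
 *	(use cdev)
 *	put_device(&cdev->dev);
 */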
1706
1707/************************** device driver handling ************************/
1708
1709	/* This is the implementation of the ccw_driver class. The probe, remove
1710	 * and release methods are very similar to the generic device_driver
1711	 * implementations, except that they take ccw_device arguments rather
1712	 * than plain struct device arguments.
1713	 *
1714	 * A ccw driver also contains the information that is needed for
1715	 * device matching.
1716	 */
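/*
 * Illustrative sketch (hypothetical driver, not part of this file): the
 * matching information is a ccw_device_id table referenced from the
 * ccw_driver, and the probe/remove callbacks receive the matched
 * ccw_device.
 *
 *	static struct ccw_device_id foo_ids[] = {
 *		{ CCW_DEVICE(0x3990, 0) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(ccw, foo_ids);
 *
 *	static int foo_probe(struct ccw_device *cdev)
 *	{
 *		dev_info(&cdev->dev, "bound\n");
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct ccw_device *cdev)
 *	{
 *		dev_info(&cdev->dev, "unbound\n");
 *	}
 *
 *	static struct ccw_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.owner	= THIS_MODULE,
 *		},
 *		.ids	= foo_ids,
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 */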
1717static int
1718ccw_device_probe (struct device *dev)
1719{
1720	struct ccw_device *cdev = to_ccwdev(dev);
1721	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
1722	int ret;
1723
1724	cdev->drv = cdrv; /* to let the driver call _set_online */
1725	ccw_device_set_int_class(cdev);
1726	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
1727	if (ret) {
1728		cdev->drv = NULL;
1729		cdev->private->int_class = IRQIO_CIO;
1730		return ret;
1731	}
1732
1733	return 0;
1734}
1735
1736static void ccw_device_remove(struct device *dev)
1737{
1738	struct ccw_device *cdev = to_ccwdev(dev);
1739	struct ccw_driver *cdrv = cdev->drv;
1740	struct subchannel *sch;
1741	int ret;
1742
1743	if (cdrv->remove)
1744		cdrv->remove(cdev);
1745
1746	spin_lock_irq(cdev->ccwlock);
1747	if (cdev->online) {
1748		cdev->online = 0;
1749		ret = ccw_device_offline(cdev);
1750		spin_unlock_irq(cdev->ccwlock);
1751		if (ret == 0)
1752			wait_event(cdev->private->wait_q,
1753				   dev_fsm_final_state(cdev));
1754		else
1755			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
1756				      "device 0.%x.%04x\n",
1757				      ret, cdev->private->dev_id.ssid,
1758				      cdev->private->dev_id.devno);
1759		/* Give up reference obtained in ccw_device_set_online(). */
1760		put_device(&cdev->dev);
1761		spin_lock_irq(cdev->ccwlock);
1762	}
1763	ccw_device_set_timeout(cdev, 0);
1764	cdev->drv = NULL;
1765	cdev->private->int_class = IRQIO_CIO;
1766	sch = to_subchannel(cdev->dev.parent);
1767	spin_unlock_irq(cdev->ccwlock);
1768	io_subchannel_quiesce(sch);
1769	__disable_cmf(cdev);
1770}
1771
1772static void ccw_device_shutdown(struct device *dev)
1773{
1774	struct ccw_device *cdev;
1775
1776	cdev = to_ccwdev(dev);
1777	if (cdev->drv && cdev->drv->shutdown)
1778		cdev->drv->shutdown(cdev);
1779	__disable_cmf(cdev);
1780}
1781
1782static const struct bus_type ccw_bus_type = {
1783	.name   = "ccw",
1784	.match  = ccw_bus_match,
1785	.uevent = ccw_uevent,
1786	.probe  = ccw_device_probe,
1787	.remove = ccw_device_remove,
1788	.shutdown = ccw_device_shutdown,
1789};
1790
1791/**
1792 * ccw_driver_register() - register a ccw driver
1793 * @cdriver: driver to be registered
1794 *
1795 * This function is mainly a wrapper around driver_register().
1796 * Returns:
1797 *   %0 on success and a negative error value on failure.
1798 */
1799int ccw_driver_register(struct ccw_driver *cdriver)
1800{
1801	struct device_driver *drv = &cdriver->driver;
1802
1803	drv->bus = &ccw_bus_type;
1804
1805	return driver_register(drv);
1806}
1807
1808/**
1809 * ccw_driver_unregister() - deregister a ccw driver
1810 * @cdriver: driver to be deregistered
1811 *
1812 * This function is mainly a wrapper around driver_unregister().
1813 */
1814void ccw_driver_unregister(struct ccw_driver *cdriver)
1815{
1816	driver_unregister(&cdriver->driver);
1817}
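/*
 * Illustrative sketch (hypothetical module, continuing the foo_driver
 * example above): registration belongs in module init, deregistration
 * in module exit.
 *
 *	static int __init foo_init(void)
 *	{
 *		return ccw_driver_register(&foo_driver);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		ccw_driver_unregister(&foo_driver);
 *	}
 *
 *	module_init(foo_init);
 *	module_exit(foo_exit);
 */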
1818
1819static void ccw_device_todo(struct work_struct *work)
1820{
1821	struct ccw_device_private *priv;
1822	struct ccw_device *cdev;
1823	struct subchannel *sch;
1824	enum cdev_todo todo;
1825
1826	priv = container_of(work, struct ccw_device_private, todo_work);
1827	cdev = priv->cdev;
1828	sch = to_subchannel(cdev->dev.parent);
1829	/* Find out todo. */
1830	spin_lock_irq(cdev->ccwlock);
1831	todo = priv->todo;
1832	priv->todo = CDEV_TODO_NOTHING;
1833	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
1834		      priv->dev_id.ssid, priv->dev_id.devno, todo);
1835	spin_unlock_irq(cdev->ccwlock);
1836	/* Perform todo. */
1837	switch (todo) {
1838	case CDEV_TODO_ENABLE_CMF:
1839		cmf_reenable(cdev);
1840		break;
1841	case CDEV_TODO_REBIND:
1842		ccw_device_do_unbind_bind(cdev);
1843		break;
1844	case CDEV_TODO_REGISTER:
1845		io_subchannel_register(cdev);
1846		break;
1847	case CDEV_TODO_UNREG_EVAL:
1848		if (!sch_is_pseudo_sch(sch))
1849			css_schedule_eval(sch->schid);
1850		fallthrough;
1851	case CDEV_TODO_UNREG:
1852		spin_lock_irq(&sch->lock);
1853		sch_set_cdev(sch, NULL);
1854		spin_unlock_irq(&sch->lock);
1855		ccw_device_unregister(cdev);
1856		break;
1857	default:
1858		break;
1859	}
1860	/* Release workqueue ref. */
1861	put_device(&cdev->dev);
1862}
1863
1864/**
1865 * ccw_device_sched_todo() - schedule ccw device operation
1866 * @cdev: ccw device
1867 * @todo: todo value identifying the operation
1868 *
1869 * Schedule the operation identified by @todo to be performed on the slow path
1870 * workqueue. Do nothing if another operation with higher priority is already
1871 * scheduled. Needs to be called with ccwdev lock held.
1872 */
1873void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
1874{
1875	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
1876		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
1877		      todo);
1878	if (cdev->private->todo >= todo)
1879		return;
1880	cdev->private->todo = todo;
1881	/* Get workqueue ref. */
1882	if (!get_device(&cdev->dev))
1883		return;
1884	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
1885		/* Already queued, release workqueue ref. */
1886		put_device(&cdev->dev);
1887	}
1888}
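/*
 * Illustrative sketch (hypothetical caller): the ccwdev lock must be
 * held while scheduling, and a todo of lower or equal priority than
 * the one already pending is silently dropped by the check above.
 *
 *	spin_lock_irqsave(cdev->ccwlock, flags);
 *	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
 *	spin_unlock_irqrestore(cdev->ccwlock, flags);
 */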
1889
1890/**
1891 * ccw_device_siosl() - initiate logging
1892 * @cdev: ccw device
1893 *
1894 * This function is used to invoke model-dependent logging within the channel
1895 * subsystem.
1896 */
1897int ccw_device_siosl(struct ccw_device *cdev)
1898{
1899	struct subchannel *sch = to_subchannel(cdev->dev.parent);
1900
1901	return chsc_siosl(sch->schid);
1902}
1903EXPORT_SYMBOL_GPL(ccw_device_siosl);
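/*
 * Illustrative sketch (hypothetical error path in a driver): request
 * model-dependent logging when an unexpected device state is seen.
 *
 *	rc = ccw_device_siosl(cdev);
 *	if (rc)
 *		dev_warn(&cdev->dev, "logging request failed (rc=%d)\n", rc);
 */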
1904
1905EXPORT_SYMBOL(ccw_device_set_online);
1906EXPORT_SYMBOL(ccw_device_set_offline);
1907EXPORT_SYMBOL(ccw_driver_register);
1908EXPORT_SYMBOL(ccw_driver_unregister);
1909EXPORT_SYMBOL(get_ccwdev_by_busid);