// SPDX-License-Identifier: GPL-1.0+
/*
 *  bus driver for ccw devices
 *
 *    Copyright IBM Corp. 2002, 2008
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>
#include <linux/dma-mapping.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>

#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "chsc.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };
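
/*
 * The delays above are used as an increasing back-off: device recovery
 * is retried after 3, then 30, then 300 seconds, and the last interval
 * is repeated for as long as any device is still in need of recovery
 * (see recovery_work_func() below).
 */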

static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static struct bus_type ccw_bus_type;

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel subsystem per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match (struct device * dev, struct device_driver * drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}
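
/*
 * Example (hypothetical device: control unit type/model 3990/E9 with an
 * attached device of type/model 3390/0C): snprint_alias() renders this
 * as "ccw:t3990mE9dt3390dm0C".
 */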

/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}
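
/*
 * For the hypothetical 3990/E9 control unit with a 3390/0C device used
 * as an example above, the resulting uevent environment would be:
 *
 *   CU_TYPE=3990
 *   CU_MODEL=E9
 *   DEV_TYPE=3390
 *   DEV_MODEL=0C
 *   MODALIAS=ccw:t3990mE9dt3390dm0C
 */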

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(struct timer_list *unused);

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

static int io_subchannel_prepare(struct subchannel *sch)
{
	struct ccw_device *cdev;
	/*
	 * Don't allow suspend while a ccw device registration
	 * is still outstanding.
	 */
	cdev = sch_get_cdev(sch);
	if (cdev && !device_is_registered(&cdev->dev))
		return -EAGAIN;
	return 0;
}

static int io_subchannel_settle(void)
{
	int ret;

	ret = wait_event_interruptible(ccw_device_init_wq,
				atomic_read(&ccw_device_init_count) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}

static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.prepare = io_subchannel_prepare,
	.settle = io_subchannel_settle,
};

int __init io_subchannel_init(void)
{
	int ret;

	timer_setup(&recovery_timer, recovery_func, 0);
	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}


/************************ device handling **************************/

static ssize_t
devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
				id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}

static void io_subchannel_quiesce(struct subchannel *);

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	cdev->online = 0;
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warn("%s: The device entered boxed state while being set offline\n",
			dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warn("%s: The device stopped operating while being set offline\n",
			dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}

/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warn("%s: Setting the device online failed because it is boxed\n",
				dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warn("%s: Setting the device online failed because it is not operational\n",
				dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;

	spin_lock_irq(cdev->ccwlock);
	cdev->online = 1;
	spin_unlock_irq(cdev->ccwlock);
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}

static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
		return 0;
	}
	if (cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return -EINVAL;
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}

static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}

static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out;
	}
	/* Prevent conflict between pending work and on-/offline processing. */
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = kstrtoul(buf, 16, &i);
	}
	if (ret)
		goto out;

	device_lock(dev);
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
	device_unlock(dev);

out:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
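
/*
 * Typical use from user space (the bus id 0.0.1234 is made up):
 *
 *   echo 1 > /sys/bus/ccw/devices/0.0.1234/online
 *   echo 0 > /sys/bus/ccw/devices/0.0.1234/online
 *   echo force > /sys/bus/ccw/devices/0.0.1234/online
 *
 * where "force" additionally tries to steal the device lock of a boxed
 * device before setting it online.
 */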

static ssize_t
available_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}

static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int rc;

	rc = chsc_siosl(sch->schid);
	if (rc < 0) {
		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
			sch->schid.ssid, sch->schid.sch_no, rc);
		return rc;
	}
	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
		  sch->schid.ssid, sch->schid.sch_no);
	return count;
}

static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%02x\n", sch->vpm);
}

static DEVICE_ATTR_RO(devtype);
static DEVICE_ATTR_RO(cutype);
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RW(online);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR_RO(vpm);

static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static const struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static const struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

static int ccw_device_add(struct ccw_device *cdev)
{
	struct device *dev = &cdev->dev;

	dev->bus = &ccw_bus_type;
	return device_add(dev);
}

static int match_dev_id(struct device *dev, const void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = (void *)data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *  If a device is found its reference count is increased and returned;
 *  else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
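
/*
 * Sketch of the intended use (illustrative only, device id made up);
 * the caller owns the reference returned by a successful lookup and
 * must drop it with put_device():
 *
 *	struct ccw_dev_id dev_id = { .ssid = 0, .devno = 0x1234 };
 *	struct ccw_device *cdev;
 *
 *	cdev = get_ccwdev_by_dev_id(&dev_id);
 *	if (cdev) {
 *		... use cdev ...
 *		put_device(&cdev->dev);
 *	}
 */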

static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		WARN_ON(ret == -ENODEV);
	}
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
			sizeof(*cdev->private->dma_area));
	cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}

static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	struct gen_pool *dma_pool;

	cdev  = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		goto err_cdev;
	cdev->private = kzalloc(sizeof(struct ccw_device_private),
				GFP_KERNEL | GFP_DMA);
	if (!cdev->private)
		goto err_priv;
	cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
	cdev->dev.dma_mask = sch->dev.dma_mask;
	dma_pool = cio_gp_dma_create(&cdev->dev, 1);
	if (!dma_pool)
		goto err_dma_pool;
	cdev->private->dma_pool = dma_pool;
	cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
					sizeof(*cdev->private->dma_area));
	if (!cdev->private->dma_area)
		goto err_dma_area;
	return cdev;
err_dma_area:
	cio_gp_dma_destroy(dma_pool, &cdev->dev);
err_dma_pool:
	kfree(cdev->private);
err_priv:
	kfree(cdev);
err_cdev:
	return ERR_PTR(-ENOMEM);
}

static void ccw_device_todo(struct work_struct *work);

static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	struct ccw_device_private *priv = cdev->private;
	int ret;

	priv->cdev = cdev;
	priv->int_class = IRQIO_CIO;
	priv->state = DEV_STATE_NOT_OPER;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;

	INIT_WORK(&priv->todo_work, ccw_device_todo);
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	timer_setup(&priv->timer, ccw_device_timeout, 0);

	atomic_set(&priv->onoff, 0);
	cdev->ccwlock = sch->lock;
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
			   cdev->private->dev_id.devno);
	if (ret)
		goto out_put;
	if (!get_device(&sch->dev)) {
		ret = -ENODEV;
		goto out_put;
	}
	priv->flags.initialized = 1;
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	return 0;

out_put:
	/* Release reference from device_initialize(). */
	put_device(&cdev->dev);
	return ret;
}

static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret)
			cdev = ERR_PTR(ret);
	}
	return cdev;
}

static void io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}

/*
 * Register recognized device.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		adjust_init_count = 0;
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	if (dev_get_uevent_suppress(&sch->dev)) {
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	/* make it known to the system */
	ret = ccw_device_add(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}

/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}

static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}

static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
			spin_unlock_irq(old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}
static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}

static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(6, "IRQ");
	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
	else
		inc_irq_stat(IRQIO_CIO);
}

void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}

static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}

/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Throw the delayed uevent for the subchannel, register
		 * the ccw_device and exit.
		 */
		if (dev_get_uevent_suppress(&sch->dev)) {
			/* should always be the case for the console */
			dev_set_uevent_suppress(&sch->dev, 0);
			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		}
		cdev = sch_get_cdev(sch);
		rc = ccw_device_add(cdev);
		if (rc) {
			/* Release online reference. */
			put_device(&cdev->dev);
			goto out_schedule;
		}
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area) {
		kfree(io_priv);
		goto out_schedule;
	}

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}

static int io_subchannel_remove(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = to_io_private(sch);
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;

	ccw_device_unregister(cdev);
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, NULL);
	set_io_private(sch, NULL);
	spin_unlock_irq(sch->lock);
out_free:
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	kfree(io_priv);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}

static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}

static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask;

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	}
	return 0;
}

static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}

static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}

static int device_is_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_ONLINE:
		sch = to_subchannel(cdev->dev.parent);
		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
			break;
		fallthrough;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}

static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(3, "recovery: end\n");
}

static DECLARE_WORK(recovery_work, recovery_work_func);

static void recovery_func(struct timer_list *unused)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}

void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(3, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}

static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE) &&
	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		atomic_set(&cdev->private->onoff, 0);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

/**
 * ccw_purge_blacklisted - purge unused, blacklisted devices
 *
 * Unregister all ccw devices that are offline and on the blacklist.
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
	return 0;
}
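
/*
 * This is typically triggered from user space via the cio_ignore
 * interface, e.g.:
 *
 *   echo purge > /proc/cio_ignore
 */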

void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}

enum io_sch_action {
	IO_SCH_UNREG,
	IO_SCH_ORPH_UNREG,
	IO_SCH_ATTACH,
	IO_SCH_UNREG_ATTACH,
	IO_SCH_ORPH_ATTACH,
	IO_SCH_REPROBE,
	IO_SCH_VERIFY,
	IO_SCH_DISC,
	IO_SCH_NOP,
};

static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online && !cdev->private->flags.resuming)
		return IO_SCH_VERIFY;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return IO_SCH_UNREG_ATTACH;
	return IO_SCH_NOP;
}

/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (!cdev)
			break;
		if (cdev->private->state == DEV_STATE_SENSE_ID) {
			/*
			 * Note: delayed work triggered by this event
			 * and repeated calls to sch_event are synchronized
			 * by the above check for work_pending(cdev).
			 */
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_ATTACH:
		spin_lock_irqsave(sch->lock, flags);
		if (cdev->private->flags.resuming) {
			/* Device will be handled later. */
			rc = 0;
			goto out_unlock;
		}
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		if (!cdev || !cdev->private->flags.resuming)
			css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	return 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
out:
	return rc;
}

static void ccw_device_set_int_class(struct ccw_device *cdev)
{
	struct ccw_driver *cdrv = cdev->drv;

	/* Note: we interpret class 0 in this context as an uninitialized
	 * field since it translates to a non-I/O interrupt class. */
	if (cdrv->int_class != 0)
		cdev->private->int_class = cdrv->int_class;
	else
		cdev->private->int_class = IRQIO_CIO;
}

#ifdef CONFIG_CCW_CONSOLE
int __init ccw_device_enable_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int rc;

	if (!cdev->drv || !cdev->handler)
		return -EINVAL;

	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	/* Hold on to an extra reference while device is online. */
	get_device(&cdev->dev);
	rc = ccw_device_online(cdev);
	if (rc)
		goto out_unlock;

	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	if (cdev->private->state == DEV_STATE_ONLINE)
		cdev->online = 1;
	else
		rc = -EIO;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	if (rc) /* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
	return rc;
}

struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	sch = cio_probe_console();
	if (IS_ERR(sch))
		return ERR_CAST(sch);

	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto err_priv;
	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area)
		goto err_dma_area;
	set_io_private(sch, io_priv);
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
				  io_priv->dma_area, io_priv->dma_area_dma);
		set_io_private(sch, NULL);
		put_device(&sch->dev);
		kfree(io_priv);
		return cdev;
	}
	cdev->drv = drv;
	ccw_device_set_int_class(cdev);
	return cdev;

err_dma_area:
	kfree(io_priv);
err_priv:
	put_device(&sch->dev);
	return ERR_PTR(-ENOMEM);
}

void __init ccw_device_destroy_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct io_subchannel_private *io_priv = to_io_private(sch);

	set_io_private(sch, NULL);
	put_device(&sch->dev);
	put_device(&cdev->dev);
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	kfree(io_priv);
}

/**
 * ccw_device_wait_idle() - busy wait for device to become idle
 * @cdev: ccw device
 *
 * Poll until activity control is zero, that is, no function or data
 * transfer is pending/active.
 * Called with device lock being held.
 */
void ccw_device_wait_idle(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	while (1) {
		cio_tsch(sch);
		if (sch->schib.scsw.cmd.actl == 0)
			break;
		udelay_simple(100);
	}
}

static int ccw_device_pm_restore(struct device *dev);

int ccw_device_force_console(struct ccw_device *cdev)
{
	return ccw_device_pm_restore(&cdev->dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
#endif

/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *  If a match is found, the reference count of the found device is increased
 *  and the device is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;

	dev = driver_find_device_by_name(&cdrv->driver, bus_id);

	return dev ? to_ccwdev(dev) : NULL;
}

/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
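
/* A minimal driver skeleton might look like this (illustrative sketch,
 * all "foo" names are made up):
 *
 *	static struct ccw_device_id foo_ids[] = {
 *		{ CCW_DEVICE(0x3990, 0xe9) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(ccw, foo_ids);
 *
 *	static struct ccw_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *			.owner = THIS_MODULE,
 *		},
 *		.ids = foo_ids,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *		.set_online = foo_set_online,
 *		.set_offline = foo_set_offline,
 *	};
 *
 * which would then be registered from module init code with
 * ccw_driver_register(&foo_driver).
 */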
static int
ccw_device_probe (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */
	ccw_device_set_int_class(cdev);
	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
	if (ret) {
		cdev->drv = NULL;
		cdev->private->int_class = IRQIO_CIO;
		return ret;
	}

	return 0;
}

static int ccw_device_remove(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	struct subchannel *sch;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);

	spin_lock_irq(cdev->ccwlock);
	if (cdev->online) {
		cdev->online = 0;
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
		spin_lock_irq(cdev->ccwlock);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	cdev->private->int_class = IRQIO_CIO;
	sch = to_subchannel(cdev->dev.parent);
	spin_unlock_irq(cdev->ccwlock);
	io_subchannel_quiesce(sch);
	__disable_cmf(cdev);

	return 0;
}

static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	__disable_cmf(cdev);
}

static int ccw_device_pm_prepare(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (work_pending(&cdev->private->todo_work))
		return -EAGAIN;
	/* Fail while device is being set online/offline. */
	if (atomic_read(&cdev->private->onoff))
		return -EAGAIN;

	if (cdev->online && cdev->drv && cdev->drv->prepare)
		return cdev->drv->prepare(cdev);

	return 0;
}

static void ccw_device_pm_complete(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (cdev->online && cdev->drv && cdev->drv->complete)
		cdev->drv->complete(cdev);
}

static int ccw_device_pm_freeze(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	/* Fail suspend while device is in transitional state. */
1832	if (!dev_fsm_final_state(cdev))
1833		return -EAGAIN;
1834	if (!cdev->online)
1835		return 0;
1836	if (cdev->drv && cdev->drv->freeze) {
1837		ret = cdev->drv->freeze(cdev);
1838		if (ret)
1839			return ret;
1840	}
1841
1842	spin_lock_irq(sch->lock);
1843	cm_enabled = cdev->private->cmb != NULL;
1844	spin_unlock_irq(sch->lock);
1845	if (cm_enabled) {
1846		/* Don't have the css write on memory. */
1847		ret = ccw_set_cmf(cdev, 0);
1848		if (ret)
1849			return ret;
1850	}
1851	/* From here on, disallow device driver I/O. */
1852	spin_lock_irq(sch->lock);
1853	ret = cio_disable_subchannel(sch);
1854	spin_unlock_irq(sch->lock);
1855
1856	return ret;
1857}
1858
1859static int ccw_device_pm_thaw(struct device *dev)
1860{
1861	struct ccw_device *cdev = to_ccwdev(dev);
1862	struct subchannel *sch = to_subchannel(cdev->dev.parent);
1863	int ret, cm_enabled;
1864
1865	if (!cdev->online)
1866		return 0;
1867
1868	spin_lock_irq(sch->lock);
1869	/* Allow device driver I/O again. */
1870	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
1871	cm_enabled = cdev->private->cmb != NULL;
1872	spin_unlock_irq(sch->lock);
1873	if (ret)
1874		return ret;
1875
1876	if (cm_enabled) {
1877		ret = ccw_set_cmf(cdev, 1);
1878		if (ret)
1879			return ret;
1880	}
1881
1882	if (cdev->drv && cdev->drv->thaw)
1883		ret = cdev->drv->thaw(cdev);
1884
1885	return ret;
1886}
1887
1888static void __ccw_device_pm_restore(struct ccw_device *cdev)
1889{
1890	struct subchannel *sch = to_subchannel(cdev->dev.parent);
1891
1892	spin_lock_irq(sch->lock);
1893	if (cio_is_console(sch->schid)) {
1894		cio_enable_subchannel(sch, (u32)(addr_t)sch);
1895		goto out_unlock;
1896	}
1897	/*
1898	 * While we were sleeping, devices may have gone or become
1899	 * available again. Kick re-detection.
1900	 */
1901	cdev->private->flags.resuming = 1;
1902	cdev->private->path_new_mask = LPM_ANYPATH;
1903	css_sched_sch_todo(sch, SCH_TODO_EVAL);
1904	spin_unlock_irq(sch->lock);
1905	css_wait_for_slow_path();
1906
1907	/* cdev may have been moved to a different subchannel. */
1908	sch = to_subchannel(cdev->dev.parent);
1909	spin_lock_irq(sch->lock);
1910	if (cdev->private->state != DEV_STATE_ONLINE &&
1911	    cdev->private->state != DEV_STATE_OFFLINE)
1912		goto out_unlock;
1913
1914	ccw_device_recognition(cdev);
1915	spin_unlock_irq(sch->lock);
1916	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
1917		   cdev->private->state == DEV_STATE_DISCONNECTED);
1918	spin_lock_irq(sch->lock);
1919
1920out_unlock:
1921	cdev->private->flags.resuming = 0;
1922	spin_unlock_irq(sch->lock);
1923}
1924
1925static int resume_handle_boxed(struct ccw_device *cdev)
1926{
1927	cdev->private->state = DEV_STATE_BOXED;
1928	if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
1929		return 0;
1930	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1931	return -ENODEV;
1932}
1933
1934static int resume_handle_disc(struct ccw_device *cdev)
1935{
1936	cdev->private->state = DEV_STATE_DISCONNECTED;
1937	if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
1938		return 0;
1939	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1940	return -ENODEV;
1941}
1942
1943static int ccw_device_pm_restore(struct device *dev)
1944{
1945	struct ccw_device *cdev = to_ccwdev(dev);
1946	struct subchannel *sch;
1947	int ret = 0;
1948
1949	__ccw_device_pm_restore(cdev);
1950	sch = to_subchannel(cdev->dev.parent);
1951	spin_lock_irq(sch->lock);
1952	if (cio_is_console(sch->schid))
1953		goto out_restore;
1954
1955	/* check recognition results */
1956	switch (cdev->private->state) {
1957	case DEV_STATE_OFFLINE:
1958	case DEV_STATE_ONLINE:
1959		cdev->private->flags.donotify = 0;
1960		break;
1961	case DEV_STATE_BOXED:
1962		ret = resume_handle_boxed(cdev);
1963		if (ret)
1964			goto out_unlock;
1965		goto out_restore;
1966	default:
1967		ret = resume_handle_disc(cdev);
1968		if (ret)
1969			goto out_unlock;
1970		goto out_restore;
1971	}
1972	/* check if the device type has changed */
1973	if (!ccw_device_test_sense_data(cdev)) {
1974		ccw_device_update_sense_data(cdev);
1975		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
1976		ret = -ENODEV;
1977		goto out_unlock;
1978	}
1979	if (!cdev->online)
1980		goto out_unlock;
1981
1982	if (ccw_device_online(cdev)) {
1983		ret = resume_handle_disc(cdev);
1984		if (ret)
1985			goto out_unlock;
1986		goto out_restore;
1987	}
1988	spin_unlock_irq(sch->lock);
1989	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
1990	spin_lock_irq(sch->lock);
1991
1992	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
1993		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1994		ret = -ENODEV;
1995		goto out_unlock;
1996	}
1997
1998	/* reenable cmf, if needed */
1999	if (cdev->private->cmb) {
2000		spin_unlock_irq(sch->lock);
2001		ret = ccw_set_cmf(cdev, 1);
2002		spin_lock_irq(sch->lock);
2003		if (ret) {
2004			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
2005				      "(rc=%d)\n", cdev->private->dev_id.ssid,
2006				      cdev->private->dev_id.devno, ret);
2007			ret = 0;
2008		}
2009	}
2010
2011out_restore:
2012	spin_unlock_irq(sch->lock);
2013	if (cdev->online && cdev->drv && cdev->drv->restore)
2014		ret = cdev->drv->restore(cdev);
2015	return ret;
2016
2017out_unlock:
2018	spin_unlock_irq(sch->lock);
2019	return ret;
2020}
2021
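/*
 * An orienting note on the hibernation flow of the dev_pm_ops below:
 * ->prepare() and ->complete() bracket the whole operation, ->freeze()
 * quiesces the device before the hibernation image is written,
 * ->thaw() undoes ->freeze() in the image-writing kernel, and
 * ->restore() reinitializes the device after resume from the image.
 */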
2022static const struct dev_pm_ops ccw_pm_ops = {
2023	.prepare = ccw_device_pm_prepare,
2024	.complete = ccw_device_pm_complete,
2025	.freeze = ccw_device_pm_freeze,
2026	.thaw = ccw_device_pm_thaw,
2027	.restore = ccw_device_pm_restore,
2028};
2029
2030static struct bus_type ccw_bus_type = {
2031	.name   = "ccw",
2032	.match  = ccw_bus_match,
2033	.uevent = ccw_uevent,
2034	.probe  = ccw_device_probe,
2035	.remove = ccw_device_remove,
2036	.shutdown = ccw_device_shutdown,
2037	.pm = &ccw_pm_ops,
2038};
2039
2040/**
2041 * ccw_driver_register() - register a ccw driver
2042 * @cdriver: driver to be registered
2043 *
2044 * This function is mainly a wrapper around driver_register().
2045 * Returns:
2046 *   %0 on success and a negative error value on failure.
2047 */
2048int ccw_driver_register(struct ccw_driver *cdriver)
2049{
2050	struct device_driver *drv = &cdriver->driver;
2051
2052	drv->bus = &ccw_bus_type;
2053
2054	return driver_register(drv);
2055}
2056
2057/**
2058 * ccw_driver_unregister() - deregister a ccw driver
2059 * @cdriver: driver to be deregistered
2060 *
2061 * This function is mainly a wrapper around driver_unregister().
2062 */
2063void ccw_driver_unregister(struct ccw_driver *cdriver)
2064{
2065	driver_unregister(&cdriver->driver);
2066}
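
/*
 * A minimal usage sketch (hypothetical driver, not from this file)
 * showing how a ccw driver is typically defined and registered.  All
 * "sample_*" names are invented; the macros and structure fields come
 * from <asm/ccwdev.h> and <linux/module.h>.
 */
static struct ccw_device_id sample_ids[] = {
	{ CCW_DEVICE(0x3088, 0x60) },	/* hypothetical CU type/model */
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ccw, sample_ids);

static int sample_probe(struct ccw_device *cdev)
{
	/* Set up per-device state; returning 0 lets the bus bind us. */
	return 0;
}

static struct ccw_driver sample_driver = {
	.driver = {
		.name	= "sample_ccw",
		.owner	= THIS_MODULE,
	},
	.ids	= sample_ids,
	.probe	= sample_probe,
};

static int __init sample_init(void)
{
	return ccw_driver_register(&sample_driver);
}

static void __exit sample_exit(void)
{
	ccw_driver_unregister(&sample_driver);
}

module_init(sample_init);
module_exit(sample_exit);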
2067
2068static void ccw_device_todo(struct work_struct *work)
2069{
2070	struct ccw_device_private *priv;
2071	struct ccw_device *cdev;
2072	struct subchannel *sch;
2073	enum cdev_todo todo;
2074
2075	priv = container_of(work, struct ccw_device_private, todo_work);
2076	cdev = priv->cdev;
2077	sch = to_subchannel(cdev->dev.parent);
2078	/* Find out todo. */
2079	spin_lock_irq(cdev->ccwlock);
2080	todo = priv->todo;
2081	priv->todo = CDEV_TODO_NOTHING;
2082	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
2083		      priv->dev_id.ssid, priv->dev_id.devno, todo);
2084	spin_unlock_irq(cdev->ccwlock);
2085	/* Perform todo. */
2086	switch (todo) {
2087	case CDEV_TODO_ENABLE_CMF:
2088		cmf_reenable(cdev);
2089		break;
2090	case CDEV_TODO_REBIND:
2091		ccw_device_do_unbind_bind(cdev);
2092		break;
2093	case CDEV_TODO_REGISTER:
2094		io_subchannel_register(cdev);
2095		break;
2096	case CDEV_TODO_UNREG_EVAL:
2097		if (!sch_is_pseudo_sch(sch))
2098			css_schedule_eval(sch->schid);
2099		fallthrough;
2100	case CDEV_TODO_UNREG:
2101		if (sch_is_pseudo_sch(sch))
2102			ccw_device_unregister(cdev);
2103		else
2104			ccw_device_call_sch_unregister(cdev);
2105		break;
2106	default:
2107		break;
2108	}
2109	/* Release workqueue ref. */
2110	put_device(&cdev->dev);
2111}
2112
2113/**
2114 * ccw_device_sched_todo - schedule ccw device operation
2115 * @cdev: ccw device
2116 * @todo: todo
2117 *
2118 * Schedule the operation identified by @todo to be performed on the slow path
2119 * workqueue. Do nothing if another operation with higher priority is already
2120 * scheduled. Needs to be called with ccwdev lock held.
2121 */
2122void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
2123{
2124	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
2125		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
2126		      todo);
2127	if (cdev->private->todo >= todo)
2128		return;
2129	cdev->private->todo = todo;
2130	/* Get workqueue ref. */
2131	if (!get_device(&cdev->dev))
2132		return;
2133	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
2134		/* Already queued, release workqueue ref. */
2135		put_device(&cdev->dev);
2136	}
2137}
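
/*
 * Usage sketch (illustrative): cdev_todo values are ordered so that a
 * numerically higher value means higher priority; the check above thus
 * lets an already-pending equal-or-higher-priority operation win.
 * Callers hold the ccwdev lock, e.g.:
 *
 *	spin_lock_irq(cdev->ccwlock);
 *	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 *	spin_unlock_irq(cdev->ccwlock);
 */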
2138
2139/**
2140 * ccw_device_siosl() - initiate logging
2141 * @cdev: ccw device
2142 *
2143 * This function is used to invoke model-dependent logging within the channel
2144 * subsystem.
2145 */
2146int ccw_device_siosl(struct ccw_device *cdev)
2147{
2148	struct subchannel *sch = to_subchannel(cdev->dev.parent);
2149
2150	return chsc_siosl(sch->schid);
2151}
2152EXPORT_SYMBOL_GPL(ccw_device_siosl);
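
/*
 * Illustrative use (hypothetical caller, not from this file): a driver
 * that hits an unexpected device state might trigger the logging via
 *
 *	if (ccw_device_siosl(cdev))
 *		dev_warn(&cdev->dev, "SIOSL failed\n");
 */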
2153
2154EXPORT_SYMBOL(ccw_device_set_online);
2155EXPORT_SYMBOL(ccw_device_set_offline);
2156EXPORT_SYMBOL(ccw_driver_register);
2157EXPORT_SYMBOL(ccw_driver_unregister);
2158EXPORT_SYMBOL(get_ccwdev_by_busid);
v5.4
 116	/* CU_MODEL= */
 117	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
 118	if (ret)
 119		return ret;
 120
 121	/* The next two can be zero, that's ok for us */
 122	/* DEV_TYPE= */
 123	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
 124	if (ret)
 125		return ret;
 126
 127	/* DEV_MODEL= */
 128	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
 129	if (ret)
 130		return ret;
 131
 132	/* MODALIAS=  */
 133	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
 134	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
 135	return ret;
 136}
 137
 138static void io_subchannel_irq(struct subchannel *);
 139static int io_subchannel_probe(struct subchannel *);
 140static int io_subchannel_remove(struct subchannel *);
 141static void io_subchannel_shutdown(struct subchannel *);
 142static int io_subchannel_sch_event(struct subchannel *, int);
 143static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
 144				   int);
 145static void recovery_func(struct timer_list *unused);
 146
 147static struct css_device_id io_subchannel_ids[] = {
 148	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 149	{ /* end of list */ },
 150};
 151
 152static int io_subchannel_prepare(struct subchannel *sch)
 153{
 154	struct ccw_device *cdev;
 155	/*
 156	 * Don't allow suspend while a ccw device registration
 157	 * is still outstanding.
 158	 */
 159	cdev = sch_get_cdev(sch);
 160	if (cdev && !device_is_registered(&cdev->dev))
 161		return -EAGAIN;
 162	return 0;
 163}
 164
 165static int io_subchannel_settle(void)
 166{
 167	int ret;
 168
 169	ret = wait_event_interruptible(ccw_device_init_wq,
 170				atomic_read(&ccw_device_init_count) == 0);
 171	if (ret)
 172		return -EINTR;
 173	flush_workqueue(cio_work_q);
 174	return 0;
 175}
 176
 177static struct css_driver io_subchannel_driver = {
 178	.drv = {
 179		.owner = THIS_MODULE,
 180		.name = "io_subchannel",
 181	},
 182	.subchannel_type = io_subchannel_ids,
 183	.irq = io_subchannel_irq,
 184	.sch_event = io_subchannel_sch_event,
 185	.chp_event = io_subchannel_chp_event,
 186	.probe = io_subchannel_probe,
 187	.remove = io_subchannel_remove,
 188	.shutdown = io_subchannel_shutdown,
 189	.prepare = io_subchannel_prepare,
 190	.settle = io_subchannel_settle,
 191};
 192
 193int __init io_subchannel_init(void)
 194{
 195	int ret;
 196
 197	timer_setup(&recovery_timer, recovery_func, 0);
 198	ret = bus_register(&ccw_bus_type);
 199	if (ret)
 200		return ret;
 201	ret = css_driver_register(&io_subchannel_driver);
 202	if (ret)
 203		bus_unregister(&ccw_bus_type);
 204
 205	return ret;
 206}
 207
 208
 209/************************ device handling **************************/
 210
 211static ssize_t
 212devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
 213{
 214	struct ccw_device *cdev = to_ccwdev(dev);
 215	struct ccw_device_id *id = &(cdev->id);
 216
 217	if (id->dev_type != 0)
 218		return sprintf(buf, "%04x/%02x\n",
 219				id->dev_type, id->dev_model);
 220	else
 221		return sprintf(buf, "n/a\n");
 222}
 223
 224static ssize_t
 225cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
 226{
 227	struct ccw_device *cdev = to_ccwdev(dev);
 228	struct ccw_device_id *id = &(cdev->id);
 229
 230	return sprintf(buf, "%04x/%02x\n",
 231		       id->cu_type, id->cu_model);
 232}
 233
 234static ssize_t
 235modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
 236{
 237	struct ccw_device *cdev = to_ccwdev(dev);
 238	struct ccw_device_id *id = &(cdev->id);
 239	int len;
 240
 241	len = snprint_alias(buf, PAGE_SIZE, id, "\n");
 242
 243	return len > PAGE_SIZE ? PAGE_SIZE : len;
 244}
 245
 246static ssize_t
 247online_show (struct device *dev, struct device_attribute *attr, char *buf)
 248{
 249	struct ccw_device *cdev = to_ccwdev(dev);
 250
 251	return sprintf(buf, cdev->online ? "1\n" : "0\n");
 252}
 253
 254int ccw_device_is_orphan(struct ccw_device *cdev)
 255{
 256	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
 257}
 258
 259static void ccw_device_unregister(struct ccw_device *cdev)
 260{
 261	if (device_is_registered(&cdev->dev)) {
 262		/* Undo device_add(). */
 263		device_del(&cdev->dev);
 264	}
 265	if (cdev->private->flags.initialized) {
 266		cdev->private->flags.initialized = 0;
 267		/* Release reference from device_initialize(). */
 268		put_device(&cdev->dev);
 269	}
 270}
 271
 272static void io_subchannel_quiesce(struct subchannel *);
 273
 274/**
 275 * ccw_device_set_offline() - disable a ccw device for I/O
 276 * @cdev: target ccw device
 277 *
 278 * This function calls the driver's set_offline() function for @cdev, if
 279 * given, and then disables @cdev.
 280 * Returns:
 281 *   %0 on success and a negative error value on failure.
 282 * Context:
 283 *  enabled, ccw device lock not held
 284 */
 285int ccw_device_set_offline(struct ccw_device *cdev)
 286{
 287	struct subchannel *sch;
 288	int ret, state;
 289
 290	if (!cdev)
 291		return -ENODEV;
 292	if (!cdev->online || !cdev->drv)
 293		return -EINVAL;
 294
 295	if (cdev->drv->set_offline) {
 296		ret = cdev->drv->set_offline(cdev);
 297		if (ret != 0)
 298			return ret;
 299	}
 300	spin_lock_irq(cdev->ccwlock);
 301	sch = to_subchannel(cdev->dev.parent);
 302	cdev->online = 0;
 303	/* Wait until a final state or DISCONNECTED is reached */
 304	while (!dev_fsm_final_state(cdev) &&
 305	       cdev->private->state != DEV_STATE_DISCONNECTED) {
 306		spin_unlock_irq(cdev->ccwlock);
 307		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
 308			   cdev->private->state == DEV_STATE_DISCONNECTED));
 309		spin_lock_irq(cdev->ccwlock);
 310	}
 311	do {
 312		ret = ccw_device_offline(cdev);
 313		if (!ret)
 314			break;
 315		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
 316			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
 317			      cdev->private->dev_id.devno);
 318		if (ret != -EBUSY)
 319			goto error;
 320		state = cdev->private->state;
 321		spin_unlock_irq(cdev->ccwlock);
 322		io_subchannel_quiesce(sch);
 323		spin_lock_irq(cdev->ccwlock);
 324		cdev->private->state = state;
 325	} while (ret == -EBUSY);
 326	spin_unlock_irq(cdev->ccwlock);
 327	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
 328		   cdev->private->state == DEV_STATE_DISCONNECTED));
 329	/* Inform the user if set offline failed. */
 330	if (cdev->private->state == DEV_STATE_BOXED) {
 331		pr_warn("%s: The device entered boxed state while being set offline\n",
 332			dev_name(&cdev->dev));
 333	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
 334		pr_warn("%s: The device stopped operating while being set offline\n",
 335			dev_name(&cdev->dev));
 336	}
 337	/* Give up reference from ccw_device_set_online(). */
 338	put_device(&cdev->dev);
 339	return 0;
 340
 341error:
 342	cdev->private->state = DEV_STATE_OFFLINE;
 343	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
 344	spin_unlock_irq(cdev->ccwlock);
 345	/* Give up reference from ccw_device_set_online(). */
 346	put_device(&cdev->dev);
 347	return -ENODEV;
 348}
 349
 350/**
 351 * ccw_device_set_online() - enable a ccw device for I/O
 352 * @cdev: target ccw device
 353 *
 354 * This function first enables @cdev and then calls the driver's set_online()
 355 * function for @cdev, if given. If set_online() returns an error, @cdev is
 356 * disabled again.
 357 * Returns:
 358 *   %0 on success and a negative error value on failure.
 359 * Context:
 360 *  enabled, ccw device lock not held
 361 */
 362int ccw_device_set_online(struct ccw_device *cdev)
 363{
 364	int ret;
 365	int ret2;
 366
 367	if (!cdev)
 368		return -ENODEV;
 369	if (cdev->online || !cdev->drv)
 370		return -EINVAL;
 371	/* Hold on to an extra reference while device is online. */
 372	if (!get_device(&cdev->dev))
 373		return -ENODEV;
 374
 375	spin_lock_irq(cdev->ccwlock);
 376	ret = ccw_device_online(cdev);
 377	spin_unlock_irq(cdev->ccwlock);
 378	if (ret == 0)
 379		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
 380	else {
 381		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
 382			      "device 0.%x.%04x\n",
 383			      ret, cdev->private->dev_id.ssid,
 384			      cdev->private->dev_id.devno);
 385		/* Give up online reference since onlining failed. */
 386		put_device(&cdev->dev);
 387		return ret;
 388	}
 389	spin_lock_irq(cdev->ccwlock);
 390	/* Check if online processing was successful */
 391	if ((cdev->private->state != DEV_STATE_ONLINE) &&
 392	    (cdev->private->state != DEV_STATE_W4SENSE)) {
 393		spin_unlock_irq(cdev->ccwlock);
 394		/* Inform the user that set online failed. */
 395		if (cdev->private->state == DEV_STATE_BOXED) {
 396			pr_warn("%s: Setting the device online failed because it is boxed\n",
 397				dev_name(&cdev->dev));
 398		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
 399			pr_warn("%s: Setting the device online failed because it is not operational\n",
 400				dev_name(&cdev->dev));
 401		}
 402		/* Give up online reference since onlining failed. */
 403		put_device(&cdev->dev);
 404		return -ENODEV;
 405	}
 406	spin_unlock_irq(cdev->ccwlock);
 407	if (cdev->drv->set_online)
 408		ret = cdev->drv->set_online(cdev);
 409	if (ret)
 410		goto rollback;
 411
 412	spin_lock_irq(cdev->ccwlock);
 413	cdev->online = 1;
 414	spin_unlock_irq(cdev->ccwlock);
 415	return 0;
 416
 417rollback:
 418	spin_lock_irq(cdev->ccwlock);
 419	/* Wait until a final state or DISCONNECTED is reached */
 420	while (!dev_fsm_final_state(cdev) &&
 421	       cdev->private->state != DEV_STATE_DISCONNECTED) {
 422		spin_unlock_irq(cdev->ccwlock);
 423		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
 424			   cdev->private->state == DEV_STATE_DISCONNECTED));
 425		spin_lock_irq(cdev->ccwlock);
 426	}
 427	ret2 = ccw_device_offline(cdev);
 428	if (ret2)
 429		goto error;
 430	spin_unlock_irq(cdev->ccwlock);
 431	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
 432		   cdev->private->state == DEV_STATE_DISCONNECTED));
 433	/* Give up online reference since onlining failed. */
 434	put_device(&cdev->dev);
 435	return ret;
 436
 437error:
 438	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
 439		      "device 0.%x.%04x\n",
 440		      ret2, cdev->private->dev_id.ssid,
 441		      cdev->private->dev_id.devno);
 442	cdev->private->state = DEV_STATE_OFFLINE;
 443	spin_unlock_irq(cdev->ccwlock);
 444	/* Give up online reference since onlining failed. */
 445	put_device(&cdev->dev);
 446	return ret;
 447}
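
/*
 * Pairing note with a small sketch (hypothetical caller): a successful
 * ccw_device_set_online() keeps an extra reference on the device for
 * as long as it is online, and ccw_device_set_offline() gives that
 * reference up again, so callers typically pair the two:
 *
 *	if (!ccw_device_set_online(cdev)) {
 *		...
 *		ccw_device_set_offline(cdev);
 *	}
 */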
 448
 449static int online_store_handle_offline(struct ccw_device *cdev)
 450{
 451	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
 452		spin_lock_irq(cdev->ccwlock);
 453		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
 454		spin_unlock_irq(cdev->ccwlock);
 455		return 0;
 456	}
 457	if (cdev->drv && cdev->drv->set_offline)
 458		return ccw_device_set_offline(cdev);
 459	return -EINVAL;
 460}
 461
 462static int online_store_recog_and_online(struct ccw_device *cdev)
 463{
 464	/* Do device recognition, if needed. */
 465	if (cdev->private->state == DEV_STATE_BOXED) {
 466		spin_lock_irq(cdev->ccwlock);
 467		ccw_device_recognition(cdev);
 468		spin_unlock_irq(cdev->ccwlock);
 469		wait_event(cdev->private->wait_q,
 470			   cdev->private->flags.recog_done);
 471		if (cdev->private->state != DEV_STATE_OFFLINE)
 472			/* recognition failed */
 473			return -EAGAIN;
 474	}
 475	if (cdev->drv && cdev->drv->set_online)
 476		return ccw_device_set_online(cdev);
 477	return -EINVAL;
 478}
 479
 480static int online_store_handle_online(struct ccw_device *cdev, int force)
 481{
 482	int ret;
 483
 484	ret = online_store_recog_and_online(cdev);
 485	if (ret && !force)
 486		return ret;
 487	if (force && cdev->private->state == DEV_STATE_BOXED) {
 488		ret = ccw_device_stlck(cdev);
 489		if (ret)
 490			return ret;
 491		if (cdev->id.cu_type == 0)
 492			cdev->private->state = DEV_STATE_NOT_OPER;
 493		ret = online_store_recog_and_online(cdev);
 494		if (ret)
 495			return ret;
 496	}
 497	return 0;
 498}
 499
 500static ssize_t online_store (struct device *dev, struct device_attribute *attr,
 501			     const char *buf, size_t count)
 502{
 503	struct ccw_device *cdev = to_ccwdev(dev);
 504	int force, ret;
 505	unsigned long i;
 506
 507	/* Prevent conflict between multiple on-/offline processing requests. */
 508	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
 509		return -EAGAIN;
 510	/* Prevent conflict between internal I/Os and on-/offline processing. */
 511	if (!dev_fsm_final_state(cdev) &&
 512	    cdev->private->state != DEV_STATE_DISCONNECTED) {
 513		ret = -EAGAIN;
 514		goto out;
 515	}
 516	/* Prevent conflict between pending work and on-/offline processing. */
 517	if (work_pending(&cdev->private->todo_work)) {
 518		ret = -EAGAIN;
 519		goto out;
 520	}
 521	if (!strncmp(buf, "force\n", count)) {
 522		force = 1;
 523		i = 1;
 524		ret = 0;
 525	} else {
 526		force = 0;
 527		ret = kstrtoul(buf, 16, &i);
 528	}
 529	if (ret)
 530		goto out;
 531
 532	device_lock(dev);
 533	switch (i) {
 534	case 0:
 535		ret = online_store_handle_offline(cdev);
 536		break;
 537	case 1:
 538		ret = online_store_handle_online(cdev, force);
 539		break;
 540	default:
 541		ret = -EINVAL;
 542	}
 543	device_unlock(dev);
 544
 545out:
 546	atomic_set(&cdev->private->onoff, 0);
 547	return (ret < 0) ? ret : count;
 548}
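
/*
 * From user space this store routine backs the per-device sysfs
 * "online" attribute (device bus id below is hypothetical):
 *
 *	echo 1     > /sys/bus/ccw/devices/0.0.1234/online
 *	echo 0     > /sys/bus/ccw/devices/0.0.1234/online
 *	echo force > /sys/bus/ccw/devices/0.0.1234/online
 *
 * "force" additionally attempts a steal-lock operation on a boxed
 * device before onlining it.
 */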
 549
 550static ssize_t
 551available_show (struct device *dev, struct device_attribute *attr, char *buf)
 552{
 553	struct ccw_device *cdev = to_ccwdev(dev);
 554	struct subchannel *sch;
 555
 556	if (ccw_device_is_orphan(cdev))
 557		return sprintf(buf, "no device\n");
 558	switch (cdev->private->state) {
 559	case DEV_STATE_BOXED:
 560		return sprintf(buf, "boxed\n");
 561	case DEV_STATE_DISCONNECTED:
 562	case DEV_STATE_DISCONNECTED_SENSE_ID:
 563	case DEV_STATE_NOT_OPER:
 564		sch = to_subchannel(dev->parent);
 565		if (!sch->lpm)
 566			return sprintf(buf, "no path\n");
 567		else
 568			return sprintf(buf, "no device\n");
 569	default:
 570		/* All other states considered fine. */
 571		return sprintf(buf, "good\n");
 572	}
 573}
 574
 575static ssize_t
 576initiate_logging(struct device *dev, struct device_attribute *attr,
 577		 const char *buf, size_t count)
 578{
 579	struct subchannel *sch = to_subchannel(dev);
 580	int rc;
 581
 582	rc = chsc_siosl(sch->schid);
 583	if (rc < 0) {
 584		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
 585			sch->schid.ssid, sch->schid.sch_no, rc);
 586		return rc;
 587	}
 588	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
 589		  sch->schid.ssid, sch->schid.sch_no);
 590	return count;
 591}
 592
 593static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
 594			char *buf)
 595{
 596	struct subchannel *sch = to_subchannel(dev);
 597
 598	return sprintf(buf, "%02x\n", sch->vpm);
 599}
 600
 601static DEVICE_ATTR_RO(devtype);
 602static DEVICE_ATTR_RO(cutype);
 603static DEVICE_ATTR_RO(modalias);
 604static DEVICE_ATTR_RW(online);
 605static DEVICE_ATTR(availability, 0444, available_show, NULL);
 606static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
 607static DEVICE_ATTR_RO(vpm);
 608
 609static struct attribute *io_subchannel_attrs[] = {
 610	&dev_attr_logging.attr,
 611	&dev_attr_vpm.attr,
 612	NULL,
 613};
 614
 615static const struct attribute_group io_subchannel_attr_group = {
 616	.attrs = io_subchannel_attrs,
 617};
 618
 619static struct attribute * ccwdev_attrs[] = {
 620	&dev_attr_devtype.attr,
 621	&dev_attr_cutype.attr,
 622	&dev_attr_modalias.attr,
 623	&dev_attr_online.attr,
 624	&dev_attr_cmb_enable.attr,
 625	&dev_attr_availability.attr,
 626	NULL,
 627};
 628
 629static const struct attribute_group ccwdev_attr_group = {
 630	.attrs = ccwdev_attrs,
 631};
 632
 633static const struct attribute_group *ccwdev_attr_groups[] = {
 634	&ccwdev_attr_group,
 635	NULL,
 636};
 637
 638static int ccw_device_add(struct ccw_device *cdev)
 639{
 640	struct device *dev = &cdev->dev;
 641
 642	dev->bus = &ccw_bus_type;
 643	return device_add(dev);
 644}
 645
 646static int match_dev_id(struct device *dev, const void *data)
 647{
 648	struct ccw_device *cdev = to_ccwdev(dev);
 649	struct ccw_dev_id *dev_id = (void *)data;
 650
 651	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
 652}
 653
 654/**
 655 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 656 * @dev_id: id of the device to be searched
 657 *
 658 * This function searches all devices attached to the ccw bus for a device
 659 * matching @dev_id.
 660 * Returns:
 661 *  If a device is found, its reference count is increased and the
 662 *  device is returned; else %NULL is returned.
 663 */
 664struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
 665{
 666	struct device *dev;
 667
 668	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
 669
 670	return dev ? to_ccwdev(dev) : NULL;
 671}
 672EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
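
/*
 * A minimal lookup sketch (hypothetical values, not from this file);
 * the reference returned by get_ccwdev_by_dev_id() must be dropped
 * with put_device() when done:
 */
static struct ccw_device *sample_find_by_dev_id(void)
{
	struct ccw_dev_id dev_id = {
		.ssid  = 0,
		.devno = 0x1234,	/* hypothetical device number */
	};

	/* Caller is responsible for put_device(&cdev->dev). */
	return get_ccwdev_by_dev_id(&dev_id);
}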
 673
 674static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
 675{
 676	int ret;
 677
 678	if (device_is_registered(&cdev->dev)) {
 679		device_release_driver(&cdev->dev);
 680		ret = device_attach(&cdev->dev);
 681		WARN_ON(ret == -ENODEV);
 682	}
 683}
 684
 685static void
 686ccw_device_release(struct device *dev)
 687{
 688	struct ccw_device *cdev;
 689
 690	cdev = to_ccwdev(dev);
 691	cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
 692			sizeof(*cdev->private->dma_area));
 693	cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
 694	/* Release reference of parent subchannel. */
 695	put_device(cdev->dev.parent);
 696	kfree(cdev->private);
 697	kfree(cdev);
 698}
 699
 700static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
 701{
 702	struct ccw_device *cdev;
 703	struct gen_pool *dma_pool;
 704
 705	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
 706	if (!cdev)
 707		goto err_cdev;
 708	cdev->private = kzalloc(sizeof(struct ccw_device_private),
 709				GFP_KERNEL | GFP_DMA);
 710	if (!cdev->private)
 711		goto err_priv;
 712	cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
 713	cdev->dev.dma_mask = sch->dev.dma_mask;
 714	dma_pool = cio_gp_dma_create(&cdev->dev, 1);
 715	if (!dma_pool)
 716		goto err_dma_pool;
 717	cdev->private->dma_pool = dma_pool;
 718	cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
 719					sizeof(*cdev->private->dma_area));
 720	if (!cdev->private->dma_area)
 721		goto err_dma_area;
 722	return cdev;
 723err_dma_area:
 724	cio_gp_dma_destroy(dma_pool, &cdev->dev);
 725err_dma_pool:
 726	kfree(cdev->private);
 727err_priv:
 728	kfree(cdev);
 729err_cdev:
 730	return ERR_PTR(-ENOMEM);
 731}
 732
 733static void ccw_device_todo(struct work_struct *work);
 734
 735static int io_subchannel_initialize_dev(struct subchannel *sch,
 736					struct ccw_device *cdev)
 737{
 738	struct ccw_device_private *priv = cdev->private;
 739	int ret;
 740
 741	priv->cdev = cdev;
 742	priv->int_class = IRQIO_CIO;
 743	priv->state = DEV_STATE_NOT_OPER;
 744	priv->dev_id.devno = sch->schib.pmcw.dev;
 745	priv->dev_id.ssid = sch->schid.ssid;
 746
 747	INIT_WORK(&priv->todo_work, ccw_device_todo);
 748	INIT_LIST_HEAD(&priv->cmb_list);
 749	init_waitqueue_head(&priv->wait_q);
 750	timer_setup(&priv->timer, ccw_device_timeout, 0);
 751
 752	atomic_set(&priv->onoff, 0);
 753	cdev->ccwlock = sch->lock;
 754	cdev->dev.parent = &sch->dev;
 755	cdev->dev.release = ccw_device_release;
 756	cdev->dev.groups = ccwdev_attr_groups;
 757	/* Do first half of device_register. */
 758	device_initialize(&cdev->dev);
 759	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
 760			   cdev->private->dev_id.devno);
 761	if (ret)
 762		goto out_put;
 763	if (!get_device(&sch->dev)) {
 764		ret = -ENODEV;
 765		goto out_put;
 766	}
 767	priv->flags.initialized = 1;
 768	spin_lock_irq(sch->lock);
 769	sch_set_cdev(sch, cdev);
 770	spin_unlock_irq(sch->lock);
 771	return 0;
 772
 773out_put:
 774	/* Release reference from device_initialize(). */
 775	put_device(&cdev->dev);
 776	return ret;
 777}
 778
 779static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
 780{
 781	struct ccw_device *cdev;
 782	int ret;
 783
 784	cdev = io_subchannel_allocate_dev(sch);
 785	if (!IS_ERR(cdev)) {
 786		ret = io_subchannel_initialize_dev(sch, cdev);
 787		if (ret)
 788			cdev = ERR_PTR(ret);
 789	}
 790	return cdev;
 791}
 792
 793static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
 794
 795static void sch_create_and_recog_new_device(struct subchannel *sch)
 796{
 797	struct ccw_device *cdev;
 798
 799	/* Need to allocate a new ccw device. */
 800	cdev = io_subchannel_create_ccwdev(sch);
 801	if (IS_ERR(cdev)) {
 802		/* OK, we did everything we could... */
 803		css_sch_device_unregister(sch);
 804		return;
 805	}
 806	/* Start recognition for the new ccw device. */
 807	io_subchannel_recog(cdev, sch);
 808}
 809
 810/*
 811 * Register recognized device.
 812 */
 813static void io_subchannel_register(struct ccw_device *cdev)
 814{
 815	struct subchannel *sch;
 816	int ret, adjust_init_count = 1;
 817	unsigned long flags;
 818
 819	sch = to_subchannel(cdev->dev.parent);
 820	/*
 821	 * Check if subchannel is still registered. It may have become
 822	 * unregistered if a machine check hit us after finishing
 823	 * device recognition but before the register work could be
 824	 * queued.
 825	 */
 826	if (!device_is_registered(&sch->dev))
 827		goto out_err;
 828	css_update_ssd_info(sch);
 829	/*
 830	 * io_subchannel_register() will also be called after device
 831	 * recognition has been done for a boxed device (which will already
 832	 * be registered). We need to reprobe since we may now have sense id
 833	 * information.
 834	 */
 835	if (device_is_registered(&cdev->dev)) {
 836		if (!cdev->drv) {
 837			ret = device_reprobe(&cdev->dev);
 838			if (ret)
 839				/* We can't do much here. */
 840				CIO_MSG_EVENT(0, "device_reprobe() returned"
 841					      " %d for 0.%x.%04x\n", ret,
 842					      cdev->private->dev_id.ssid,
 843					      cdev->private->dev_id.devno);
 844		}
 845		adjust_init_count = 0;
 846		goto out;
 847	}
 848	/*
 849	 * Now we know this subchannel will stay, so we can throw
 850	 * our delayed uevent.
 851	 */
 852	dev_set_uevent_suppress(&sch->dev, 0);
 853	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
 854	/* make it known to the system */
 855	ret = ccw_device_add(cdev);
 856	if (ret) {
 857		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
 858			      cdev->private->dev_id.ssid,
 859			      cdev->private->dev_id.devno, ret);
 860		spin_lock_irqsave(sch->lock, flags);
 861		sch_set_cdev(sch, NULL);
 862		spin_unlock_irqrestore(sch->lock, flags);
 863		/* Release initial device reference. */
 864		put_device(&cdev->dev);
 865		goto out_err;
 866	}
 867out:
 868	cdev->private->flags.recog_done = 1;
 869	wake_up(&cdev->private->wait_q);
 870out_err:
 871	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
 872		wake_up(&ccw_device_init_wq);
 873}
 874
 875static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
 876{
 877	struct subchannel *sch;
 878
 879	/* Get subchannel reference for local processing. */
 880	if (!get_device(cdev->dev.parent))
 881		return;
 882	sch = to_subchannel(cdev->dev.parent);
 883	css_sch_device_unregister(sch);
 884	/* Release subchannel reference for local processing. */
 885	put_device(&sch->dev);
 886}
 887
 888/*
 889 * subchannel recognition done. Called from the state machine.
 890 */
 891void
 892io_subchannel_recog_done(struct ccw_device *cdev)
 893{
 894	if (css_init_done == 0) {
 895		cdev->private->flags.recog_done = 1;
 896		return;
 897	}
 898	switch (cdev->private->state) {
 899	case DEV_STATE_BOXED:
 900		/* Device did not respond in time. */
 901	case DEV_STATE_NOT_OPER:
 902		cdev->private->flags.recog_done = 1;
 903		/* Remove device found not operational. */
 904		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 905		if (atomic_dec_and_test(&ccw_device_init_count))
 906			wake_up(&ccw_device_init_wq);
 907		break;
 908	case DEV_STATE_OFFLINE:
 909		/*
 910		 * We can't register the device in interrupt context so
 911		 * we schedule a work item.
 912		 */
 913		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
 914		break;
 915	}
 916}
 917
 918static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
 919{
 920	/* Increase counter of devices currently in recognition. */
 921	atomic_inc(&ccw_device_init_count);
 922
 923	/* Start async. device sensing. */
 924	spin_lock_irq(sch->lock);
 925	ccw_device_recognition(cdev);
 926	spin_unlock_irq(sch->lock);
 927}
 928
 929static int ccw_device_move_to_sch(struct ccw_device *cdev,
 930				  struct subchannel *sch)
 931{
 932	struct subchannel *old_sch;
 933	int rc, old_enabled = 0;
 934
 935	old_sch = to_subchannel(cdev->dev.parent);
 936	/* Obtain child reference for new parent. */
 937	if (!get_device(&sch->dev))
 938		return -ENODEV;
 939
 940	if (!sch_is_pseudo_sch(old_sch)) {
 941		spin_lock_irq(old_sch->lock);
 942		old_enabled = old_sch->schib.pmcw.ena;
 943		rc = 0;
 944		if (old_enabled)
 945			rc = cio_disable_subchannel(old_sch);
 946		spin_unlock_irq(old_sch->lock);
 947		if (rc == -EBUSY) {
 948			/* Release child reference for new parent. */
 949			put_device(&sch->dev);
 950			return rc;
 951		}
 952	}
 953
 954	mutex_lock(&sch->reg_mutex);
 955	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
 956	mutex_unlock(&sch->reg_mutex);
 957	if (rc) {
 958		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
 959			      cdev->private->dev_id.ssid,
 960			      cdev->private->dev_id.devno, sch->schid.ssid,
 961			      sch->schib.pmcw.dev, rc);
 962		if (old_enabled) {
 963			/* Try to reenable the old subchannel. */
 964			spin_lock_irq(old_sch->lock);
 965			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
 966			spin_unlock_irq(old_sch->lock);
 967		}
 968		/* Release child reference for new parent. */
 969		put_device(&sch->dev);
 970		return rc;
 971	}
 972	/* Clean up old subchannel. */
 973	if (!sch_is_pseudo_sch(old_sch)) {
 974		spin_lock_irq(old_sch->lock);
 975		sch_set_cdev(old_sch, NULL);
 976		spin_unlock_irq(old_sch->lock);
 977		css_schedule_eval(old_sch->schid);
 978	}
 979	/* Release child reference for old parent. */
 980	put_device(&old_sch->dev);
 981	/* Initialize new subchannel. */
 982	spin_lock_irq(sch->lock);
 983	cdev->ccwlock = sch->lock;
 984	if (!sch_is_pseudo_sch(sch))
 985		sch_set_cdev(sch, cdev);
 986	spin_unlock_irq(sch->lock);
 987	if (!sch_is_pseudo_sch(sch))
 988		css_update_ssd_info(sch);
 989	return 0;
 990}
 991
 992static int ccw_device_move_to_orph(struct ccw_device *cdev)
 993{
 994	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 995	struct channel_subsystem *css = to_css(sch->dev.parent);
 996
 997	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
 998}
 999
1000static void io_subchannel_irq(struct subchannel *sch)
1001{
1002	struct ccw_device *cdev;
1003
1004	cdev = sch_get_cdev(sch);
1005
1006	CIO_TRACE_EVENT(6, "IRQ");
1007	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
1008	if (cdev)
1009		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1010	else
1011		inc_irq_stat(IRQIO_CIO);
1012}
1013
1014void io_subchannel_init_config(struct subchannel *sch)
1015{
1016	memset(&sch->config, 0, sizeof(sch->config));
1017	sch->config.csense = 1;
1018}
1019
1020static void io_subchannel_init_fields(struct subchannel *sch)
1021{
1022	if (cio_is_console(sch->schid))
1023		sch->opm = 0xff;
1024	else
1025		sch->opm = chp_get_sch_opm(sch);
1026	sch->lpm = sch->schib.pmcw.pam & sch->opm;
1027	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
1028
1029	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
1030		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
1031		      sch->schib.pmcw.dev, sch->schid.ssid,
1032		      sch->schid.sch_no, sch->schib.pmcw.pim,
1033		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);
1034
1035	io_subchannel_init_config(sch);
1036}
1037
1038/*
1039 * Note: We always return 0 so that we bind to the device even on error.
1040 * This is needed so that our remove function is called on unregister.
1041 */
1042static int io_subchannel_probe(struct subchannel *sch)
1043{
1044	struct io_subchannel_private *io_priv;
1045	struct ccw_device *cdev;
1046	int rc;
1047
1048	if (cio_is_console(sch->schid)) {
1049		rc = sysfs_create_group(&sch->dev.kobj,
1050					&io_subchannel_attr_group);
1051		if (rc)
1052			CIO_MSG_EVENT(0, "Failed to create io subchannel "
1053				      "attributes for subchannel "
1054				      "0.%x.%04x (rc=%d)\n",
1055				      sch->schid.ssid, sch->schid.sch_no, rc);
1056		/*
1057		 * The console subchannel already has an associated ccw_device.
1058		 * Throw the delayed uevent for the subchannel, register
1059		 * the ccw_device and exit.
1060		 */
1061		dev_set_uevent_suppress(&sch->dev, 0);
1062		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1063		cdev = sch_get_cdev(sch);
1064		rc = ccw_device_add(cdev);
1065		if (rc) {
1066			/* Release online reference. */
1067			put_device(&cdev->dev);
1068			goto out_schedule;
1069		}
1070		if (atomic_dec_and_test(&ccw_device_init_count))
1071			wake_up(&ccw_device_init_wq);
1072		return 0;
1073	}
1074	io_subchannel_init_fields(sch);
1075	rc = cio_commit_config(sch);
1076	if (rc)
1077		goto out_schedule;
1078	rc = sysfs_create_group(&sch->dev.kobj,
1079				&io_subchannel_attr_group);
1080	if (rc)
1081		goto out_schedule;
1082	/* Allocate I/O subchannel private data. */
1083	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
1084	if (!io_priv)
1085		goto out_schedule;
1086
1087	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
1088				sizeof(*io_priv->dma_area),
1089				&io_priv->dma_area_dma, GFP_KERNEL);
1090	if (!io_priv->dma_area) {
1091		kfree(io_priv);
1092		goto out_schedule;
1093	}
1094
1095	set_io_private(sch, io_priv);
1096	css_schedule_eval(sch->schid);
1097	return 0;
1098
1099out_schedule:
1100	spin_lock_irq(sch->lock);
1101	css_sched_sch_todo(sch, SCH_TODO_UNREG);
1102	spin_unlock_irq(sch->lock);
1103	return 0;
1104}
1105
1106static int io_subchannel_remove(struct subchannel *sch)
1107{
1108	struct io_subchannel_private *io_priv = to_io_private(sch);
1109	struct ccw_device *cdev;
1110
1111	cdev = sch_get_cdev(sch);
1112	if (!cdev)
1113		goto out_free;
1114
1115	ccw_device_unregister(cdev);
1116	spin_lock_irq(sch->lock);
1117	sch_set_cdev(sch, NULL);
1118	set_io_private(sch, NULL);
1119	spin_unlock_irq(sch->lock);
1120out_free:
1121	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
1122			  io_priv->dma_area, io_priv->dma_area_dma);
1123	kfree(io_priv);
1124	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1125	return 0;
1126}
1127
1128static void io_subchannel_verify(struct subchannel *sch)
1129{
1130	struct ccw_device *cdev;
1131
1132	cdev = sch_get_cdev(sch);
1133	if (cdev)
1134		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1135}
1136
1137static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1138{
1139	struct ccw_device *cdev;
1140
1141	cdev = sch_get_cdev(sch);
1142	if (!cdev)
1143		return;
1144	if (cio_update_schib(sch))
1145		goto err;
1146	/* Check for I/O on path. */
1147	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
1148		goto out;
1149	if (cdev->private->state == DEV_STATE_ONLINE) {
1150		ccw_device_kill_io(cdev);
1151		goto out;
1152	}
1153	if (cio_clear(sch))
1154		goto err;
1155out:
1156	/* Trigger path verification. */
1157	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1158	return;
1159
1160err:
1161	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
1162}
1163
1164static int io_subchannel_chp_event(struct subchannel *sch,
1165				   struct chp_link *link, int event)
1166{
1167	struct ccw_device *cdev = sch_get_cdev(sch);
1168	int mask;
1169
1170	mask = chp_ssd_get_mask(&sch->ssd_info, link);
1171	if (!mask)
1172		return 0;
1173	switch (event) {
1174	case CHP_VARY_OFF:
1175		sch->opm &= ~mask;
1176		sch->lpm &= ~mask;
1177		if (cdev)
1178			cdev->private->path_gone_mask |= mask;
1179		io_subchannel_terminate_path(sch, mask);
1180		break;
1181	case CHP_VARY_ON:
1182		sch->opm |= mask;
1183		sch->lpm |= mask;
1184		if (cdev)
1185			cdev->private->path_new_mask |= mask;
1186		io_subchannel_verify(sch);
1187		break;
1188	case CHP_OFFLINE:
1189		if (cio_update_schib(sch))
1190			return -ENODEV;
1191		if (cdev)
1192			cdev->private->path_gone_mask |= mask;
1193		io_subchannel_terminate_path(sch, mask);
1194		break;
1195	case CHP_ONLINE:
1196		if (cio_update_schib(sch))
1197			return -ENODEV;
1198		sch->lpm |= mask & sch->opm;
1199		if (cdev)
1200			cdev->private->path_new_mask |= mask;
1201		io_subchannel_verify(sch);
1202		break;
1203	}
1204	return 0;
1205}
1206
1207static void io_subchannel_quiesce(struct subchannel *sch)
1208{
1209	struct ccw_device *cdev;
1210	int ret;
1211
1212	spin_lock_irq(sch->lock);
1213	cdev = sch_get_cdev(sch);
1214	if (cio_is_console(sch->schid))
1215		goto out_unlock;
1216	if (!sch->schib.pmcw.ena)
1217		goto out_unlock;
1218	ret = cio_disable_subchannel(sch);
1219	if (ret != -EBUSY)
1220		goto out_unlock;
1221	if (cdev->handler)
1222		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
1223	while (ret == -EBUSY) {
1224		cdev->private->state = DEV_STATE_QUIESCE;
1225		cdev->private->iretry = 255;
1226		ret = ccw_device_cancel_halt_clear(cdev);
1227		if (ret == -EBUSY) {
1228			ccw_device_set_timeout(cdev, HZ/10);
1229			spin_unlock_irq(sch->lock);
1230			wait_event(cdev->private->wait_q,
1231				   cdev->private->state != DEV_STATE_QUIESCE);
1232			spin_lock_irq(sch->lock);
1233		}
1234		ret = cio_disable_subchannel(sch);
1235	}
1236out_unlock:
1237	spin_unlock_irq(sch->lock);
1238}
1239
1240static void io_subchannel_shutdown(struct subchannel *sch)
1241{
1242	io_subchannel_quiesce(sch);
1243}
1244
1245static int device_is_disconnected(struct ccw_device *cdev)
1246{
1247	if (!cdev)
1248		return 0;
1249	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
1250		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
1251}
1252
1253static int recovery_check(struct device *dev, void *data)
1254{
1255	struct ccw_device *cdev = to_ccwdev(dev);
1256	struct subchannel *sch;
1257	int *redo = data;
1258
1259	spin_lock_irq(cdev->ccwlock);
1260	switch (cdev->private->state) {
1261	case DEV_STATE_ONLINE:
1262		sch = to_subchannel(cdev->dev.parent);
1263		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
1264			break;
1265		/* fall through */
1266	case DEV_STATE_DISCONNECTED:
1267		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
1268			      cdev->private->dev_id.ssid,
1269			      cdev->private->dev_id.devno);
1270		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1271		*redo = 1;
1272		break;
1273	case DEV_STATE_DISCONNECTED_SENSE_ID:
1274		*redo = 1;
1275		break;
1276	}
1277	spin_unlock_irq(cdev->ccwlock);
1278
1279	return 0;
1280}
1281
1282static void recovery_work_func(struct work_struct *unused)
1283{
1284	int redo = 0;
1285
1286	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1287	if (redo) {
1288		spin_lock_irq(&recovery_lock);
1289		if (!timer_pending(&recovery_timer)) {
1290			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1291				recovery_phase++;
1292			mod_timer(&recovery_timer, jiffies +
1293				  recovery_delay[recovery_phase] * HZ);
1294		}
1295		spin_unlock_irq(&recovery_lock);
1296	} else
1297		CIO_MSG_EVENT(3, "recovery: end\n");
1298}
1299
1300static DECLARE_WORK(recovery_work, recovery_work_func);
1301
1302static void recovery_func(struct timer_list *unused)
1303{
1304	/*
1305	 * We can't do our recovery in softirq context and it's not
1306	 * performance critical, so we schedule it.
1307	 */
1308	schedule_work(&recovery_work);
1309}
1310
1311void ccw_device_schedule_recovery(void)
1312{
1313	unsigned long flags;
1314
1315	CIO_MSG_EVENT(3, "recovery: schedule\n");
1316	spin_lock_irqsave(&recovery_lock, flags);
1317	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1318		recovery_phase = 0;
1319		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1320	}
1321	spin_unlock_irqrestore(&recovery_lock, flags);
1322}
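
/*
 * Retry cadence implied by recovery_delay[] above: the first recovery
 * run fires 3 seconds after scheduling; while devices stay
 * disconnected, later runs follow after 30 and then 300 seconds, and
 * remain at 300-second intervals thereafter.
 */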
1323
1324static int purge_fn(struct device *dev, void *data)
1325{
1326	struct ccw_device *cdev = to_ccwdev(dev);
1327	struct ccw_dev_id *id = &cdev->private->dev_id;
1328
1329	spin_lock_irq(cdev->ccwlock);
1330	if (is_blacklisted(id->ssid, id->devno) &&
1331	    (cdev->private->state == DEV_STATE_OFFLINE) &&
1332	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
1333		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
1334			      id->devno);
1335		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1336		atomic_set(&cdev->private->onoff, 0);
1337	}
1338	spin_unlock_irq(cdev->ccwlock);
1339	/* Abort loop in case of pending signal. */
1340	if (signal_pending(current))
1341		return -EINTR;
1342
1343	return 0;
1344}
1345
1346/**
1347 * ccw_purge_blacklisted - purge unused, blacklisted devices
1348 *
1349 * Unregister all ccw devices that are offline and on the blacklist.
1350 */
1351int ccw_purge_blacklisted(void)
1352{
1353	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
1354	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
1355	return 0;
1356}
1357
1358void ccw_device_set_disconnected(struct ccw_device *cdev)
1359{
1360	if (!cdev)
1361		return;
1362	ccw_device_set_timeout(cdev, 0);
1363	cdev->private->flags.fake_irb = 0;
1364	cdev->private->state = DEV_STATE_DISCONNECTED;
1365	if (cdev->online)
1366		ccw_device_schedule_recovery();
1367}
1368
1369void ccw_device_set_notoper(struct ccw_device *cdev)
1370{
1371	struct subchannel *sch = to_subchannel(cdev->dev.parent);
1372
1373	CIO_TRACE_EVENT(2, "notoper");
1374	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
1375	ccw_device_set_timeout(cdev, 0);
1376	cio_disable_subchannel(sch);
1377	cdev->private->state = DEV_STATE_NOT_OPER;
1378}
1379
1380enum io_sch_action {
1381	IO_SCH_UNREG,
1382	IO_SCH_ORPH_UNREG,
1383	IO_SCH_ATTACH,
1384	IO_SCH_UNREG_ATTACH,
1385	IO_SCH_ORPH_ATTACH,
1386	IO_SCH_REPROBE,
1387	IO_SCH_VERIFY,
1388	IO_SCH_DISC,
1389	IO_SCH_NOP,
1390};
1391
1392static enum io_sch_action sch_get_action(struct subchannel *sch)
1393{
1394	struct ccw_device *cdev;
1395
1396	cdev = sch_get_cdev(sch);
1397	if (cio_update_schib(sch)) {
1398		/* Not operational. */
1399		if (!cdev)
1400			return IO_SCH_UNREG;
1401		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
1402			return IO_SCH_UNREG;
1403		return IO_SCH_ORPH_UNREG;
1404	}
1405	/* Operational. */
1406	if (!cdev)
1407		return IO_SCH_ATTACH;
1408	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
1409		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
1410			return IO_SCH_UNREG_ATTACH;
1411		return IO_SCH_ORPH_ATTACH;
1412	}
1413	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
1414		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
1415			return IO_SCH_UNREG;
1416		return IO_SCH_DISC;
1417	}
1418	if (device_is_disconnected(cdev))
1419		return IO_SCH_REPROBE;
1420	if (cdev->online && !cdev->private->flags.resuming)
1421		return IO_SCH_VERIFY;
1422	if (cdev->private->state == DEV_STATE_NOT_OPER)
1423		return IO_SCH_UNREG_ATTACH;
1424	return IO_SCH_NOP;
1425}
1426
1427/**
1428 * io_subchannel_sch_event - process subchannel event
1429 * @sch: subchannel
1430 * @process: non-zero if function is called in process context
1431 *
1432 * An unspecified event occurred for this subchannel. Adjust data according
1433 * to the current operational state of the subchannel and device. Return
1434 * zero when the event has been handled sufficiently or -EAGAIN when this
1435 * function should be called again in process context.
1436 */
1437static int io_subchannel_sch_event(struct subchannel *sch, int process)
1438{
1439	unsigned long flags;
1440	struct ccw_device *cdev;
1441	struct ccw_dev_id dev_id;
1442	enum io_sch_action action;
1443	int rc = -EAGAIN;
1444
1445	spin_lock_irqsave(sch->lock, flags);
1446	if (!device_is_registered(&sch->dev))
1447		goto out_unlock;
1448	if (work_pending(&sch->todo_work))
1449		goto out_unlock;
1450	cdev = sch_get_cdev(sch);
1451	if (cdev && work_pending(&cdev->private->todo_work))
1452		goto out_unlock;
1453	action = sch_get_action(sch);
1454	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
1455		      sch->schid.ssid, sch->schid.sch_no, process,
1456		      action);
1457	/* Perform immediate actions while holding the lock. */
1458	switch (action) {
1459	case IO_SCH_REPROBE:
1460		/* Trigger device recognition. */
1461		ccw_device_trigger_reprobe(cdev);
1462		rc = 0;
1463		goto out_unlock;
1464	case IO_SCH_VERIFY:
1465		/* Trigger path verification. */
1466		io_subchannel_verify(sch);
1467		rc = 0;
1468		goto out_unlock;
1469	case IO_SCH_DISC:
1470		ccw_device_set_disconnected(cdev);
1471		rc = 0;
1472		goto out_unlock;
1473	case IO_SCH_ORPH_UNREG:
1474	case IO_SCH_ORPH_ATTACH:
1475		ccw_device_set_disconnected(cdev);
1476		break;
1477	case IO_SCH_UNREG_ATTACH:
1478	case IO_SCH_UNREG:
1479		if (!cdev)
1480			break;
1481		if (cdev->private->state == DEV_STATE_SENSE_ID) {
1482			/*
1483			 * Note: delayed work triggered by this event
1484			 * and repeated calls to sch_event are synchronized
1485			 * by the above check for work_pending(cdev).
1486			 */
1487			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
1488		} else
1489			ccw_device_set_notoper(cdev);
1490		break;
1491	case IO_SCH_NOP:
1492		rc = 0;
1493		goto out_unlock;
1494	default:
1495		break;
1496	}
1497	spin_unlock_irqrestore(sch->lock, flags);
1498	/* All other actions require process context. */
1499	if (!process)
1500		goto out;
1501	/* Handle attached ccw device. */
1502	switch (action) {
1503	case IO_SCH_ORPH_UNREG:
1504	case IO_SCH_ORPH_ATTACH:
1505		/* Move ccw device to orphanage. */
1506		rc = ccw_device_move_to_orph(cdev);
1507		if (rc)
1508			goto out;
1509		break;
1510	case IO_SCH_UNREG_ATTACH:
1511		spin_lock_irqsave(sch->lock, flags);
1512		if (cdev->private->flags.resuming) {
1513			/* Device will be handled later. */
1514			rc = 0;
1515			goto out_unlock;
1516		}
1517		sch_set_cdev(sch, NULL);
1518		spin_unlock_irqrestore(sch->lock, flags);
1519		/* Unregister ccw device. */
1520		ccw_device_unregister(cdev);
1521		break;
1522	default:
1523		break;
1524	}
1525	/* Handle subchannel. */
1526	switch (action) {
1527	case IO_SCH_ORPH_UNREG:
1528	case IO_SCH_UNREG:
1529		if (!cdev || !cdev->private->flags.resuming)
1530			css_sch_device_unregister(sch);
1531		break;
1532	case IO_SCH_ORPH_ATTACH:
1533	case IO_SCH_UNREG_ATTACH:
1534	case IO_SCH_ATTACH:
1535		dev_id.ssid = sch->schid.ssid;
1536		dev_id.devno = sch->schib.pmcw.dev;
1537		cdev = get_ccwdev_by_dev_id(&dev_id);
1538		if (!cdev) {
1539			sch_create_and_recog_new_device(sch);
1540			break;
1541		}
1542		rc = ccw_device_move_to_sch(cdev, sch);
1543		if (rc) {
1544			/* Release reference from get_ccwdev_by_dev_id() */
1545			put_device(&cdev->dev);
1546			goto out;
1547		}
1548		spin_lock_irqsave(sch->lock, flags);
1549		ccw_device_trigger_reprobe(cdev);
1550		spin_unlock_irqrestore(sch->lock, flags);
1551		/* Release reference from get_ccwdev_by_dev_id() */
1552		put_device(&cdev->dev);
1553		break;
1554	default:
1555		break;
1556	}
1557	return 0;
1558
1559out_unlock:
1560	spin_unlock_irqrestore(sch->lock, flags);
1561out:
1562	return rc;
1563}
1564
1565static void ccw_device_set_int_class(struct ccw_device *cdev)
1566{
1567	struct ccw_driver *cdrv = cdev->drv;
1568
1569	/* Note: we interpret class 0 in this context as an uninitialized
1570	 * field since it translates to a non-I/O interrupt class. */
1571	if (cdrv->int_class != 0)
1572		cdev->private->int_class = cdrv->int_class;
1573	else
1574		cdev->private->int_class = IRQIO_CIO;
1575}

#ifdef CONFIG_CCW_CONSOLE
int __init ccw_device_enable_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int rc;

	if (!cdev->drv || !cdev->handler)
		return -EINVAL;

	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	/* Hold on to an extra reference while device is online. */
	get_device(&cdev->dev);
	rc = ccw_device_online(cdev);
	if (rc)
		goto out_unlock;

	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	if (cdev->private->state == DEV_STATE_ONLINE)
		cdev->online = 1;
	else
		rc = -EIO;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	if (rc) /* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
	return rc;
}

struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	sch = cio_probe_console();
	if (IS_ERR(sch))
		return ERR_CAST(sch);

	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto err_priv;
	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area)
		goto err_dma_area;
	set_io_private(sch, io_priv);
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
				  io_priv->dma_area, io_priv->dma_area_dma);
		set_io_private(sch, NULL);
		put_device(&sch->dev);
		kfree(io_priv);
		return cdev;
	}
	cdev->drv = drv;
	ccw_device_set_int_class(cdev);
	return cdev;

err_dma_area:
	kfree(io_priv);
err_priv:
	put_device(&sch->dev);
	return ERR_PTR(-ENOMEM);
}

void __init ccw_device_destroy_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct io_subchannel_private *io_priv = to_io_private(sch);

	set_io_private(sch, NULL);
	put_device(&sch->dev);
	put_device(&cdev->dev);
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	kfree(io_priv);
}

/**
 * ccw_device_wait_idle() - busy wait for device to become idle
 * @cdev: ccw device
 *
 * Poll until the activity control is zero, i.e. until no function or
 * data transfer is pending or active.
 * Must be called with the device lock held.
 */
void ccw_device_wait_idle(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	while (1) {
		cio_tsch(sch);
		if (sch->schib.scsw.cmd.actl == 0)
			break;
		udelay_simple(100);
	}
}

static int ccw_device_pm_restore(struct device *dev);

int ccw_device_force_console(struct ccw_device *cdev)
{
	return ccw_device_pm_restore(&cdev->dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
#endif

/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a
 * bus id matching @bus_id.
 * Returns:
 *  If a match is found, the reference count of the found device is
 *  increased and the device is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;

	dev = driver_find_device_by_name(&cdrv->driver, bus_id);

	return dev ? to_ccwdev(dev) : NULL;
}
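
/*
 * Example (hypothetical caller, assuming "cdrv" points to a registered
 * ccw_driver): look up a device by bus id and drop the reference
 * obtained by the lookup once done with the device.
 *
 *	struct ccw_device *cdev;
 *
 *	cdev = get_ccwdev_by_busid(cdrv, "0.0.1234");
 *	if (cdev) {
 *		... use cdev ...
 *		put_device(&cdev->dev);
 *	}
 */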

/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
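
/*
 * As a sketch (hypothetical, not part of this file): a minimal client
 * driver for this bus. The "sample" names and the device ID below are
 * made up for illustration; CCW_DEVICE() builds the match entry from
 * control unit type and model.
 *
 *	static struct ccw_device_id sample_ids[] = {
 *		{ CCW_DEVICE(0x3088, 0x08) },
 *		{ },
 *	};
 *
 *	static struct ccw_driver sample_driver = {
 *		.driver = {
 *			.name  = "sample",
 *			.owner = THIS_MODULE,
 *		},
 *		.ids	     = sample_ids,
 *		.probe	     = sample_probe,
 *		.remove	     = sample_remove,
 *		.set_online  = sample_set_online,
 *		.set_offline = sample_set_offline,
 *	};
 */
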
static int
ccw_device_probe (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */
	ccw_device_set_int_class(cdev);
	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
	if (ret) {
		cdev->drv = NULL;
		cdev->private->int_class = IRQIO_CIO;
		return ret;
	}

	return 0;
}

static int ccw_device_remove(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	struct subchannel *sch;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);

	spin_lock_irq(cdev->ccwlock);
	if (cdev->online) {
		cdev->online = 0;
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
		spin_lock_irq(cdev->ccwlock);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	cdev->private->int_class = IRQIO_CIO;
	sch = to_subchannel(cdev->dev.parent);
	spin_unlock_irq(cdev->ccwlock);
	io_subchannel_quiesce(sch);
	__disable_cmf(cdev);

	return 0;
}

static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	__disable_cmf(cdev);
}

static int ccw_device_pm_prepare(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (work_pending(&cdev->private->todo_work))
		return -EAGAIN;
	/* Fail while device is being set online/offline. */
	if (atomic_read(&cdev->private->onoff))
		return -EAGAIN;

	if (cdev->online && cdev->drv && cdev->drv->prepare)
		return cdev->drv->prepare(cdev);

	return 0;
}

static void ccw_device_pm_complete(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (cdev->online && cdev->drv && cdev->drv->complete)
		cdev->drv->complete(cdev);
}

static int ccw_device_pm_freeze(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	/* Fail suspend while device is in a transitional state. */
	if (!dev_fsm_final_state(cdev))
		return -EAGAIN;
	if (!cdev->online)
		return 0;
	if (cdev->drv && cdev->drv->freeze) {
		ret = cdev->drv->freeze(cdev);
		if (ret)
			return ret;
	}

	spin_lock_irq(sch->lock);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (cm_enabled) {
		/* Stop the css from writing measurement data to memory. */
		ret = ccw_set_cmf(cdev, 0);
		if (ret)
			return ret;
	}
	/* From here on, disallow device driver I/O. */
	spin_lock_irq(sch->lock);
	ret = cio_disable_subchannel(sch);
	spin_unlock_irq(sch->lock);

	return ret;
}

static int ccw_device_pm_thaw(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	if (!cdev->online)
		return 0;

	spin_lock_irq(sch->lock);
	/* Allow device driver I/O again. */
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;

	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 1);
		if (ret)
			return ret;
	}

	if (cdev->drv && cdev->drv->thaw)
		ret = cdev->drv->thaw(cdev);

	return ret;
}

static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid)) {
		cio_enable_subchannel(sch, (u32)(addr_t)sch);
		goto out_unlock;
	}
	/*
	 * While we were sleeping, devices may have gone or become
	 * available again. Kick re-detection.
	 */
	cdev->private->flags.resuming = 1;
	cdev->private->path_new_mask = LPM_ANYPATH;
	css_sched_sch_todo(sch, SCH_TODO_EVAL);
	spin_unlock_irq(sch->lock);
	css_wait_for_slow_path();

	/* cdev may have been moved to a different subchannel. */
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;

	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED);
	spin_lock_irq(sch->lock);

out_unlock:
	cdev->private->flags.resuming = 0;
	spin_unlock_irq(sch->lock);
}

static int resume_handle_boxed(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_BOXED;
	if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}

static int resume_handle_disc(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}

static int ccw_device_pm_restore(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int ret = 0;

	__ccw_device_pm_restore(cdev);
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid))
		goto out_restore;

	/* check recognition results */
	switch (cdev->private->state) {
	case DEV_STATE_OFFLINE:
	case DEV_STATE_ONLINE:
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_BOXED:
		ret = resume_handle_boxed(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	default:
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	/* check if the device type has changed */
	if (!ccw_device_test_sense_data(cdev)) {
		ccw_device_update_sense_data(cdev);
		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		ret = -ENODEV;
		goto out_unlock;
	}
	if (!cdev->online)
		goto out_unlock;

	if (ccw_device_online(cdev)) {
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	spin_lock_irq(sch->lock);

	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		ret = -ENODEV;
		goto out_unlock;
	}

	/* reenable cmf, if needed */
	if (cdev->private->cmb) {
		spin_unlock_irq(sch->lock);
		ret = ccw_set_cmf(cdev, 1);
		spin_lock_irq(sch->lock);
		if (ret) {
			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
				      "(rc=%d)\n", cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			ret = 0;
		}
	}

out_restore:
	spin_unlock_irq(sch->lock);
	if (cdev->online && cdev->drv && cdev->drv->restore)
		ret = cdev->drv->restore(cdev);
	return ret;

out_unlock:
	spin_unlock_irq(sch->lock);
	return ret;
}

static const struct dev_pm_ops ccw_pm_ops = {
	.prepare = ccw_device_pm_prepare,
	.complete = ccw_device_pm_complete,
	.freeze = ccw_device_pm_freeze,
	.thaw = ccw_device_pm_thaw,
	.restore = ccw_device_pm_restore,
};

static struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
	.pm = &ccw_pm_ops,
};

/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	drv->bus = &ccw_bus_type;

	return driver_register(drv);
}

/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}
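
/*
 * Typical module wiring for a client driver (sketch; "sample_driver",
 * "sample_init" and "sample_exit" are hypothetical names continuing the
 * example above):
 *
 *	static int __init sample_init(void)
 *	{
 *		return ccw_driver_register(&sample_driver);
 *	}
 *	module_init(sample_init);
 *
 *	static void __exit sample_exit(void)
 *	{
 *		ccw_driver_unregister(&sample_driver);
 *	}
 *	module_exit(sample_exit);
 */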

static void ccw_device_todo(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	enum cdev_todo todo;

	priv = container_of(work, struct ccw_device_private, todo_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/* Find out todo. */
	spin_lock_irq(cdev->ccwlock);
	todo = priv->todo;
	priv->todo = CDEV_TODO_NOTHING;
	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
		      priv->dev_id.ssid, priv->dev_id.devno, todo);
	spin_unlock_irq(cdev->ccwlock);
	/* Perform todo. */
	switch (todo) {
	case CDEV_TODO_ENABLE_CMF:
		cmf_reenable(cdev);
		break;
	case CDEV_TODO_REBIND:
		ccw_device_do_unbind_bind(cdev);
		break;
	case CDEV_TODO_REGISTER:
		io_subchannel_register(cdev);
		break;
	case CDEV_TODO_UNREG_EVAL:
		if (!sch_is_pseudo_sch(sch))
			css_schedule_eval(sch->schid);
		fallthrough;
	case CDEV_TODO_UNREG:
		if (sch_is_pseudo_sch(sch))
			ccw_device_unregister(cdev);
		else
			ccw_device_call_sch_unregister(cdev);
		break;
	default:
		break;
	}
	/* Release workqueue ref. */
	put_device(&cdev->dev);
}

/**
 * ccw_device_sched_todo() - schedule ccw device operation
 * @cdev: ccw device
 * @todo: todo to be performed
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with the ccwdev lock held.
 */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}
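
/*
 * Example (hypothetical caller): request deferred unregistration of a
 * device. The ccwdev lock must be held, as noted above.
 *
 *	spin_lock_irqsave(cdev->ccwlock, flags);
 *	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 *	spin_unlock_irqrestore(cdev->ccwlock, flags);
 */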

/**
 * ccw_device_siosl() - initiate logging
 * @cdev: ccw device
 *
 * This function is used to invoke model-dependent logging within the channel
 * subsystem.
 */
int ccw_device_siosl(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	return chsc_siosl(sch->schid);
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);
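
/*
 * Example (hypothetical caller): trigger model-dependent logging for a
 * device and report a failure to do so.
 *
 *	rc = ccw_device_siosl(cdev);
 *	if (rc)
 *		dev_warn(&cdev->dev,
 *			 "Logging request failed with rc=%d\n", rc);
 */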

EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);