v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * driver for channel subsystem
   4 *
   5 * Copyright IBM Corp. 2002, 2010
   6 *
   7 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
   8 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
   9 */
  10
  11#define KMSG_COMPONENT "cio"
  12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  13
  14#include <linux/export.h>
  15#include <linux/init.h>
  16#include <linux/device.h>
  17#include <linux/slab.h>
  18#include <linux/errno.h>
  19#include <linux/list.h>
  20#include <linux/reboot.h>
  21#include <linux/proc_fs.h>
  22#include <linux/genalloc.h>
  23#include <linux/dma-mapping.h>
  24#include <asm/isc.h>
  25#include <asm/crw.h>
  26
  27#include "css.h"
  28#include "cio.h"
  29#include "blacklist.h"
  30#include "cio_debug.h"
  31#include "ioasm.h"
  32#include "chsc.h"
  33#include "device.h"
  34#include "idset.h"
  35#include "chp.h"
  36
  37int css_init_done = 0;
  38int max_ssid;
  39
  40#define MAX_CSS_IDX 0
  41struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
  42static struct bus_type css_bus_type;
  43
  44int
  45for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
  46{
  47	struct subchannel_id schid;
  48	int ret;
  49
  50	init_subchannel_id(&schid);
  51	do {
  52		do {
  53			ret = fn(schid, data);
  54			if (ret)
  55				break;
  56		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
  57		schid.sch_no = 0;
  58	} while (schid.ssid++ < max_ssid);
  59	return ret;
  60}
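/*
 * Editor's note on the walk above: the inner loop scans subchannel numbers
 * 0..__MAX_SUBCHANNEL within one subchannel set, the outer loop advances
 * through subchannel sets 0..max_ssid. A non-zero return from fn() only ends
 * the scan of the current set; any remaining sets are still visited, and the
 * value from the last fn() invocation is what gets returned.
 */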
  61
  62struct cb_data {
  63	void *data;
  64	struct idset *set;
  65	int (*fn_known_sch)(struct subchannel *, void *);
  66	int (*fn_unknown_sch)(struct subchannel_id, void *);
  67};
  68
  69static int call_fn_known_sch(struct device *dev, void *data)
  70{
  71	struct subchannel *sch = to_subchannel(dev);
  72	struct cb_data *cb = data;
  73	int rc = 0;
  74
  75	if (cb->set)
  76		idset_sch_del(cb->set, sch->schid);
  77	if (cb->fn_known_sch)
  78		rc = cb->fn_known_sch(sch, cb->data);
  79	return rc;
  80}
  81
  82static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
  83{
  84	struct cb_data *cb = data;
  85	int rc = 0;
  86
  87	if (idset_sch_contains(cb->set, schid))
  88		rc = cb->fn_unknown_sch(schid, cb->data);
  89	return rc;
  90}
  91
  92static int call_fn_all_sch(struct subchannel_id schid, void *data)
  93{
  94	struct cb_data *cb = data;
  95	struct subchannel *sch;
  96	int rc = 0;
  97
  98	sch = get_subchannel_by_schid(schid);
  99	if (sch) {
 100		if (cb->fn_known_sch)
 101			rc = cb->fn_known_sch(sch, cb->data);
 102		put_device(&sch->dev);
 103	} else {
 104		if (cb->fn_unknown_sch)
 105			rc = cb->fn_unknown_sch(schid, cb->data);
 106	}
 107
 108	return rc;
 109}
 110
 111int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
 112			       int (*fn_unknown)(struct subchannel_id,
 113			       void *), void *data)
 114{
 115	struct cb_data cb;
 116	int rc;
 117
 118	cb.data = data;
 119	cb.fn_known_sch = fn_known;
 120	cb.fn_unknown_sch = fn_unknown;
 121
 122	if (fn_known && !fn_unknown) {
 123		/* Skip idset allocation in case of known-only loop. */
 124		cb.set = NULL;
 125		return bus_for_each_dev(&css_bus_type, NULL, &cb,
 126					call_fn_known_sch);
 127	}
 128
 129	cb.set = idset_sch_new();
 130	if (!cb.set)
 131		/* fall back to brute force scanning in case of oom */
 132		return for_each_subchannel(call_fn_all_sch, &cb);
 133
 134	idset_fill(cb.set);
 135
 136	/* Process registered subchannels. */
 137	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
 138	if (rc)
 139		goto out;
 140	/* Process unregistered subchannels. */
 141	if (fn_unknown)
 142		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
 143out:
 144	idset_free(cb.set);
 145
 146	return rc;
 147}
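/*
 * Editor's note on the staged walk above: cb.set starts out filled with every
 * possible subchannel id; call_fn_known_sch() removes each registered
 * subchannel from it, so what remains in the idset afterwards is exactly the
 * set of unregistered subchannels that call_fn_unknown_sch() then acts on.
 */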
 148
 149static void css_sch_todo(struct work_struct *work);
 150
 151static void css_sch_create_locks(struct subchannel *sch)
 152{
 153	spin_lock_init(&sch->lock);
 154	mutex_init(&sch->reg_mutex);
 155}
 156
 157static void css_subchannel_release(struct device *dev)
 158{
 159	struct subchannel *sch = to_subchannel(dev);
 160
 161	sch->config.intparm = 0;
 162	cio_commit_config(sch);
 163	kfree(sch->driver_override);
 164	kfree(sch);
 165}
 166
 167static int css_validate_subchannel(struct subchannel_id schid,
 168				   struct schib *schib)
 169{
 170	int err;
 171
 172	switch (schib->pmcw.st) {
 173	case SUBCHANNEL_TYPE_IO:
 174	case SUBCHANNEL_TYPE_MSG:
 175		if (!css_sch_is_valid(schib))
 176			err = -ENODEV;
 177		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
 178			CIO_MSG_EVENT(6, "Blacklisted device detected "
 179				      "at devno %04X, subchannel set %x\n",
 180				      schib->pmcw.dev, schid.ssid);
 181			err = -ENODEV;
 182		} else
 183			err = 0;
 184		break;
 185	default:
 186		err = 0;
 187	}
 188	if (err)
 189		goto out;
 190
 191	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
 192		      schid.ssid, schid.sch_no, schib->pmcw.st);
 193out:
 194	return err;
 195}
 196
 197struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
 198					struct schib *schib)
 199{
 200	struct subchannel *sch;
 201	int ret;
 202
 203	ret = css_validate_subchannel(schid, schib);
 204	if (ret < 0)
 205		return ERR_PTR(ret);
 206
 207	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
 208	if (!sch)
 209		return ERR_PTR(-ENOMEM);
 210
 211	sch->schid = schid;
 212	sch->schib = *schib;
 213	sch->st = schib->pmcw.st;
 214
 215	css_sch_create_locks(sch);
 216
 217	INIT_WORK(&sch->todo_work, css_sch_todo);
 218	sch->dev.release = &css_subchannel_release;
 219	sch->dev.dma_mask = &sch->dma_mask;
 220	device_initialize(&sch->dev);
 221	/*
 222	 * The physical addresses for some of the dma structures that can
 223	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
 224	 */
 225	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
 226	if (ret)
 227		goto err;
 228	/*
 229	 * But we don't have such restrictions imposed on the stuff that
 230	 * is handled by the streaming API.
 231	 */
 232	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
 233	if (ret)
 234		goto err;
 235
 236	return sch;
 237
 238err:
 239	kfree(sch);
 240	return ERR_PTR(ret);
 241}
 242
 243static int css_sch_device_register(struct subchannel *sch)
 244{
 245	int ret;
 246
 247	mutex_lock(&sch->reg_mutex);
 248	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
 249		     sch->schid.sch_no);
 250	ret = device_add(&sch->dev);
 251	mutex_unlock(&sch->reg_mutex);
 252	return ret;
 253}
 254
 255/**
 256 * css_sch_device_unregister - unregister a subchannel
 257 * @sch: subchannel to be unregistered
 258 */
 259void css_sch_device_unregister(struct subchannel *sch)
 260{
 261	mutex_lock(&sch->reg_mutex);
 262	if (device_is_registered(&sch->dev))
 263		device_unregister(&sch->dev);
 264	mutex_unlock(&sch->reg_mutex);
 265}
 266EXPORT_SYMBOL_GPL(css_sch_device_unregister);
 267
 268static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
 269{
 270	int i;
 271	int mask;
 272
 273	memset(ssd, 0, sizeof(struct chsc_ssd_info));
 274	ssd->path_mask = pmcw->pim;
 275	for (i = 0; i < 8; i++) {
 276		mask = 0x80 >> i;
 277		if (pmcw->pim & mask) {
 278			chp_id_init(&ssd->chpid[i]);
 279			ssd->chpid[i].id = pmcw->chpid[i];
 280		}
 281	}
 282}
 283
 284static void ssd_register_chpids(struct chsc_ssd_info *ssd)
 285{
 286	int i;
 287	int mask;
 288
 289	for (i = 0; i < 8; i++) {
 290		mask = 0x80 >> i;
 291		if (ssd->path_mask & mask)
 292			chp_new(ssd->chpid[i]);
 293	}
 294}
 295
 296void css_update_ssd_info(struct subchannel *sch)
 297{
 298	int ret;
 299
 300	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
 301	if (ret)
 302		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
 303
 304	ssd_register_chpids(&sch->ssd_info);
 305}
 306
 307static ssize_t type_show(struct device *dev, struct device_attribute *attr,
 308			 char *buf)
 309{
 310	struct subchannel *sch = to_subchannel(dev);
 311
 312	return sprintf(buf, "%01x\n", sch->st);
 313}
 314
 315static DEVICE_ATTR_RO(type);
 316
 317static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 318			     char *buf)
 319{
 320	struct subchannel *sch = to_subchannel(dev);
 321
 322	return sprintf(buf, "css:t%01X\n", sch->st);
 323}
 324
 325static DEVICE_ATTR_RO(modalias);
 326
 327static ssize_t driver_override_store(struct device *dev,
 328				     struct device_attribute *attr,
 329				     const char *buf, size_t count)
 330{
 331	struct subchannel *sch = to_subchannel(dev);
 332	int ret;
 333
 334	ret = driver_set_override(dev, &sch->driver_override, buf, count);
 335	if (ret)
 336		return ret;
 337
 338	return count;
 339}
 340
 341static ssize_t driver_override_show(struct device *dev,
 342				    struct device_attribute *attr, char *buf)
 343{
 344	struct subchannel *sch = to_subchannel(dev);
 345	ssize_t len;
 346
 347	device_lock(dev);
 348	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
 349	device_unlock(dev);
 350	return len;
 351}
 352static DEVICE_ATTR_RW(driver_override);
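/*
 * Editor's note, illustrative only: with the attribute group below attached
 * to every subchannel, the override is driven from user space through sysfs.
 * The path is a hypothetical example derived from the "css" bus name and the
 * "0.%x.%04x" device naming used in this file; "vfio_ccw" stands in for the
 * name of whatever css driver the subchannel should be bound to:
 *
 *	echo vfio_ccw > /sys/bus/css/devices/0.0.0313/driver_override
 */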
 353
 354static struct attribute *subch_attrs[] = {
 355	&dev_attr_type.attr,
 356	&dev_attr_modalias.attr,
 357	&dev_attr_driver_override.attr,
 358	NULL,
 359};
 360
 361static struct attribute_group subch_attr_group = {
 362	.attrs = subch_attrs,
 363};
 364
 365static const struct attribute_group *default_subch_attr_groups[] = {
 366	&subch_attr_group,
 367	NULL,
 368};
 369
 370static ssize_t chpids_show(struct device *dev,
 371			   struct device_attribute *attr,
 372			   char *buf)
 373{
 374	struct subchannel *sch = to_subchannel(dev);
 375	struct chsc_ssd_info *ssd = &sch->ssd_info;
 376	ssize_t ret = 0;
 377	int mask;
 378	int chp;
 379
 380	for (chp = 0; chp < 8; chp++) {
 381		mask = 0x80 >> chp;
 382		if (ssd->path_mask & mask)
 383			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
 384		else
 385			ret += sprintf(buf + ret, "00 ");
 386	}
 387	ret += sprintf(buf + ret, "\n");
 388	return ret;
 389}
 390static DEVICE_ATTR_RO(chpids);
 391
 392static ssize_t pimpampom_show(struct device *dev,
 393			      struct device_attribute *attr,
 394			      char *buf)
 395{
 396	struct subchannel *sch = to_subchannel(dev);
 397	struct pmcw *pmcw = &sch->schib.pmcw;
 398
 399	return sprintf(buf, "%02x %02x %02x\n",
 400		       pmcw->pim, pmcw->pam, pmcw->pom);
 401}
 402static DEVICE_ATTR_RO(pimpampom);
 403
 404static ssize_t dev_busid_show(struct device *dev,
 405			      struct device_attribute *attr,
 406			      char *buf)
 407{
 408	struct subchannel *sch = to_subchannel(dev);
 409	struct pmcw *pmcw = &sch->schib.pmcw;
 410
 411	if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
 412	    (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
 413		return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
 414				  pmcw->dev);
 415	else
 416		return sysfs_emit(buf, "none\n");
 417}
 418static DEVICE_ATTR_RO(dev_busid);
 419
 420static struct attribute *io_subchannel_type_attrs[] = {
 421	&dev_attr_chpids.attr,
 422	&dev_attr_pimpampom.attr,
 423	&dev_attr_dev_busid.attr,
 424	NULL,
 425};
 426ATTRIBUTE_GROUPS(io_subchannel_type);
 427
 428static const struct device_type io_subchannel_type = {
 429	.groups = io_subchannel_type_groups,
 430};
 431
 432int css_register_subchannel(struct subchannel *sch)
 433{
 434	int ret;
 435
 436	/* Initialize the subchannel structure */
 437	sch->dev.parent = &channel_subsystems[0]->device;
 438	sch->dev.bus = &css_bus_type;
 439	sch->dev.groups = default_subch_attr_groups;
 440
 441	if (sch->st == SUBCHANNEL_TYPE_IO)
 442		sch->dev.type = &io_subchannel_type;
 443
 444	css_update_ssd_info(sch);
 445	/* make it known to the system */
 446	ret = css_sch_device_register(sch);
 447	if (ret) {
 448		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
 449			      sch->schid.ssid, sch->schid.sch_no, ret);
 450		return ret;
 451	}
 452	return ret;
 453}
 454
 455static int css_probe_device(struct subchannel_id schid, struct schib *schib)
 456{
 457	struct subchannel *sch;
 458	int ret;
 459
 460	sch = css_alloc_subchannel(schid, schib);
 461	if (IS_ERR(sch))
 462		return PTR_ERR(sch);
 463
 464	ret = css_register_subchannel(sch);
 465	if (ret)
 466		put_device(&sch->dev);
 467
 468	return ret;
 469}
 470
 471static int
 472check_subchannel(struct device *dev, const void *data)
 473{
 474	struct subchannel *sch;
 475	struct subchannel_id *schid = (void *)data;
 476
 477	sch = to_subchannel(dev);
 478	return schid_equal(&sch->schid, schid);
 479}
 480
 481struct subchannel *
 482get_subchannel_by_schid(struct subchannel_id schid)
 483{
 484	struct device *dev;
 485
 486	dev = bus_find_device(&css_bus_type, NULL,
 487			      &schid, check_subchannel);
 488
 489	return dev ? to_subchannel(dev) : NULL;
 490}
 491
 492/**
 493 * css_sch_is_valid() - check if a subchannel is valid
 494 * @schib: subchannel information block for the subchannel
 495 */
 496int css_sch_is_valid(struct schib *schib)
 497{
 498	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
 499		return 0;
 500	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
 501		return 0;
 502	return 1;
 503}
 504EXPORT_SYMBOL_GPL(css_sch_is_valid);
 505
 506static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
 507{
 508	struct schib schib;
 509	int ccode;
 510
 511	if (!slow) {
 512		/* Will be done on the slow path. */
 513		return -EAGAIN;
 514	}
 515	/*
 516	 * The first subchannel that is not-operational (ccode==3)
 517	 * indicates that there aren't any more devices available.
 518	 * If stsch gets an exception, it means the current subchannel set
 519	 * is not valid.
 520	 */
 521	ccode = stsch(schid, &schib);
 522	if (ccode)
 523		return (ccode == 3) ? -ENXIO : ccode;
 524
 525	return css_probe_device(schid, &schib);
 526}
 527
 528static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
 529{
 530	int ret = 0;
 531
 532	if (sch->driver) {
 533		if (sch->driver->sch_event)
 534			ret = sch->driver->sch_event(sch, slow);
 535		else
 536			dev_dbg(&sch->dev,
 537				"Got subchannel machine check but "
 538				"no sch_event handler provided.\n");
 539	}
 540	if (ret != 0 && ret != -EAGAIN) {
 541		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
 542			      sch->schid.ssid, sch->schid.sch_no, ret);
 543	}
 544	return ret;
 545}
 546
 547static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
 548{
 549	struct subchannel *sch;
 550	int ret;
 551
 552	sch = get_subchannel_by_schid(schid);
 553	if (sch) {
 554		ret = css_evaluate_known_subchannel(sch, slow);
 555		put_device(&sch->dev);
 556	} else
 557		ret = css_evaluate_new_subchannel(schid, slow);
 558	if (ret == -EAGAIN)
 559		css_schedule_eval(schid);
 560}
 561
 562/**
 563 * css_sched_sch_todo - schedule a subchannel operation
 564 * @sch: subchannel
 565 * @todo: todo
 566 *
 567 * Schedule the operation identified by @todo to be performed on the slow path
 568 * workqueue. Do nothing if another operation with higher priority is already
 569 * scheduled. Needs to be called with subchannel lock held.
 570 */
 571void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
 572{
 573	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
 574		      sch->schid.ssid, sch->schid.sch_no, todo);
 575	if (sch->todo >= todo)
 576		return;
 577	/* Get workqueue ref. */
 578	if (!get_device(&sch->dev))
 579		return;
 580	sch->todo = todo;
 581	if (!queue_work(cio_work_q, &sch->todo_work)) {
 582		/* Already queued, release workqueue ref. */
 583		put_device(&sch->dev);
 584	}
 585}
 586EXPORT_SYMBOL_GPL(css_sched_sch_todo);
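/*
 * Editor's note, illustrative only: callers must hold the subchannel lock, as
 * the kernel-doc above states. A minimal sketch of the expected calling
 * pattern (mirroring what css_sch_todo() below does when it re-queues an
 * evaluation):
 *
 *	spin_lock_irq(&sch->lock);
 *	css_sched_sch_todo(sch, SCH_TODO_EVAL);
 *	spin_unlock_irq(&sch->lock);
 */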
 587
 588static void css_sch_todo(struct work_struct *work)
 589{
 590	struct subchannel *sch;
 591	enum sch_todo todo;
 592	int ret;
 593
 594	sch = container_of(work, struct subchannel, todo_work);
 595	/* Find out todo. */
 596	spin_lock_irq(&sch->lock);
 597	todo = sch->todo;
 598	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
 599		      sch->schid.sch_no, todo);
 600	sch->todo = SCH_TODO_NOTHING;
 601	spin_unlock_irq(&sch->lock);
 602	/* Perform todo. */
 603	switch (todo) {
 604	case SCH_TODO_NOTHING:
 605		break;
 606	case SCH_TODO_EVAL:
 607		ret = css_evaluate_known_subchannel(sch, 1);
 608		if (ret == -EAGAIN) {
 609			spin_lock_irq(&sch->lock);
 610			css_sched_sch_todo(sch, todo);
 611			spin_unlock_irq(&sch->lock);
 612		}
 613		break;
 614	case SCH_TODO_UNREG:
 615		css_sch_device_unregister(sch);
 616		break;
 617	}
 618	/* Release workqueue ref. */
 619	put_device(&sch->dev);
 620}
 621
 622static struct idset *slow_subchannel_set;
 623static DEFINE_SPINLOCK(slow_subchannel_lock);
 624static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
 625static atomic_t css_eval_scheduled;
 626
 627static int __init slow_subchannel_init(void)
 628{
 629	atomic_set(&css_eval_scheduled, 0);
 630	slow_subchannel_set = idset_sch_new();
 631	if (!slow_subchannel_set) {
 632		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
 633		return -ENOMEM;
 634	}
 635	return 0;
 636}
 637
 638static int slow_eval_known_fn(struct subchannel *sch, void *data)
 639{
 640	int eval;
 641	int rc;
 642
 643	spin_lock_irq(&slow_subchannel_lock);
 644	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
 645	idset_sch_del(slow_subchannel_set, sch->schid);
 646	spin_unlock_irq(&slow_subchannel_lock);
 647	if (eval) {
 648		rc = css_evaluate_known_subchannel(sch, 1);
 649		if (rc == -EAGAIN)
 650			css_schedule_eval(sch->schid);
 651		/*
  652		 * The loop might take a long time for platforms with lots of
 653		 * known devices. Allow scheduling here.
 654		 */
 655		cond_resched();
 656	}
 657	return 0;
 658}
 659
 660static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
 661{
 662	int eval;
 663	int rc = 0;
 664
 665	spin_lock_irq(&slow_subchannel_lock);
 666	eval = idset_sch_contains(slow_subchannel_set, schid);
 667	idset_sch_del(slow_subchannel_set, schid);
 668	spin_unlock_irq(&slow_subchannel_lock);
 669	if (eval) {
 670		rc = css_evaluate_new_subchannel(schid, 1);
 671		switch (rc) {
 672		case -EAGAIN:
 673			css_schedule_eval(schid);
 674			rc = 0;
 675			break;
 676		case -ENXIO:
 677		case -ENOMEM:
 678		case -EIO:
 679			/* These should abort looping */
 680			spin_lock_irq(&slow_subchannel_lock);
 681			idset_sch_del_subseq(slow_subchannel_set, schid);
 682			spin_unlock_irq(&slow_subchannel_lock);
 683			break;
 684		default:
 685			rc = 0;
 686		}
 687		/* Allow scheduling here since the containing loop might
 688		 * take a while.  */
 689		cond_resched();
 690	}
 691	return rc;
 692}
 693
 694static void css_slow_path_func(struct work_struct *unused)
 695{
 696	unsigned long flags;
 697
 698	CIO_TRACE_EVENT(4, "slowpath");
 699	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
 700				   NULL);
 701	spin_lock_irqsave(&slow_subchannel_lock, flags);
 702	if (idset_is_empty(slow_subchannel_set)) {
 703		atomic_set(&css_eval_scheduled, 0);
 704		wake_up(&css_eval_wq);
 705	}
 706	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 707}
 708
 709static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
 710struct workqueue_struct *cio_work_q;
 711
 712void css_schedule_eval(struct subchannel_id schid)
 713{
 714	unsigned long flags;
 715
 716	spin_lock_irqsave(&slow_subchannel_lock, flags);
 717	idset_sch_add(slow_subchannel_set, schid);
 718	atomic_set(&css_eval_scheduled, 1);
 719	queue_delayed_work(cio_work_q, &slow_path_work, 0);
 720	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 721}
 722
 723void css_schedule_eval_all(void)
 724{
 725	unsigned long flags;
 726
 727	spin_lock_irqsave(&slow_subchannel_lock, flags);
 728	idset_fill(slow_subchannel_set);
 729	atomic_set(&css_eval_scheduled, 1);
 730	queue_delayed_work(cio_work_q, &slow_path_work, 0);
 731	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 732}
 733
 734static int __unset_validpath(struct device *dev, void *data)
 735{
 736	struct idset *set = data;
 737	struct subchannel *sch = to_subchannel(dev);
 738	struct pmcw *pmcw = &sch->schib.pmcw;
 739
  740	/* Here we want to make sure that we consider only those subchannels
  741	 * which do not have an operational device attached to them. This can be
  742	 * determined from the PAM and POM values of the pmcw. The OPM tells us
  743	 * about any path that is currently varied off, which we should not consider.
  744	 */
 745	if (sch->st == SUBCHANNEL_TYPE_IO &&
 746	    (sch->opm & pmcw->pam & pmcw->pom))
 747		idset_sch_del(set, sch->schid);
 748
 749	return 0;
 750}
 751
 752static int __unset_online(struct device *dev, void *data)
 753{
 754	struct idset *set = data;
 755	struct subchannel *sch = to_subchannel(dev);
 756
 757	if (sch->st == SUBCHANNEL_TYPE_IO && sch->config.ena)
 758		idset_sch_del(set, sch->schid);
 759
 760	return 0;
 761}
 762
 763void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
 764{
 765	unsigned long flags;
 766	struct idset *set;
 767
 768	/* Find unregistered subchannels. */
 769	set = idset_sch_new();
 770	if (!set) {
 771		/* Fallback. */
 772		css_schedule_eval_all();
 773		return;
 774	}
 775	idset_fill(set);
 776	switch (cond) {
 777	case CSS_EVAL_NO_PATH:
 778		bus_for_each_dev(&css_bus_type, NULL, set, __unset_validpath);
 779		break;
 780	case CSS_EVAL_NOT_ONLINE:
 781		bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
 782		break;
 783	default:
 784		break;
 785	}
 786
 787	/* Apply to slow_subchannel_set. */
 788	spin_lock_irqsave(&slow_subchannel_lock, flags);
 789	idset_add_set(slow_subchannel_set, set);
 790	atomic_set(&css_eval_scheduled, 1);
 791	queue_delayed_work(cio_work_q, &slow_path_work, delay);
 792	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 793	idset_free(set);
 794}
 795
 796void css_wait_for_slow_path(void)
 797{
 798	flush_workqueue(cio_work_q);
 799}
 800
 801/* Schedule reprobing of all subchannels with no valid operational path. */
 802void css_schedule_reprobe(void)
 803{
 804	/* Schedule with a delay to allow merging of subsequent calls. */
 805	css_schedule_eval_cond(CSS_EVAL_NO_PATH, 1 * HZ);
 806}
 807EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 808
 809/*
 810 * Called from the machine check handler for subchannel report words.
 811 */
 812static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 813{
 814	struct subchannel_id mchk_schid;
 815	struct subchannel *sch;
 816
 817	if (overflow) {
 818		css_schedule_eval_all();
 819		return;
 820	}
 821	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
 822		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
 823		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
 824		      crw0->erc, crw0->rsid);
 825	if (crw1)
 826		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
 827			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
 828			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
 829			      crw1->anc, crw1->erc, crw1->rsid);
 830	init_subchannel_id(&mchk_schid);
 831	mchk_schid.sch_no = crw0->rsid;
 832	if (crw1)
 833		mchk_schid.ssid = (crw1->rsid >> 4) & 3;
 834
 835	if (crw0->erc == CRW_ERC_PMOD) {
 836		sch = get_subchannel_by_schid(mchk_schid);
 837		if (sch) {
 838			css_update_ssd_info(sch);
 839			put_device(&sch->dev);
 840		}
 841	}
 842	/*
 843	 * Since we are always presented with IPI in the CRW, we have to
 844	 * use stsch() to find out if the subchannel in question has come
 845	 * or gone.
 846	 */
 847	css_evaluate_subchannel(mchk_schid, 0);
 848}
 849
 850static void __init
 851css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
 852{
 853	struct cpuid cpu_id;
 854
 855	if (css_general_characteristics.mcss) {
 856		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
 857		css->global_pgid.pgid_high.ext_cssid.cssid =
 858			css->id_valid ? css->cssid : 0;
 859	} else {
 860		css->global_pgid.pgid_high.cpu_addr = stap();
 861	}
 862	get_cpu_id(&cpu_id);
 863	css->global_pgid.cpu_id = cpu_id.ident;
 864	css->global_pgid.cpu_model = cpu_id.machine;
 865	css->global_pgid.tod_high = tod_high;
 866}
 867
 868static void channel_subsystem_release(struct device *dev)
 869{
 870	struct channel_subsystem *css = to_css(dev);
 871
 872	mutex_destroy(&css->mutex);
 873	kfree(css);
 874}
 875
 876static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
 877			       char *buf)
 878{
 879	struct channel_subsystem *css = to_css(dev);
 880
 881	if (!css->id_valid)
 882		return -EINVAL;
 883
 884	return sprintf(buf, "%x\n", css->cssid);
 885}
 886static DEVICE_ATTR_RO(real_cssid);
 887
 888static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
 889			    const char *buf, size_t count)
 890{
 891	CIO_TRACE_EVENT(4, "usr-rescan");
 892
 893	css_schedule_eval_all();
 894	css_complete_work();
 895
 896	return count;
 897}
 898static DEVICE_ATTR_WO(rescan);
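/*
 * Editor's note, illustrative only: this attribute is part of
 * cssdev_attr_group below, so it appears on the channel subsystem device
 * itself. Assuming the "css%x" device name set in setup_css() (i.e. "css0"
 * for the single supported css), a full rescan can be triggered and waited
 * for with:
 *
 *	echo 1 > /sys/devices/css0/rescan
 */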
 899
 900static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
 901			      char *buf)
 902{
 903	struct channel_subsystem *css = to_css(dev);
 904	int ret;
 905
 906	mutex_lock(&css->mutex);
 907	ret = sprintf(buf, "%x\n", css->cm_enabled);
 908	mutex_unlock(&css->mutex);
 909	return ret;
 910}
 911
 912static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
 913			       const char *buf, size_t count)
 914{
 915	struct channel_subsystem *css = to_css(dev);
 916	unsigned long val;
 917	int ret;
 918
 919	ret = kstrtoul(buf, 16, &val);
 920	if (ret)
 921		return ret;
 922	mutex_lock(&css->mutex);
 923	switch (val) {
 924	case 0:
 925		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
 926		break;
 927	case 1:
 928		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
 929		break;
 930	default:
 931		ret = -EINVAL;
 932	}
 933	mutex_unlock(&css->mutex);
 934	return ret < 0 ? ret : count;
 935}
 936static DEVICE_ATTR_RW(cm_enable);
 937
 938static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
 939			      int index)
 940{
 941	return css_chsc_characteristics.secm ? attr->mode : 0;
 942}
 943
 944static struct attribute *cssdev_attrs[] = {
 945	&dev_attr_real_cssid.attr,
 946	&dev_attr_rescan.attr,
 947	NULL,
 948};
 949
 950static struct attribute_group cssdev_attr_group = {
 951	.attrs = cssdev_attrs,
 952};
 953
 954static struct attribute *cssdev_cm_attrs[] = {
 955	&dev_attr_cm_enable.attr,
 956	NULL,
 957};
 958
 959static struct attribute_group cssdev_cm_attr_group = {
 960	.attrs = cssdev_cm_attrs,
 961	.is_visible = cm_enable_mode,
 962};
 963
 964static const struct attribute_group *cssdev_attr_groups[] = {
 965	&cssdev_attr_group,
 966	&cssdev_cm_attr_group,
 967	NULL,
 968};
 969
 970static int __init setup_css(int nr)
 971{
 972	struct channel_subsystem *css;
 973	int ret;
 974
 975	css = kzalloc(sizeof(*css), GFP_KERNEL);
 976	if (!css)
 977		return -ENOMEM;
 978
 979	channel_subsystems[nr] = css;
 980	dev_set_name(&css->device, "css%x", nr);
 981	css->device.groups = cssdev_attr_groups;
 982	css->device.release = channel_subsystem_release;
 983	/*
 984	 * We currently allocate notifier bits with this (using
 985	 * css->device as the device argument with the DMA API)
 986	 * and are fine with 64 bit addresses.
 987	 */
 988	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
 989	if (ret) {
 990		kfree(css);
 991		goto out_err;
 992	}
 993
 994	mutex_init(&css->mutex);
 995	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
 996	if (!ret) {
 997		css->id_valid = true;
 998		pr_info("Partition identifier %01x.%01x\n", css->cssid,
 999			css->iid);
1000	}
1001	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));
1002
1003	ret = device_register(&css->device);
1004	if (ret) {
1005		put_device(&css->device);
1006		goto out_err;
1007	}
1008
1009	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
1010					 GFP_KERNEL);
1011	if (!css->pseudo_subchannel) {
1012		device_unregister(&css->device);
1013		ret = -ENOMEM;
1014		goto out_err;
1015	}
1016
1017	css->pseudo_subchannel->dev.parent = &css->device;
1018	css->pseudo_subchannel->dev.release = css_subchannel_release;
1019	mutex_init(&css->pseudo_subchannel->reg_mutex);
1020	css_sch_create_locks(css->pseudo_subchannel);
1021
1022	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
1023	ret = device_register(&css->pseudo_subchannel->dev);
1024	if (ret) {
1025		put_device(&css->pseudo_subchannel->dev);
1026		device_unregister(&css->device);
1027		goto out_err;
1028	}
1029
1030	return ret;
1031out_err:
1032	channel_subsystems[nr] = NULL;
1033	return ret;
1034}
1035
1036static int css_reboot_event(struct notifier_block *this,
1037			    unsigned long event,
1038			    void *ptr)
1039{
1040	struct channel_subsystem *css;
1041	int ret;
1042
1043	ret = NOTIFY_DONE;
1044	for_each_css(css) {
1045		mutex_lock(&css->mutex);
1046		if (css->cm_enabled)
1047			if (chsc_secm(css, 0))
1048				ret = NOTIFY_BAD;
1049		mutex_unlock(&css->mutex);
1050	}
1051
1052	return ret;
1053}
1054
1055static struct notifier_block css_reboot_notifier = {
1056	.notifier_call = css_reboot_event,
1057};
1058
1059#define  CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
1060static struct gen_pool *cio_dma_pool;
1061
1062/* Currently cio supports only a single css */
1063struct device *cio_get_dma_css_dev(void)
1064{
1065	return &channel_subsystems[0]->device;
1066}
1067
1068struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
1069{
1070	struct gen_pool *gp_dma;
1071	void *cpu_addr;
1072	dma_addr_t dma_addr;
1073	int i;
1074
1075	gp_dma = gen_pool_create(3, -1);
1076	if (!gp_dma)
1077		return NULL;
1078	for (i = 0; i < nr_pages; ++i) {
1079		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
1080					      CIO_DMA_GFP);
1081		if (!cpu_addr)
1082			return gp_dma;
1083		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
1084				  dma_addr, PAGE_SIZE, -1);
1085	}
1086	return gp_dma;
1087}
1088
1089static void __gp_dma_free_dma(struct gen_pool *pool,
1090			      struct gen_pool_chunk *chunk, void *data)
1091{
1092	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
1093
1094	dma_free_coherent((struct device *) data, chunk_size,
1095			 (void *) chunk->start_addr,
1096			 (dma_addr_t) chunk->phys_addr);
1097}
1098
1099void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
1100{
1101	if (!gp_dma)
1102		return;
1103	/* this is quite ugly but no better idea */
1104	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
1105	gen_pool_destroy(gp_dma);
1106}
1107
1108static int cio_dma_pool_init(void)
1109{
1110	/* No need to free up the resources: compiled in */
1111	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
1112	if (!cio_dma_pool)
1113		return -ENOMEM;
1114	return 0;
1115}
1116
1117void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
1118			size_t size)
1119{
1120	dma_addr_t dma_addr;
1121	unsigned long addr;
1122	size_t chunk_size;
1123
1124	if (!gp_dma)
1125		return NULL;
1126	addr = gen_pool_alloc(gp_dma, size);
1127	while (!addr) {
1128		chunk_size = round_up(size, PAGE_SIZE);
1129		addr = (unsigned long) dma_alloc_coherent(dma_dev,
1130					 chunk_size, &dma_addr, CIO_DMA_GFP);
1131		if (!addr)
1132			return NULL;
1133		gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
1134		addr = gen_pool_alloc(gp_dma, size);
1135	}
1136	return (void *) addr;
1137}
1138
1139void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
1140{
1141	if (!cpu_addr)
1142		return;
1143	memset(cpu_addr, 0, size);
1144	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
1145}
1146
1147/*
1148 * Allocate dma memory from the css global pool. Intended for memory not
1149 * specific to any single device within the css. The allocated memory
1150 * is not guaranteed to be 31-bit addressable.
1151 *
1152 * Caution: Not suitable for early stuff like console.
1153 */
1154void *cio_dma_zalloc(size_t size)
1155{
1156	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
1157}
1158
1159void cio_dma_free(void *cpu_addr, size_t size)
1160{
1161	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
1162}
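/*
 * Editor's note, illustrative only: a typical pairing of the two helpers
 * above, once the global pool has been set up from css_bus_init(). The
 * "struct foo" below is a hypothetical placeholder; as noted above, the
 * memory is not guaranteed to be 31-bit addressable:
 *
 *	struct foo *p = cio_dma_zalloc(sizeof(*p));
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	cio_dma_free(p, sizeof(*p));
 */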
1163
1164/*
 1165 * Now that the driver core is running, we can set up our channel subsystem.
 1166 * The struct subchannel instances are created during probing.
1167 */
1168static int __init css_bus_init(void)
1169{
1170	int ret, i;
1171
1172	ret = chsc_init();
1173	if (ret)
1174		return ret;
1175
1176	chsc_determine_css_characteristics();
1177	/* Try to enable MSS. */
1178	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
1179	if (ret)
1180		max_ssid = 0;
1181	else /* Success. */
1182		max_ssid = __MAX_SSID;
1183
1184	ret = slow_subchannel_init();
1185	if (ret)
1186		goto out;
1187
1188	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
1189	if (ret)
1190		goto out;
1191
1192	if ((ret = bus_register(&css_bus_type)))
1193		goto out;
1194
1195	/* Setup css structure. */
1196	for (i = 0; i <= MAX_CSS_IDX; i++) {
1197		ret = setup_css(i);
1198		if (ret)
1199			goto out_unregister;
1200	}
1201	ret = register_reboot_notifier(&css_reboot_notifier);
1202	if (ret)
1203		goto out_unregister;
1204	ret = cio_dma_pool_init();
1205	if (ret)
1206		goto out_unregister_rn;
1207	airq_init();
1208	css_init_done = 1;
1209
1210	/* Enable default isc for I/O subchannels. */
1211	isc_register(IO_SCH_ISC);
1212
1213	return 0;
1214out_unregister_rn:
1215	unregister_reboot_notifier(&css_reboot_notifier);
1216out_unregister:
1217	while (i-- > 0) {
1218		struct channel_subsystem *css = channel_subsystems[i];
1219		device_unregister(&css->pseudo_subchannel->dev);
1220		device_unregister(&css->device);
1221	}
1222	bus_unregister(&css_bus_type);
1223out:
1224	crw_unregister_handler(CRW_RSC_SCH);
1225	idset_free(slow_subchannel_set);
1226	chsc_init_cleanup();
1227	pr_alert("The CSS device driver initialization failed with "
1228		 "errno=%d\n", ret);
1229	return ret;
1230}
1231
1232static void __init css_bus_cleanup(void)
1233{
1234	struct channel_subsystem *css;
1235
1236	for_each_css(css) {
1237		device_unregister(&css->pseudo_subchannel->dev);
1238		device_unregister(&css->device);
1239	}
1240	bus_unregister(&css_bus_type);
1241	crw_unregister_handler(CRW_RSC_SCH);
1242	idset_free(slow_subchannel_set);
1243	chsc_init_cleanup();
1244	isc_unregister(IO_SCH_ISC);
1245}
1246
1247static int __init channel_subsystem_init(void)
1248{
1249	int ret;
1250
1251	ret = css_bus_init();
1252	if (ret)
1253		return ret;
1254	cio_work_q = create_singlethread_workqueue("cio");
1255	if (!cio_work_q) {
1256		ret = -ENOMEM;
1257		goto out_bus;
1258	}
1259	ret = io_subchannel_init();
1260	if (ret)
1261		goto out_wq;
1262
1263	/* Register subchannels which are already in use. */
1264	cio_register_early_subchannels();
1265	/* Start initial subchannel evaluation. */
1266	css_schedule_eval_all();
1267
1268	return ret;
1269out_wq:
1270	destroy_workqueue(cio_work_q);
1271out_bus:
1272	css_bus_cleanup();
1273	return ret;
1274}
1275subsys_initcall(channel_subsystem_init);
1276
1277static int css_settle(struct device_driver *drv, void *unused)
1278{
1279	struct css_driver *cssdrv = to_cssdriver(drv);
1280
1281	if (cssdrv->settle)
1282		return cssdrv->settle();
1283	return 0;
1284}
1285
1286int css_complete_work(void)
1287{
1288	int ret;
1289
1290	/* Wait for the evaluation of subchannels to finish. */
1291	ret = wait_event_interruptible(css_eval_wq,
1292				       atomic_read(&css_eval_scheduled) == 0);
1293	if (ret)
1294		return -EINTR;
1295	flush_workqueue(cio_work_q);
1296	/* Wait for the subchannel type specific initialization to finish */
1297	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
1298}
1299
1300
1301/*
1302 * Wait for the initialization of devices to finish, to make sure we are
 1303 * done with our setup before the search for the root device starts.
1304 */
1305static int __init channel_subsystem_init_sync(void)
1306{
1307	css_complete_work();
1308	return 0;
1309}
1310subsys_initcall_sync(channel_subsystem_init_sync);
1311
1312#ifdef CONFIG_PROC_FS
1313static ssize_t cio_settle_write(struct file *file, const char __user *buf,
1314				size_t count, loff_t *ppos)
1315{
1316	int ret;
1317
1318	/* Handle pending CRW's. */
1319	crw_wait_for_channel_report();
1320	ret = css_complete_work();
1321
1322	return ret ? ret : count;
1323}
1324
1325static const struct proc_ops cio_settle_proc_ops = {
1326	.proc_open	= nonseekable_open,
1327	.proc_write	= cio_settle_write,
1328	.proc_lseek	= no_llseek,
1329};
1330
1331static int __init cio_settle_init(void)
1332{
1333	struct proc_dir_entry *entry;
1334
1335	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
1336	if (!entry)
1337		return -ENOMEM;
1338	return 0;
1339}
1340device_initcall(cio_settle_init);
1341#endif /*CONFIG_PROC_FS*/
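/*
 * Editor's note, illustrative only: the cio_settle file registered above lets
 * user space wait until all pending channel report handling and subchannel
 * evaluation has completed. The exact invocation below is an assumption; any
 * write to the file triggers the wait:
 *
 *	echo 1 > /proc/cio_settle
 */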
1342
1343int sch_is_pseudo_sch(struct subchannel *sch)
1344{
1345	if (!sch->dev.parent)
1346		return 0;
1347	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
1348}
1349
1350static int css_bus_match(struct device *dev, struct device_driver *drv)
1351{
1352	struct subchannel *sch = to_subchannel(dev);
1353	struct css_driver *driver = to_cssdriver(drv);
1354	struct css_device_id *id;
1355
1356	/* When driver_override is set, only bind to the matching driver */
1357	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
1358		return 0;
1359
1360	for (id = driver->subchannel_type; id->match_flags; id++) {
1361		if (sch->st == id->type)
1362			return 1;
1363	}
1364
1365	return 0;
1366}
1367
1368static int css_probe(struct device *dev)
1369{
1370	struct subchannel *sch;
1371	int ret;
1372
1373	sch = to_subchannel(dev);
1374	sch->driver = to_cssdriver(dev->driver);
1375	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
1376	if (ret)
1377		sch->driver = NULL;
1378	return ret;
1379}
1380
1381static void css_remove(struct device *dev)
1382{
1383	struct subchannel *sch;
1384
1385	sch = to_subchannel(dev);
1386	if (sch->driver->remove)
1387		sch->driver->remove(sch);
1388	sch->driver = NULL;
1389}
1390
1391static void css_shutdown(struct device *dev)
1392{
1393	struct subchannel *sch;
1394
1395	sch = to_subchannel(dev);
1396	if (sch->driver && sch->driver->shutdown)
1397		sch->driver->shutdown(sch);
1398}
1399
1400static int css_uevent(const struct device *dev, struct kobj_uevent_env *env)
1401{
1402	const struct subchannel *sch = to_subchannel(dev);
1403	int ret;
1404
1405	ret = add_uevent_var(env, "ST=%01X", sch->st);
1406	if (ret)
1407		return ret;
1408	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
1409	return ret;
1410}
1411
1412static struct bus_type css_bus_type = {
1413	.name     = "css",
1414	.match    = css_bus_match,
1415	.probe    = css_probe,
1416	.remove   = css_remove,
1417	.shutdown = css_shutdown,
1418	.uevent   = css_uevent,
1419};
1420
1421/**
1422 * css_driver_register - register a css driver
1423 * @cdrv: css driver to register
1424 *
1425 * This is mainly a wrapper around driver_register that sets name
1426 * and bus_type in the embedded struct device_driver correctly.
1427 */
1428int css_driver_register(struct css_driver *cdrv)
1429{
1430	cdrv->drv.bus = &css_bus_type;
1431	return driver_register(&cdrv->drv);
1432}
1433EXPORT_SYMBOL_GPL(css_driver_register);
1434
1435/**
1436 * css_driver_unregister - unregister a css driver
1437 * @cdrv: css driver to unregister
1438 *
1439 * This is a wrapper around driver_unregister.
1440 */
1441void css_driver_unregister(struct css_driver *cdrv)
1442{
1443	driver_unregister(&cdrv->drv);
1444}
1445EXPORT_SYMBOL_GPL(css_driver_unregister);
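/*
 * Editor's note, illustrative only: a minimal, hypothetical css driver as the
 * registration interface above expects it, based on the fields consumed by
 * css_bus_match() and css_probe() in this file. Real drivers (for example the
 * I/O subchannel driver in device.c) fill in further callbacks such as
 * sch_event() and settle(); all names below are placeholders.
 *
 *	static struct css_device_id example_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },	(list terminator - match_flags of 0 ends the loop
 *			 in css_bus_match())
 *	};
 *
 *	static struct css_driver example_driver = {
 *		.drv = {
 *			.name = "example_sch",
 *		},
 *		.subchannel_type = example_ids,
 *		.probe  = example_probe,
 *		.remove = example_remove,
 *	};
 *
 *	Registration and teardown would then be:
 *		css_driver_register(&example_driver);
 *		css_driver_unregister(&example_driver);
 */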
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * driver for channel subsystem
   4 *
   5 * Copyright IBM Corp. 2002, 2010
   6 *
   7 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
   8 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
   9 */
  10
  11#define KMSG_COMPONENT "cio"
  12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  13
  14#include <linux/export.h>
  15#include <linux/init.h>
  16#include <linux/device.h>
  17#include <linux/slab.h>
  18#include <linux/errno.h>
  19#include <linux/list.h>
  20#include <linux/reboot.h>
  21#include <linux/suspend.h>
  22#include <linux/proc_fs.h>
  23#include <linux/genalloc.h>
  24#include <linux/dma-mapping.h>
  25#include <asm/isc.h>
  26#include <asm/crw.h>
  27
  28#include "css.h"
  29#include "cio.h"
  30#include "blacklist.h"
  31#include "cio_debug.h"
  32#include "ioasm.h"
  33#include "chsc.h"
  34#include "device.h"
  35#include "idset.h"
  36#include "chp.h"
  37
  38int css_init_done = 0;
  39int max_ssid;
  40
  41#define MAX_CSS_IDX 0
  42struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
  43static struct bus_type css_bus_type;
  44
  45int
  46for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
  47{
  48	struct subchannel_id schid;
  49	int ret;
  50
  51	init_subchannel_id(&schid);
  52	do {
  53		do {
  54			ret = fn(schid, data);
  55			if (ret)
  56				break;
  57		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
  58		schid.sch_no = 0;
  59	} while (schid.ssid++ < max_ssid);
  60	return ret;
  61}
  62
  63struct cb_data {
  64	void *data;
  65	struct idset *set;
  66	int (*fn_known_sch)(struct subchannel *, void *);
  67	int (*fn_unknown_sch)(struct subchannel_id, void *);
  68};
  69
  70static int call_fn_known_sch(struct device *dev, void *data)
  71{
  72	struct subchannel *sch = to_subchannel(dev);
  73	struct cb_data *cb = data;
  74	int rc = 0;
  75
  76	if (cb->set)
  77		idset_sch_del(cb->set, sch->schid);
  78	if (cb->fn_known_sch)
  79		rc = cb->fn_known_sch(sch, cb->data);
  80	return rc;
  81}
  82
  83static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
  84{
  85	struct cb_data *cb = data;
  86	int rc = 0;
  87
  88	if (idset_sch_contains(cb->set, schid))
  89		rc = cb->fn_unknown_sch(schid, cb->data);
  90	return rc;
  91}
  92
  93static int call_fn_all_sch(struct subchannel_id schid, void *data)
  94{
  95	struct cb_data *cb = data;
  96	struct subchannel *sch;
  97	int rc = 0;
  98
  99	sch = get_subchannel_by_schid(schid);
 100	if (sch) {
 101		if (cb->fn_known_sch)
 102			rc = cb->fn_known_sch(sch, cb->data);
 103		put_device(&sch->dev);
 104	} else {
 105		if (cb->fn_unknown_sch)
 106			rc = cb->fn_unknown_sch(schid, cb->data);
 107	}
 108
 109	return rc;
 110}
 111
 112int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
 113			       int (*fn_unknown)(struct subchannel_id,
 114			       void *), void *data)
 115{
 116	struct cb_data cb;
 117	int rc;
 118
 119	cb.data = data;
 120	cb.fn_known_sch = fn_known;
 121	cb.fn_unknown_sch = fn_unknown;
 122
 123	if (fn_known && !fn_unknown) {
 124		/* Skip idset allocation in case of known-only loop. */
 125		cb.set = NULL;
 126		return bus_for_each_dev(&css_bus_type, NULL, &cb,
 127					call_fn_known_sch);
 128	}
 129
 130	cb.set = idset_sch_new();
 131	if (!cb.set)
 132		/* fall back to brute force scanning in case of oom */
 133		return for_each_subchannel(call_fn_all_sch, &cb);
 134
 135	idset_fill(cb.set);
 136
 137	/* Process registered subchannels. */
 138	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
 139	if (rc)
 140		goto out;
 141	/* Process unregistered subchannels. */
 142	if (fn_unknown)
 143		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
 144out:
 145	idset_free(cb.set);
 146
 147	return rc;
 148}
 149
 150static void css_sch_todo(struct work_struct *work);
 151
 152static int css_sch_create_locks(struct subchannel *sch)
 153{
 154	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
 155	if (!sch->lock)
 156		return -ENOMEM;
 157
 158	spin_lock_init(sch->lock);
 159	mutex_init(&sch->reg_mutex);
 160
 161	return 0;
 162}
 163
 164static void css_subchannel_release(struct device *dev)
 165{
 166	struct subchannel *sch = to_subchannel(dev);
 167
 168	sch->config.intparm = 0;
 169	cio_commit_config(sch);
 170	kfree(sch->driver_override);
 171	kfree(sch->lock);
 172	kfree(sch);
 173}
 174
 175static int css_validate_subchannel(struct subchannel_id schid,
 176				   struct schib *schib)
 177{
 178	int err;
 179
 180	switch (schib->pmcw.st) {
 181	case SUBCHANNEL_TYPE_IO:
 182	case SUBCHANNEL_TYPE_MSG:
 183		if (!css_sch_is_valid(schib))
 184			err = -ENODEV;
 185		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
 186			CIO_MSG_EVENT(6, "Blacklisted device detected "
 187				      "at devno %04X, subchannel set %x\n",
 188				      schib->pmcw.dev, schid.ssid);
 189			err = -ENODEV;
 190		} else
 191			err = 0;
 192		break;
 193	default:
 194		err = 0;
 195	}
 196	if (err)
 197		goto out;
 198
 199	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
 200		      schid.ssid, schid.sch_no, schib->pmcw.st);
 201out:
 202	return err;
 203}
 204
 205struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
 206					struct schib *schib)
 207{
 208	struct subchannel *sch;
 209	int ret;
 210
 211	ret = css_validate_subchannel(schid, schib);
 212	if (ret < 0)
 213		return ERR_PTR(ret);
 214
 215	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
 216	if (!sch)
 217		return ERR_PTR(-ENOMEM);
 218
 219	sch->schid = schid;
 220	sch->schib = *schib;
 221	sch->st = schib->pmcw.st;
 222
 223	ret = css_sch_create_locks(sch);
 224	if (ret)
 225		goto err;
 226
 227	INIT_WORK(&sch->todo_work, css_sch_todo);
 228	sch->dev.release = &css_subchannel_release;
 
 229	device_initialize(&sch->dev);
 230	/*
 231	 * The physical addresses of some the dma structures that can
 232	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
 233	 */
 234	sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
 
 
 235	/*
 236	 * But we don't have such restrictions imposed on the stuff that
 237	 * is handled by the streaming API.
 238	 */
 239	sch->dma_mask = DMA_BIT_MASK(64);
 240	sch->dev.dma_mask = &sch->dma_mask;
 
 
 241	return sch;
 242
 243err:
 244	kfree(sch);
 245	return ERR_PTR(ret);
 246}
 247
 248static int css_sch_device_register(struct subchannel *sch)
 249{
 250	int ret;
 251
 252	mutex_lock(&sch->reg_mutex);
 253	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
 254		     sch->schid.sch_no);
 255	ret = device_add(&sch->dev);
 256	mutex_unlock(&sch->reg_mutex);
 257	return ret;
 258}
 259
 260/**
 261 * css_sch_device_unregister - unregister a subchannel
 262 * @sch: subchannel to be unregistered
 263 */
 264void css_sch_device_unregister(struct subchannel *sch)
 265{
 266	mutex_lock(&sch->reg_mutex);
 267	if (device_is_registered(&sch->dev))
 268		device_unregister(&sch->dev);
 269	mutex_unlock(&sch->reg_mutex);
 270}
 271EXPORT_SYMBOL_GPL(css_sch_device_unregister);
 272
 273static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
 274{
 275	int i;
 276	int mask;
 277
 278	memset(ssd, 0, sizeof(struct chsc_ssd_info));
 279	ssd->path_mask = pmcw->pim;
 280	for (i = 0; i < 8; i++) {
 281		mask = 0x80 >> i;
 282		if (pmcw->pim & mask) {
 283			chp_id_init(&ssd->chpid[i]);
 284			ssd->chpid[i].id = pmcw->chpid[i];
 285		}
 286	}
 287}
 288
 289static void ssd_register_chpids(struct chsc_ssd_info *ssd)
 290{
 291	int i;
 292	int mask;
 293
 294	for (i = 0; i < 8; i++) {
 295		mask = 0x80 >> i;
 296		if (ssd->path_mask & mask)
 297			chp_new(ssd->chpid[i]);
 298	}
 299}
 300
 301void css_update_ssd_info(struct subchannel *sch)
 302{
 303	int ret;
 304
 305	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
 306	if (ret)
 307		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
 308
 309	ssd_register_chpids(&sch->ssd_info);
 310}
 311
 312static ssize_t type_show(struct device *dev, struct device_attribute *attr,
 313			 char *buf)
 314{
 315	struct subchannel *sch = to_subchannel(dev);
 316
 317	return sprintf(buf, "%01x\n", sch->st);
 318}
 319
 320static DEVICE_ATTR_RO(type);
 321
 322static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 323			     char *buf)
 324{
 325	struct subchannel *sch = to_subchannel(dev);
 326
 327	return sprintf(buf, "css:t%01X\n", sch->st);
 328}
 329
 330static DEVICE_ATTR_RO(modalias);
 331
 332static ssize_t driver_override_store(struct device *dev,
 333				     struct device_attribute *attr,
 334				     const char *buf, size_t count)
 335{
 336	struct subchannel *sch = to_subchannel(dev);
 337	char *driver_override, *old, *cp;
 338
 339	/* We need to keep extra room for a newline */
 340	if (count >= (PAGE_SIZE - 1))
 341		return -EINVAL;
 342
 343	driver_override = kstrndup(buf, count, GFP_KERNEL);
 344	if (!driver_override)
 345		return -ENOMEM;
 346
 347	cp = strchr(driver_override, '\n');
 348	if (cp)
 349		*cp = '\0';
 350
 351	device_lock(dev);
 352	old = sch->driver_override;
 353	if (strlen(driver_override)) {
 354		sch->driver_override = driver_override;
 355	} else {
 356		kfree(driver_override);
 357		sch->driver_override = NULL;
 358	}
 359	device_unlock(dev);
 360
 361	kfree(old);
 362
 363	return count;
 364}
 365
 366static ssize_t driver_override_show(struct device *dev,
 367				    struct device_attribute *attr, char *buf)
 368{
 369	struct subchannel *sch = to_subchannel(dev);
 370	ssize_t len;
 371
 372	device_lock(dev);
 373	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
 374	device_unlock(dev);
 375	return len;
 376}
 377static DEVICE_ATTR_RW(driver_override);
 378
 379static struct attribute *subch_attrs[] = {
 380	&dev_attr_type.attr,
 381	&dev_attr_modalias.attr,
 382	&dev_attr_driver_override.attr,
 383	NULL,
 384};
 385
 386static struct attribute_group subch_attr_group = {
 387	.attrs = subch_attrs,
 388};
 389
 390static const struct attribute_group *default_subch_attr_groups[] = {
 391	&subch_attr_group,
 392	NULL,
 393};
 394
 395static ssize_t chpids_show(struct device *dev,
 396			   struct device_attribute *attr,
 397			   char *buf)
 398{
 399	struct subchannel *sch = to_subchannel(dev);
 400	struct chsc_ssd_info *ssd = &sch->ssd_info;
 401	ssize_t ret = 0;
 402	int mask;
 403	int chp;
 404
 405	for (chp = 0; chp < 8; chp++) {
 406		mask = 0x80 >> chp;
 407		if (ssd->path_mask & mask)
 408			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
 409		else
 410			ret += sprintf(buf + ret, "00 ");
 411	}
 412	ret += sprintf(buf + ret, "\n");
 413	return ret;
 414}
 415static DEVICE_ATTR_RO(chpids);
 416
 417static ssize_t pimpampom_show(struct device *dev,
 418			      struct device_attribute *attr,
 419			      char *buf)
 420{
 421	struct subchannel *sch = to_subchannel(dev);
 422	struct pmcw *pmcw = &sch->schib.pmcw;
 423
 424	return sprintf(buf, "%02x %02x %02x\n",
 425		       pmcw->pim, pmcw->pam, pmcw->pom);
 426}
 427static DEVICE_ATTR_RO(pimpampom);
 428
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 429static struct attribute *io_subchannel_type_attrs[] = {
 430	&dev_attr_chpids.attr,
 431	&dev_attr_pimpampom.attr,
 
 432	NULL,
 433};
 434ATTRIBUTE_GROUPS(io_subchannel_type);
 435
 436static const struct device_type io_subchannel_type = {
 437	.groups = io_subchannel_type_groups,
 438};
 439
 440int css_register_subchannel(struct subchannel *sch)
 441{
 442	int ret;
 443
 444	/* Initialize the subchannel structure */
 445	sch->dev.parent = &channel_subsystems[0]->device;
 446	sch->dev.bus = &css_bus_type;
 447	sch->dev.groups = default_subch_attr_groups;
 448
 449	if (sch->st == SUBCHANNEL_TYPE_IO)
 450		sch->dev.type = &io_subchannel_type;
 451
 452	/*
 453	 * We don't want to generate uevents for I/O subchannels that don't
 454	 * have a working ccw device behind them since they will be
 455	 * unregistered before they can be used anyway, so we delay the add
 456	 * uevent until after device recognition was successful.
 457	 * Note that we suppress the uevent for all subchannel types;
 458	 * the subchannel driver can decide itself when it wants to inform
 459	 * userspace of its existence.
 460	 */
 461	dev_set_uevent_suppress(&sch->dev, 1);
 462	css_update_ssd_info(sch);
 463	/* make it known to the system */
 464	ret = css_sch_device_register(sch);
 465	if (ret) {
 466		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
 467			      sch->schid.ssid, sch->schid.sch_no, ret);
 468		return ret;
 469	}
 470	if (!sch->driver) {
 471		/*
 472		 * No driver matched. Generate the uevent now so that
 473		 * a fitting driver module may be loaded based on the
 474		 * modalias.
 475		 */
 476		dev_set_uevent_suppress(&sch->dev, 0);
 477		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
 478	}
 479	return ret;
 480}
 481
 482static int css_probe_device(struct subchannel_id schid, struct schib *schib)
 483{
 484	struct subchannel *sch;
 485	int ret;
 486
 487	sch = css_alloc_subchannel(schid, schib);
 488	if (IS_ERR(sch))
 489		return PTR_ERR(sch);
 490
 491	ret = css_register_subchannel(sch);
 492	if (ret)
 493		put_device(&sch->dev);
 494
 495	return ret;
 496}
 497
 498static int
 499check_subchannel(struct device *dev, const void *data)
 500{
 501	struct subchannel *sch;
 502	struct subchannel_id *schid = (void *)data;
 503
 504	sch = to_subchannel(dev);
 505	return schid_equal(&sch->schid, schid);
 506}
 507
 508struct subchannel *
 509get_subchannel_by_schid(struct subchannel_id schid)
 510{
 511	struct device *dev;
 512
 513	dev = bus_find_device(&css_bus_type, NULL,
 514			      &schid, check_subchannel);
 515
 516	return dev ? to_subchannel(dev) : NULL;
 517}
 518
 519/**
 520 * css_sch_is_valid() - check if a subchannel is valid
 521 * @schib: subchannel information block for the subchannel
 522 */
 523int css_sch_is_valid(struct schib *schib)
 524{
 525	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
 526		return 0;
 527	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
 528		return 0;
 529	return 1;
 530}
 531EXPORT_SYMBOL_GPL(css_sch_is_valid);
 532
 533static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
 534{
 535	struct schib schib;
 536	int ccode;
 537
 538	if (!slow) {
 539		/* Will be done on the slow path. */
 540		return -EAGAIN;
 541	}
 542	/*
 543	 * The first subchannel that is not-operational (ccode==3)
 544	 * indicates that there aren't any more devices available.
 545	 * If stsch gets an exception, it means the current subchannel set
 546	 * is not valid.
 547	 */
 548	ccode = stsch(schid, &schib);
 549	if (ccode)
 550		return (ccode == 3) ? -ENXIO : ccode;
 551
 552	return css_probe_device(schid, &schib);
 553}
 554
 555static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
 556{
 557	int ret = 0;
 558
 559	if (sch->driver) {
 560		if (sch->driver->sch_event)
 561			ret = sch->driver->sch_event(sch, slow);
 562		else
 563			dev_dbg(&sch->dev,
 564				"Got subchannel machine check but "
 565				"no sch_event handler provided.\n");
 566	}
 567	if (ret != 0 && ret != -EAGAIN) {
 568		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
 569			      sch->schid.ssid, sch->schid.sch_no, ret);
 570	}
 571	return ret;
 572}
 573
 574static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
 575{
 576	struct subchannel *sch;
 577	int ret;
 578
 579	sch = get_subchannel_by_schid(schid);
 580	if (sch) {
 581		ret = css_evaluate_known_subchannel(sch, slow);
 582		put_device(&sch->dev);
 583	} else
 584		ret = css_evaluate_new_subchannel(schid, slow);
 585	if (ret == -EAGAIN)
 586		css_schedule_eval(schid);
 587}
 588
 589/**
 590 * css_sched_sch_todo - schedule a subchannel operation
 591 * @sch: subchannel
 592 * @todo: todo
 593 *
 594 * Schedule the operation identified by @todo to be performed on the slow path
 595 * workqueue. Do nothing if another operation with higher priority is already
 596 * scheduled. Needs to be called with subchannel lock held.
 597 */
 598void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
 599{
 600	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
 601		      sch->schid.ssid, sch->schid.sch_no, todo);
 602	if (sch->todo >= todo)
 603		return;
 604	/* Get workqueue ref. */
 605	if (!get_device(&sch->dev))
 606		return;
 607	sch->todo = todo;
 608	if (!queue_work(cio_work_q, &sch->todo_work)) {
 609		/* Already queued, release workqueue ref. */
 610		put_device(&sch->dev);
 611	}
 612}
 613EXPORT_SYMBOL_GPL(css_sched_sch_todo);
 614
 615static void css_sch_todo(struct work_struct *work)
 616{
 617	struct subchannel *sch;
 618	enum sch_todo todo;
 619	int ret;
 620
 621	sch = container_of(work, struct subchannel, todo_work);
 622	/* Find out todo. */
 623	spin_lock_irq(sch->lock);
 624	todo = sch->todo;
 625	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
 626		      sch->schid.sch_no, todo);
 627	sch->todo = SCH_TODO_NOTHING;
 628	spin_unlock_irq(sch->lock);
 629	/* Perform todo. */
 630	switch (todo) {
 631	case SCH_TODO_NOTHING:
 632		break;
 633	case SCH_TODO_EVAL:
 634		ret = css_evaluate_known_subchannel(sch, 1);
 635		if (ret == -EAGAIN) {
 636			spin_lock_irq(sch->lock);
 637			css_sched_sch_todo(sch, todo);
 638			spin_unlock_irq(sch->lock);
 639		}
 640		break;
 641	case SCH_TODO_UNREG:
 642		css_sch_device_unregister(sch);
 643		break;
 644	}
 645	/* Release workqueue ref. */
 646	put_device(&sch->dev);
 647}
 648
 649static struct idset *slow_subchannel_set;
 650static spinlock_t slow_subchannel_lock;
 651static wait_queue_head_t css_eval_wq;
 652static atomic_t css_eval_scheduled;
 653
 654static int __init slow_subchannel_init(void)
 655{
 656	spin_lock_init(&slow_subchannel_lock);
 657	atomic_set(&css_eval_scheduled, 0);
 658	init_waitqueue_head(&css_eval_wq);
 659	slow_subchannel_set = idset_sch_new();
 660	if (!slow_subchannel_set) {
 661		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
 662		return -ENOMEM;
 663	}
 664	return 0;
 665}
 666
 667static int slow_eval_known_fn(struct subchannel *sch, void *data)
 668{
 669	int eval;
 670	int rc;
 671
 672	spin_lock_irq(&slow_subchannel_lock);
 673	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
 674	idset_sch_del(slow_subchannel_set, sch->schid);
 675	spin_unlock_irq(&slow_subchannel_lock);
 676	if (eval) {
 677		rc = css_evaluate_known_subchannel(sch, 1);
 678		if (rc == -EAGAIN)
  679			css_schedule_eval(sch->schid);
 680	}
 681	return 0;
 682}
 683
 684static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
 685{
 686	int eval;
 687	int rc = 0;
 688
 689	spin_lock_irq(&slow_subchannel_lock);
 690	eval = idset_sch_contains(slow_subchannel_set, schid);
 691	idset_sch_del(slow_subchannel_set, schid);
 692	spin_unlock_irq(&slow_subchannel_lock);
 693	if (eval) {
 694		rc = css_evaluate_new_subchannel(schid, 1);
 695		switch (rc) {
 696		case -EAGAIN:
 697			css_schedule_eval(schid);
 698			rc = 0;
 699			break;
 700		case -ENXIO:
 701		case -ENOMEM:
 702		case -EIO:
 703			/* These should abort looping */
 704			spin_lock_irq(&slow_subchannel_lock);
 705			idset_sch_del_subseq(slow_subchannel_set, schid);
 706			spin_unlock_irq(&slow_subchannel_lock);
 707			break;
 708		default:
 709			rc = 0;
 710		}
 711		/* Allow scheduling here since the containing loop might
 712		 * take a while.  */
 713		cond_resched();
 714	}
 715	return rc;
 716}
 717
 718static void css_slow_path_func(struct work_struct *unused)
 719{
 720	unsigned long flags;
 721
 722	CIO_TRACE_EVENT(4, "slowpath");
 723	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
 724				   NULL);
 725	spin_lock_irqsave(&slow_subchannel_lock, flags);
 726	if (idset_is_empty(slow_subchannel_set)) {
 727		atomic_set(&css_eval_scheduled, 0);
 728		wake_up(&css_eval_wq);
 729	}
 730	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 731}
 732
 733static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
 734struct workqueue_struct *cio_work_q;
 735
 736void css_schedule_eval(struct subchannel_id schid)
 737{
 738	unsigned long flags;
 739
 740	spin_lock_irqsave(&slow_subchannel_lock, flags);
 741	idset_sch_add(slow_subchannel_set, schid);
 742	atomic_set(&css_eval_scheduled, 1);
 743	queue_delayed_work(cio_work_q, &slow_path_work, 0);
 744	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 745}
 746
 747void css_schedule_eval_all(void)
 748{
 749	unsigned long flags;
 750
 751	spin_lock_irqsave(&slow_subchannel_lock, flags);
 752	idset_fill(slow_subchannel_set);
 753	atomic_set(&css_eval_scheduled, 1);
 754	queue_delayed_work(cio_work_q, &slow_path_work, 0);
 755	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 756}
 757
  758	static int __unset_registered(struct device *dev, void *data)
 759{
 760	struct idset *set = data;
 761	struct subchannel *sch = to_subchannel(dev);
 762
  763		idset_sch_del(set, sch->schid);
 764	return 0;
 765}
 766
 767void css_schedule_eval_all_unreg(unsigned long delay)
 768{
 769	unsigned long flags;
 770	struct idset *unreg_set;
 771
 772	/* Find unregistered subchannels. */
 773	unreg_set = idset_sch_new();
 774	if (!unreg_set) {
 775		/* Fallback. */
 776		css_schedule_eval_all();
 777		return;
 778	}
 779	idset_fill(unreg_set);
  780	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
 781	/* Apply to slow_subchannel_set. */
 782	spin_lock_irqsave(&slow_subchannel_lock, flags);
 783	idset_add_set(slow_subchannel_set, unreg_set);
 784	atomic_set(&css_eval_scheduled, 1);
 785	queue_delayed_work(cio_work_q, &slow_path_work, delay);
 786	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 787	idset_free(unreg_set);
 788}
 789
 790void css_wait_for_slow_path(void)
 791{
 792	flush_workqueue(cio_work_q);
 793}
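/*
 * Illustrative sketch, not part of the driver: subchannels queued via
 * css_schedule_eval() are evaluated asynchronously on cio_work_q; a caller
 * that needs the evaluation to have completed can flush the queue through
 * css_wait_for_slow_path() afterwards.  The function name below is
 * hypothetical.
 */
static void __maybe_unused example_eval_and_wait(struct subchannel_id schid)
{
	css_schedule_eval(schid);	/* mark schid in slow_subchannel_set */
	css_wait_for_slow_path();	/* wait for slow_path_work to finish */
}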
 794
 795/* Schedule reprobing of all unregistered subchannels. */
 796void css_schedule_reprobe(void)
 797{
 798	/* Schedule with a delay to allow merging of subsequent calls. */
 799	css_schedule_eval_all_unreg(1 * HZ);
 800}
 801EXPORT_SYMBOL_GPL(css_schedule_reprobe);
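/*
 * Illustrative sketch, not part of the driver: code that learns that a
 * previously unusable resource (a channel path, for example) is available
 * again can use css_schedule_reprobe() so that subchannels which could not
 * be registered before get another evaluation pass.  The function name
 * below is hypothetical.
 */
static void __maybe_unused example_resource_available(void)
{
	/* Delayed by one second above, so back-to-back calls are merged. */
	css_schedule_reprobe();
}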
 802
 803/*
 804 * Called from the machine check handler for subchannel report words.
 805 */
 806static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 807{
 808	struct subchannel_id mchk_schid;
 809	struct subchannel *sch;
 810
 811	if (overflow) {
 812		css_schedule_eval_all();
 813		return;
 814	}
 815	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
 816		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
 817		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
 818		      crw0->erc, crw0->rsid);
 819	if (crw1)
 820		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
 821			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
 822			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
 823			      crw1->anc, crw1->erc, crw1->rsid);
 824	init_subchannel_id(&mchk_schid);
 825	mchk_schid.sch_no = crw0->rsid;
 826	if (crw1)
 827		mchk_schid.ssid = (crw1->rsid >> 4) & 3;
 828
 829	if (crw0->erc == CRW_ERC_PMOD) {
 830		sch = get_subchannel_by_schid(mchk_schid);
 831		if (sch) {
 832			css_update_ssd_info(sch);
 833			put_device(&sch->dev);
 834		}
 835	}
 836	/*
 837	 * Since we are always presented with IPI in the CRW, we have to
 838	 * use stsch() to find out if the subchannel in question has come
 839	 * or gone.
 840	 */
 841	css_evaluate_subchannel(mchk_schid, 0);
 842}
 843
 844static void __init
 845css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
 846{
 847	struct cpuid cpu_id;
 848
 849	if (css_general_characteristics.mcss) {
 850		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
 851		css->global_pgid.pgid_high.ext_cssid.cssid =
 852			(css->cssid < 0) ? 0 : css->cssid;
 853	} else {
 854		css->global_pgid.pgid_high.cpu_addr = stap();
 855	}
 856	get_cpu_id(&cpu_id);
 857	css->global_pgid.cpu_id = cpu_id.ident;
 858	css->global_pgid.cpu_model = cpu_id.machine;
 859	css->global_pgid.tod_high = tod_high;
 860}
 861
 862static void channel_subsystem_release(struct device *dev)
 863{
 864	struct channel_subsystem *css = to_css(dev);
 865
 866	mutex_destroy(&css->mutex);
 867	kfree(css);
 868}
 869
 870static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
 871			       char *buf)
 872{
 873	struct channel_subsystem *css = to_css(dev);
 874
 875	if (css->cssid < 0)
 876		return -EINVAL;
 877
 878	return sprintf(buf, "%x\n", css->cssid);
 879}
 880static DEVICE_ATTR_RO(real_cssid);
  881
 882static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
 883			      char *buf)
 884{
 885	struct channel_subsystem *css = to_css(dev);
 886	int ret;
 887
 888	mutex_lock(&css->mutex);
 889	ret = sprintf(buf, "%x\n", css->cm_enabled);
 890	mutex_unlock(&css->mutex);
 891	return ret;
 892}
 893
 894static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
 895			       const char *buf, size_t count)
 896{
 897	struct channel_subsystem *css = to_css(dev);
 898	unsigned long val;
 899	int ret;
 900
 901	ret = kstrtoul(buf, 16, &val);
 902	if (ret)
 903		return ret;
 904	mutex_lock(&css->mutex);
 905	switch (val) {
 906	case 0:
 907		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
 908		break;
 909	case 1:
 910		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
 911		break;
 912	default:
 913		ret = -EINVAL;
 914	}
 915	mutex_unlock(&css->mutex);
 916	return ret < 0 ? ret : count;
 917}
 918static DEVICE_ATTR_RW(cm_enable);
 919
 920static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
 921			      int index)
 922{
 923	return css_chsc_characteristics.secm ? attr->mode : 0;
 924}
 925
 926static struct attribute *cssdev_attrs[] = {
  927		&dev_attr_real_cssid.attr,
 928	NULL,
 929};
 930
 931static struct attribute_group cssdev_attr_group = {
 932	.attrs = cssdev_attrs,
 933};
 934
 935static struct attribute *cssdev_cm_attrs[] = {
 936	&dev_attr_cm_enable.attr,
 937	NULL,
 938};
 939
 940static struct attribute_group cssdev_cm_attr_group = {
 941	.attrs = cssdev_cm_attrs,
 942	.is_visible = cm_enable_mode,
 943};
 944
 945static const struct attribute_group *cssdev_attr_groups[] = {
 946	&cssdev_attr_group,
 947	&cssdev_cm_attr_group,
 948	NULL,
 949};
 950
 951static int __init setup_css(int nr)
 952{
 953	struct channel_subsystem *css;
 954	int ret;
 955
 956	css = kzalloc(sizeof(*css), GFP_KERNEL);
 957	if (!css)
 958		return -ENOMEM;
 959
 960	channel_subsystems[nr] = css;
 961	dev_set_name(&css->device, "css%x", nr);
 962	css->device.groups = cssdev_attr_groups;
 963	css->device.release = channel_subsystem_release;
 964	/*
 965	 * We currently allocate notifier bits with this (using
 966	 * css->device as the device argument with the DMA API)
 967	 * and are fine with 64 bit addresses.
 968	 */
 969	css->device.coherent_dma_mask = DMA_BIT_MASK(64);
  970	css->device.dma_mask = &css->device.coherent_dma_mask;
 971
 972	mutex_init(&css->mutex);
  973	css->cssid = chsc_get_cssid(nr);
 974	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));
 975
 976	ret = device_register(&css->device);
 977	if (ret) {
 978		put_device(&css->device);
 979		goto out_err;
 980	}
 981
 982	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
 983					 GFP_KERNEL);
 984	if (!css->pseudo_subchannel) {
 985		device_unregister(&css->device);
 986		ret = -ENOMEM;
 987		goto out_err;
 988	}
 989
 990	css->pseudo_subchannel->dev.parent = &css->device;
 991	css->pseudo_subchannel->dev.release = css_subchannel_release;
 992	mutex_init(&css->pseudo_subchannel->reg_mutex);
 993	ret = css_sch_create_locks(css->pseudo_subchannel);
 994	if (ret) {
 995		kfree(css->pseudo_subchannel);
 996		device_unregister(&css->device);
 997		goto out_err;
 998	}
 999
1000	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
1001	ret = device_register(&css->pseudo_subchannel->dev);
1002	if (ret) {
1003		put_device(&css->pseudo_subchannel->dev);
1004		device_unregister(&css->device);
1005		goto out_err;
1006	}
1007
1008	return ret;
1009out_err:
1010	channel_subsystems[nr] = NULL;
1011	return ret;
1012}
1013
1014static int css_reboot_event(struct notifier_block *this,
1015			    unsigned long event,
1016			    void *ptr)
1017{
1018	struct channel_subsystem *css;
1019	int ret;
1020
1021	ret = NOTIFY_DONE;
1022	for_each_css(css) {
1023		mutex_lock(&css->mutex);
1024		if (css->cm_enabled)
1025			if (chsc_secm(css, 0))
1026				ret = NOTIFY_BAD;
1027		mutex_unlock(&css->mutex);
1028	}
1029
1030	return ret;
1031}
1032
1033static struct notifier_block css_reboot_notifier = {
1034	.notifier_call = css_reboot_event,
1035};
1036
1037/*
1038 * Since the css devices are neither on a bus nor have a class
1039 * nor have a special device type, we cannot stop/restart channel
1040 * path measurements via the normal suspend/resume callbacks, but have
1041 * to use notifiers.
1042 */
1043static int css_power_event(struct notifier_block *this, unsigned long event,
1044			   void *ptr)
1045{
1046	struct channel_subsystem *css;
1047	int ret;
1048
1049	switch (event) {
1050	case PM_HIBERNATION_PREPARE:
1051	case PM_SUSPEND_PREPARE:
1052		ret = NOTIFY_DONE;
1053		for_each_css(css) {
1054			mutex_lock(&css->mutex);
1055			if (!css->cm_enabled) {
1056				mutex_unlock(&css->mutex);
1057				continue;
1058			}
1059			ret = __chsc_do_secm(css, 0);
1060			ret = notifier_from_errno(ret);
1061			mutex_unlock(&css->mutex);
1062		}
1063		break;
1064	case PM_POST_HIBERNATION:
1065	case PM_POST_SUSPEND:
1066		ret = NOTIFY_DONE;
1067		for_each_css(css) {
1068			mutex_lock(&css->mutex);
1069			if (!css->cm_enabled) {
1070				mutex_unlock(&css->mutex);
1071				continue;
1072			}
1073			ret = __chsc_do_secm(css, 1);
1074			ret = notifier_from_errno(ret);
1075			mutex_unlock(&css->mutex);
1076		}
 1077		/* Search for subchannels that appeared during hibernation. */
1078		css_schedule_reprobe();
1079		break;
1080	default:
1081		ret = NOTIFY_DONE;
1082	}
1083	return ret;
1084
1085}
1086static struct notifier_block css_power_notifier = {
1087	.notifier_call = css_power_event,
1088};
1089
1090#define  CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
1091static struct gen_pool *cio_dma_pool;
1092
1093/* Currently cio supports only a single css */
1094struct device *cio_get_dma_css_dev(void)
1095{
1096	return &channel_subsystems[0]->device;
1097}
1098
1099struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
1100{
1101	struct gen_pool *gp_dma;
1102	void *cpu_addr;
1103	dma_addr_t dma_addr;
1104	int i;
1105
1106	gp_dma = gen_pool_create(3, -1);
1107	if (!gp_dma)
1108		return NULL;
1109	for (i = 0; i < nr_pages; ++i) {
1110		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
1111					      CIO_DMA_GFP);
1112		if (!cpu_addr)
1113			return gp_dma;
1114		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
1115				  dma_addr, PAGE_SIZE, -1);
1116	}
1117	return gp_dma;
1118}
1119
1120static void __gp_dma_free_dma(struct gen_pool *pool,
1121			      struct gen_pool_chunk *chunk, void *data)
1122{
1123	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
1124
1125	dma_free_coherent((struct device *) data, chunk_size,
1126			 (void *) chunk->start_addr,
1127			 (dma_addr_t) chunk->phys_addr);
1128}
1129
1130void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
1131{
1132	if (!gp_dma)
1133		return;
 1134	/* This is quite ugly, but there is no better way to free the chunks. */
1135	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
1136	gen_pool_destroy(gp_dma);
1137}
1138
1139static int cio_dma_pool_init(void)
1140{
1141	/* No need to free up the resources: compiled in */
1142	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
1143	if (!cio_dma_pool)
1144		return -ENOMEM;
1145	return 0;
1146}
1147
1148void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
1149			size_t size)
1150{
1151	dma_addr_t dma_addr;
1152	unsigned long addr;
1153	size_t chunk_size;
1154
1155	if (!gp_dma)
1156		return NULL;
1157	addr = gen_pool_alloc(gp_dma, size);
1158	while (!addr) {
1159		chunk_size = round_up(size, PAGE_SIZE);
1160		addr = (unsigned long) dma_alloc_coherent(dma_dev,
1161					 chunk_size, &dma_addr, CIO_DMA_GFP);
1162		if (!addr)
1163			return NULL;
1164		gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
1165		addr = gen_pool_alloc(gp_dma, size);
1166	}
1167	return (void *) addr;
1168}
1169
1170void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
1171{
1172	if (!cpu_addr)
1173		return;
1174	memset(cpu_addr, 0, size);
1175	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
1176}
1177
1178/*
1179 * Allocate dma memory from the css global pool. Intended for memory not
1180 * specific to any single device within the css. The allocated memory
1181 * is not guaranteed to be 31-bit addressable.
1182 *
1183 * Caution: Not suitable for early stuff like console.
1184 */
1185void *cio_dma_zalloc(size_t size)
1186{
1187	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
1188}
1189
1190void cio_dma_free(void *cpu_addr, size_t size)
1191{
1192	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
1193}
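/*
 * Illustrative sketch, not part of the driver: cio_dma_zalloc() and
 * cio_dma_free() draw from the css-global pool set up above; per-device
 * pools use the cio_gp_dma_* variants with the same pattern.  The function
 * name and the size of 64 bytes below are arbitrary examples.
 */
static int __maybe_unused example_dma_roundtrip(void)
{
	void *area;

	area = cio_dma_zalloc(64);	/* zeroed, DMA-capable, css device */
	if (!area)
		return -ENOMEM;
	/* ... hand the buffer to the channel subsystem ... */
	cio_dma_free(area, 64);
	return 0;
}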
1194
1195/*
 1196	 * Now that the driver core is running, we can set up our channel subsystem.
 1197	 * The struct subchannel instances are created during probing.
1198 */
1199static int __init css_bus_init(void)
1200{
1201	int ret, i;
1202
1203	ret = chsc_init();
1204	if (ret)
1205		return ret;
1206
1207	chsc_determine_css_characteristics();
1208	/* Try to enable MSS. */
1209	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
1210	if (ret)
1211		max_ssid = 0;
1212	else /* Success. */
1213		max_ssid = __MAX_SSID;
1214
1215	ret = slow_subchannel_init();
1216	if (ret)
1217		goto out;
1218
1219	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
1220	if (ret)
1221		goto out;
1222
1223	if ((ret = bus_register(&css_bus_type)))
1224		goto out;
1225
1226	/* Setup css structure. */
1227	for (i = 0; i <= MAX_CSS_IDX; i++) {
1228		ret = setup_css(i);
1229		if (ret)
1230			goto out_unregister;
1231	}
1232	ret = register_reboot_notifier(&css_reboot_notifier);
1233	if (ret)
1234		goto out_unregister;
1235	ret = register_pm_notifier(&css_power_notifier);
1236	if (ret)
1237		goto out_unregister_rn;
1238	ret = cio_dma_pool_init();
1239	if (ret)
1240		goto out_unregister_pmn;
1241	airq_init();
1242	css_init_done = 1;
1243
1244	/* Enable default isc for I/O subchannels. */
1245	isc_register(IO_SCH_ISC);
1246
1247	return 0;
1248out_unregister_pmn:
1249	unregister_pm_notifier(&css_power_notifier);
1250out_unregister_rn:
1251	unregister_reboot_notifier(&css_reboot_notifier);
1252out_unregister:
1253	while (i-- > 0) {
1254		struct channel_subsystem *css = channel_subsystems[i];
1255		device_unregister(&css->pseudo_subchannel->dev);
1256		device_unregister(&css->device);
1257	}
1258	bus_unregister(&css_bus_type);
1259out:
1260	crw_unregister_handler(CRW_RSC_SCH);
1261	idset_free(slow_subchannel_set);
1262	chsc_init_cleanup();
1263	pr_alert("The CSS device driver initialization failed with "
1264		 "errno=%d\n", ret);
1265	return ret;
1266}
1267
1268static void __init css_bus_cleanup(void)
1269{
1270	struct channel_subsystem *css;
1271
1272	for_each_css(css) {
1273		device_unregister(&css->pseudo_subchannel->dev);
1274		device_unregister(&css->device);
1275	}
1276	bus_unregister(&css_bus_type);
1277	crw_unregister_handler(CRW_RSC_SCH);
1278	idset_free(slow_subchannel_set);
1279	chsc_init_cleanup();
1280	isc_unregister(IO_SCH_ISC);
1281}
1282
1283static int __init channel_subsystem_init(void)
1284{
1285	int ret;
1286
1287	ret = css_bus_init();
1288	if (ret)
1289		return ret;
1290	cio_work_q = create_singlethread_workqueue("cio");
1291	if (!cio_work_q) {
1292		ret = -ENOMEM;
1293		goto out_bus;
1294	}
1295	ret = io_subchannel_init();
1296	if (ret)
1297		goto out_wq;
1298
1299	/* Register subchannels which are already in use. */
1300	cio_register_early_subchannels();
1301	/* Start initial subchannel evaluation. */
1302	css_schedule_eval_all();
1303
1304	return ret;
1305out_wq:
1306	destroy_workqueue(cio_work_q);
1307out_bus:
1308	css_bus_cleanup();
1309	return ret;
1310}
1311subsys_initcall(channel_subsystem_init);
1312
1313static int css_settle(struct device_driver *drv, void *unused)
1314{
1315	struct css_driver *cssdrv = to_cssdriver(drv);
1316
1317	if (cssdrv->settle)
1318		return cssdrv->settle();
1319	return 0;
1320}
1321
1322int css_complete_work(void)
1323{
1324	int ret;
1325
1326	/* Wait for the evaluation of subchannels to finish. */
1327	ret = wait_event_interruptible(css_eval_wq,
1328				       atomic_read(&css_eval_scheduled) == 0);
1329	if (ret)
1330		return -EINTR;
1331	flush_workqueue(cio_work_q);
1332	/* Wait for the subchannel type specific initialization to finish */
1333	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
1334}
1335
1336
1337/*
1338 * Wait for the initialization of devices to finish, to make sure we are
 1339	 * done with our setup before the search for the root device starts.
1340 */
1341static int __init channel_subsystem_init_sync(void)
1342{
1343	css_complete_work();
1344	return 0;
1345}
1346subsys_initcall_sync(channel_subsystem_init_sync);
1347
1348void channel_subsystem_reinit(void)
1349{
1350	struct channel_path *chp;
1351	struct chp_id chpid;
1352
1353	chsc_enable_facility(CHSC_SDA_OC_MSS);
1354	chp_id_for_each(&chpid) {
1355		chp = chpid_to_chp(chpid);
1356		if (chp)
1357			chp_update_desc(chp);
1358	}
1359	cmf_reactivate();
1360}
1361
1362#ifdef CONFIG_PROC_FS
1363static ssize_t cio_settle_write(struct file *file, const char __user *buf,
1364				size_t count, loff_t *ppos)
1365{
1366	int ret;
1367
1368	/* Handle pending CRW's. */
1369	crw_wait_for_channel_report();
1370	ret = css_complete_work();
1371
1372	return ret ? ret : count;
1373}
1374
1375static const struct file_operations cio_settle_proc_fops = {
1376	.open = nonseekable_open,
1377	.write = cio_settle_write,
1378	.llseek = no_llseek,
1379};
1380
1381static int __init cio_settle_init(void)
1382{
1383	struct proc_dir_entry *entry;
1384
1385	entry = proc_create("cio_settle", S_IWUSR, NULL,
1386			    &cio_settle_proc_fops);
1387	if (!entry)
1388		return -ENOMEM;
1389	return 0;
1390}
1391device_initcall(cio_settle_init);
1392#endif /*CONFIG_PROC_FS*/
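/*
 * Illustrative sketch, not part of the driver: userspace can force and wait
 * for an evaluation pass by writing to /proc/cio_settle; the write returns
 * once pending channel reports are handled and css_complete_work() is done.
 * Plain userspace C, excluded from the kernel build below.
 */
#if 0	/* userspace example only */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/cio_settle", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) < 0)	/* blocks until evaluation settles */
		return 1;
	close(fd);
	return 0;
}
#endif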
1393
1394int sch_is_pseudo_sch(struct subchannel *sch)
1395{
1396	if (!sch->dev.parent)
1397		return 0;
1398	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
1399}
1400
1401static int css_bus_match(struct device *dev, struct device_driver *drv)
1402{
1403	struct subchannel *sch = to_subchannel(dev);
1404	struct css_driver *driver = to_cssdriver(drv);
1405	struct css_device_id *id;
1406
1407	/* When driver_override is set, only bind to the matching driver */
1408	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
1409		return 0;
1410
1411	for (id = driver->subchannel_type; id->match_flags; id++) {
1412		if (sch->st == id->type)
1413			return 1;
1414	}
1415
1416	return 0;
1417}
1418
1419static int css_probe(struct device *dev)
1420{
1421	struct subchannel *sch;
1422	int ret;
1423
1424	sch = to_subchannel(dev);
1425	sch->driver = to_cssdriver(dev->driver);
1426	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
1427	if (ret)
1428		sch->driver = NULL;
1429	return ret;
1430}
1431
1432static int css_remove(struct device *dev)
1433{
1434	struct subchannel *sch;
1435	int ret;
1436
1437	sch = to_subchannel(dev);
 1438	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
1439	sch->driver = NULL;
1440	return ret;
1441}
1442
1443static void css_shutdown(struct device *dev)
1444{
1445	struct subchannel *sch;
1446
1447	sch = to_subchannel(dev);
1448	if (sch->driver && sch->driver->shutdown)
1449		sch->driver->shutdown(sch);
1450}
1451
1452static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
1453{
1454	struct subchannel *sch = to_subchannel(dev);
1455	int ret;
1456
1457	ret = add_uevent_var(env, "ST=%01X", sch->st);
1458	if (ret)
1459		return ret;
1460	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
1461	return ret;
1462}
1463
1464static int css_pm_prepare(struct device *dev)
1465{
1466	struct subchannel *sch = to_subchannel(dev);
1467	struct css_driver *drv;
1468
1469	if (mutex_is_locked(&sch->reg_mutex))
1470		return -EAGAIN;
1471	if (!sch->dev.driver)
1472		return 0;
1473	drv = to_cssdriver(sch->dev.driver);
1474	/* Notify drivers that they may not register children. */
1475	return drv->prepare ? drv->prepare(sch) : 0;
1476}
1477
1478static void css_pm_complete(struct device *dev)
1479{
1480	struct subchannel *sch = to_subchannel(dev);
1481	struct css_driver *drv;
1482
1483	if (!sch->dev.driver)
1484		return;
1485	drv = to_cssdriver(sch->dev.driver);
1486	if (drv->complete)
1487		drv->complete(sch);
1488}
1489
1490static int css_pm_freeze(struct device *dev)
1491{
1492	struct subchannel *sch = to_subchannel(dev);
1493	struct css_driver *drv;
1494
1495	if (!sch->dev.driver)
1496		return 0;
1497	drv = to_cssdriver(sch->dev.driver);
1498	return drv->freeze ? drv->freeze(sch) : 0;
1499}
1500
1501static int css_pm_thaw(struct device *dev)
1502{
1503	struct subchannel *sch = to_subchannel(dev);
1504	struct css_driver *drv;
1505
1506	if (!sch->dev.driver)
1507		return 0;
1508	drv = to_cssdriver(sch->dev.driver);
1509	return drv->thaw ? drv->thaw(sch) : 0;
1510}
1511
1512static int css_pm_restore(struct device *dev)
1513{
1514	struct subchannel *sch = to_subchannel(dev);
1515	struct css_driver *drv;
1516
1517	css_update_ssd_info(sch);
1518	if (!sch->dev.driver)
1519		return 0;
1520	drv = to_cssdriver(sch->dev.driver);
1521	return drv->restore ? drv->restore(sch) : 0;
1522}
1523
1524static const struct dev_pm_ops css_pm_ops = {
1525	.prepare = css_pm_prepare,
1526	.complete = css_pm_complete,
1527	.freeze = css_pm_freeze,
1528	.thaw = css_pm_thaw,
1529	.restore = css_pm_restore,
1530};
1531
1532static struct bus_type css_bus_type = {
1533	.name     = "css",
1534	.match    = css_bus_match,
1535	.probe    = css_probe,
1536	.remove   = css_remove,
1537	.shutdown = css_shutdown,
1538	.uevent   = css_uevent,
1539	.pm = &css_pm_ops,
1540};
1541
1542/**
1543 * css_driver_register - register a css driver
1544 * @cdrv: css driver to register
1545 *
1546 * This is mainly a wrapper around driver_register that sets name
1547 * and bus_type in the embedded struct device_driver correctly.
1548 */
1549int css_driver_register(struct css_driver *cdrv)
1550{
1551	cdrv->drv.bus = &css_bus_type;
1552	return driver_register(&cdrv->drv);
1553}
1554EXPORT_SYMBOL_GPL(css_driver_register);
1555
1556/**
1557 * css_driver_unregister - unregister a css driver
1558 * @cdrv: css driver to unregister
1559 *
1560 * This is a wrapper around driver_unregister.
1561 */
1562void css_driver_unregister(struct css_driver *cdrv)
1563{
1564	driver_unregister(&cdrv->drv);
1565}
1566EXPORT_SYMBOL_GPL(css_driver_unregister);
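/*
 * Illustrative sketch, not part of the driver: a minimal subchannel driver
 * provides a css_device_id table and fills in struct css_driver before
 * calling css_driver_register().  All names below (example_ids,
 * example_probe, example_driver) are hypothetical.
 */
static struct css_device_id example_ids[] __maybe_unused = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

static int __maybe_unused example_probe(struct subchannel *sch)
{
	/* Called via css_probe() once css_bus_match() accepted the type. */
	return 0;
}

static struct css_driver example_driver __maybe_unused = {
	.drv = {
		.name = "example",
		.owner = THIS_MODULE,
	},
	.subchannel_type = example_ids,
	.probe = example_probe,
};

/*
 * css_driver_register(&example_driver) binds the driver to the css bus;
 * css_driver_unregister(&example_driver) removes it again.
 */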