v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * driver for channel subsystem
   4 *
   5 * Copyright IBM Corp. 2002, 2010
   6 *
   7 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
   8 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
   9 */
  10
  11#define KMSG_COMPONENT "cio"
  12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  13
  14#include <linux/export.h>
  15#include <linux/init.h>
  16#include <linux/device.h>
  17#include <linux/slab.h>
  18#include <linux/errno.h>
  19#include <linux/list.h>
  20#include <linux/reboot.h>
  21#include <linux/suspend.h>
  22#include <linux/proc_fs.h>
  23#include <linux/genalloc.h>
  24#include <linux/dma-mapping.h>
  25#include <asm/isc.h>
  26#include <asm/crw.h>
  27
  28#include "css.h"
  29#include "cio.h"
  30#include "blacklist.h"
  31#include "cio_debug.h"
  32#include "ioasm.h"
  33#include "chsc.h"
  34#include "device.h"
  35#include "idset.h"
  36#include "chp.h"
  37
  38int css_init_done = 0;
  39int max_ssid;
  40
  41#define MAX_CSS_IDX 0
  42struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
  43static struct bus_type css_bus_type;
  44
  45int
  46for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
  47{
  48	struct subchannel_id schid;
  49	int ret;
  50
  51	init_subchannel_id(&schid);
  52	do {
  53		do {
  54			ret = fn(schid, data);
  55			if (ret)
  56				break;
  57		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
  58		schid.sch_no = 0;
  59	} while (schid.ssid++ < max_ssid);
  60	return ret;
  61}
  62
  63struct cb_data {
  64	void *data;
  65	struct idset *set;
  66	int (*fn_known_sch)(struct subchannel *, void *);
  67	int (*fn_unknown_sch)(struct subchannel_id, void *);
  68};
  69
  70static int call_fn_known_sch(struct device *dev, void *data)
  71{
  72	struct subchannel *sch = to_subchannel(dev);
  73	struct cb_data *cb = data;
  74	int rc = 0;
  75
  76	if (cb->set)
  77		idset_sch_del(cb->set, sch->schid);
  78	if (cb->fn_known_sch)
  79		rc = cb->fn_known_sch(sch, cb->data);
  80	return rc;
  81}
  82
  83static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
  84{
  85	struct cb_data *cb = data;
  86	int rc = 0;
  87
  88	if (idset_sch_contains(cb->set, schid))
  89		rc = cb->fn_unknown_sch(schid, cb->data);
  90	return rc;
  91}
  92
  93static int call_fn_all_sch(struct subchannel_id schid, void *data)
  94{
  95	struct cb_data *cb = data;
  96	struct subchannel *sch;
  97	int rc = 0;
  98
  99	sch = get_subchannel_by_schid(schid);
 100	if (sch) {
 101		if (cb->fn_known_sch)
 102			rc = cb->fn_known_sch(sch, cb->data);
 103		put_device(&sch->dev);
 104	} else {
 105		if (cb->fn_unknown_sch)
 106			rc = cb->fn_unknown_sch(schid, cb->data);
 107	}
 108
 109	return rc;
 110}
 111
 112int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
 113			       int (*fn_unknown)(struct subchannel_id,
 114			       void *), void *data)
 115{
 116	struct cb_data cb;
 117	int rc;
 118
 119	cb.data = data;
 120	cb.fn_known_sch = fn_known;
 121	cb.fn_unknown_sch = fn_unknown;
 122
 123	if (fn_known && !fn_unknown) {
 124		/* Skip idset allocation in case of known-only loop. */
 125		cb.set = NULL;
 126		return bus_for_each_dev(&css_bus_type, NULL, &cb,
 127					call_fn_known_sch);
 128	}
 129
 130	cb.set = idset_sch_new();
 131	if (!cb.set)
 132		/* fall back to brute force scanning in case of oom */
 133		return for_each_subchannel(call_fn_all_sch, &cb);
 134
 135	idset_fill(cb.set);
 136
 137	/* Process registered subchannels. */
 138	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
 139	if (rc)
 140		goto out;
 141	/* Process unregistered subchannels. */
 142	if (fn_unknown)
 143		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
 144out:
 145	idset_free(cb.set);
 146
 147	return rc;
 148}
 149
 150static void css_sch_todo(struct work_struct *work);
 151
 152static int css_sch_create_locks(struct subchannel *sch)
 153{
 154	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
 155	if (!sch->lock)
 156		return -ENOMEM;
 157
 158	spin_lock_init(sch->lock);
 159	mutex_init(&sch->reg_mutex);
 160
 161	return 0;
 162}
 163
 164static void css_subchannel_release(struct device *dev)
 165{
 166	struct subchannel *sch = to_subchannel(dev);
 167
 168	sch->config.intparm = 0;
 169	cio_commit_config(sch);
 170	kfree(sch->driver_override);
 171	kfree(sch->lock);
 172	kfree(sch);
 173}
 174
 175static int css_validate_subchannel(struct subchannel_id schid,
 176				   struct schib *schib)
 177{
 178	int err;
 179
 180	switch (schib->pmcw.st) {
 181	case SUBCHANNEL_TYPE_IO:
 182	case SUBCHANNEL_TYPE_MSG:
 183		if (!css_sch_is_valid(schib))
 184			err = -ENODEV;
 185		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
 186			CIO_MSG_EVENT(6, "Blacklisted device detected "
 187				      "at devno %04X, subchannel set %x\n",
 188				      schib->pmcw.dev, schid.ssid);
 189			err = -ENODEV;
 190		} else
 191			err = 0;
 192		break;
 193	default:
 194		err = 0;
 195	}
 196	if (err)
 197		goto out;
 198
 199	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
 200		      schid.ssid, schid.sch_no, schib->pmcw.st);
 201out:
 202	return err;
 203}
 204
 205struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
 206					struct schib *schib)
 207{
 208	struct subchannel *sch;
 209	int ret;
 210
 211	ret = css_validate_subchannel(schid, schib);
 212	if (ret < 0)
 213		return ERR_PTR(ret);
 214
 215	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
 216	if (!sch)
 217		return ERR_PTR(-ENOMEM);
 218
 219	sch->schid = schid;
 220	sch->schib = *schib;
 221	sch->st = schib->pmcw.st;
 222
 223	ret = css_sch_create_locks(sch);
 224	if (ret)
 225		goto err;
 226
 227	INIT_WORK(&sch->todo_work, css_sch_todo);
 228	sch->dev.release = &css_subchannel_release;
 229	device_initialize(&sch->dev);
 230	/*
  231	 * The physical addresses of some of the dma structures that can
 232	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
 233	 */
 234	sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
 235	/*
 236	 * But we don't have such restrictions imposed on the stuff that
 237	 * is handled by the streaming API.
 238	 */
 239	sch->dma_mask = DMA_BIT_MASK(64);
 240	sch->dev.dma_mask = &sch->dma_mask;
 241	return sch;
 242
 243err:
 244	kfree(sch);
 245	return ERR_PTR(ret);
 246}
 247
 248static int css_sch_device_register(struct subchannel *sch)
 249{
 250	int ret;
 251
 252	mutex_lock(&sch->reg_mutex);
 253	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
 254		     sch->schid.sch_no);
 255	ret = device_add(&sch->dev);
 256	mutex_unlock(&sch->reg_mutex);
 257	return ret;
 258}
 259
 260/**
 261 * css_sch_device_unregister - unregister a subchannel
 262 * @sch: subchannel to be unregistered
 263 */
 264void css_sch_device_unregister(struct subchannel *sch)
 265{
 266	mutex_lock(&sch->reg_mutex);
 267	if (device_is_registered(&sch->dev))
 268		device_unregister(&sch->dev);
 269	mutex_unlock(&sch->reg_mutex);
 270}
 271EXPORT_SYMBOL_GPL(css_sch_device_unregister);
 272
 273static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
 274{
 275	int i;
 276	int mask;
 277
 278	memset(ssd, 0, sizeof(struct chsc_ssd_info));
 279	ssd->path_mask = pmcw->pim;
 280	for (i = 0; i < 8; i++) {
 281		mask = 0x80 >> i;
 282		if (pmcw->pim & mask) {
 283			chp_id_init(&ssd->chpid[i]);
 284			ssd->chpid[i].id = pmcw->chpid[i];
 285		}
 286	}
 287}
 288
 289static void ssd_register_chpids(struct chsc_ssd_info *ssd)
 290{
 291	int i;
 292	int mask;
 293
 294	for (i = 0; i < 8; i++) {
 295		mask = 0x80 >> i;
 296		if (ssd->path_mask & mask)
 297			chp_new(ssd->chpid[i]);
 298	}
 299}
 300
 301void css_update_ssd_info(struct subchannel *sch)
 302{
 303	int ret;
 304
 305	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
 306	if (ret)
 307		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
 308
 309	ssd_register_chpids(&sch->ssd_info);
 310}
 311
 312static ssize_t type_show(struct device *dev, struct device_attribute *attr,
 313			 char *buf)
 314{
 315	struct subchannel *sch = to_subchannel(dev);
 316
 317	return sprintf(buf, "%01x\n", sch->st);
 318}
 319
 320static DEVICE_ATTR_RO(type);
 321
 322static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 323			     char *buf)
 324{
 325	struct subchannel *sch = to_subchannel(dev);
 326
 327	return sprintf(buf, "css:t%01X\n", sch->st);
 328}
 329
 330static DEVICE_ATTR_RO(modalias);
 331
 332static ssize_t driver_override_store(struct device *dev,
 333				     struct device_attribute *attr,
 334				     const char *buf, size_t count)
 335{
 336	struct subchannel *sch = to_subchannel(dev);
 337	char *driver_override, *old, *cp;
 338
 339	/* We need to keep extra room for a newline */
 340	if (count >= (PAGE_SIZE - 1))
 341		return -EINVAL;
 342
 343	driver_override = kstrndup(buf, count, GFP_KERNEL);
 344	if (!driver_override)
 345		return -ENOMEM;
 346
 347	cp = strchr(driver_override, '\n');
 348	if (cp)
 349		*cp = '\0';
 350
 351	device_lock(dev);
 352	old = sch->driver_override;
 353	if (strlen(driver_override)) {
 354		sch->driver_override = driver_override;
 355	} else {
 356		kfree(driver_override);
 357		sch->driver_override = NULL;
 358	}
 359	device_unlock(dev);
 360
 361	kfree(old);
 362
 363	return count;
 364}
 365
 366static ssize_t driver_override_show(struct device *dev,
 367				    struct device_attribute *attr, char *buf)
 368{
 369	struct subchannel *sch = to_subchannel(dev);
 370	ssize_t len;
 371
 372	device_lock(dev);
 373	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
 374	device_unlock(dev);
 375	return len;
 376}
 377static DEVICE_ATTR_RW(driver_override);
 378
 379static struct attribute *subch_attrs[] = {
 380	&dev_attr_type.attr,
 381	&dev_attr_modalias.attr,
 382	&dev_attr_driver_override.attr,
 383	NULL,
 384};
 385
 386static struct attribute_group subch_attr_group = {
 387	.attrs = subch_attrs,
 388};
 389
 390static const struct attribute_group *default_subch_attr_groups[] = {
 391	&subch_attr_group,
 392	NULL,
 393};
 394
 395static ssize_t chpids_show(struct device *dev,
 396			   struct device_attribute *attr,
 397			   char *buf)
 398{
 399	struct subchannel *sch = to_subchannel(dev);
 400	struct chsc_ssd_info *ssd = &sch->ssd_info;
 401	ssize_t ret = 0;
 402	int mask;
 403	int chp;
 404
 405	for (chp = 0; chp < 8; chp++) {
 406		mask = 0x80 >> chp;
 407		if (ssd->path_mask & mask)
 408			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
 409		else
 410			ret += sprintf(buf + ret, "00 ");
 411	}
 412	ret += sprintf(buf + ret, "\n");
 413	return ret;
 414}
 415static DEVICE_ATTR_RO(chpids);
 416
 417static ssize_t pimpampom_show(struct device *dev,
 418			      struct device_attribute *attr,
 419			      char *buf)
 420{
 421	struct subchannel *sch = to_subchannel(dev);
 422	struct pmcw *pmcw = &sch->schib.pmcw;
 423
 424	return sprintf(buf, "%02x %02x %02x\n",
 425		       pmcw->pim, pmcw->pam, pmcw->pom);
 426}
 427static DEVICE_ATTR_RO(pimpampom);
 428
 429static struct attribute *io_subchannel_type_attrs[] = {
 430	&dev_attr_chpids.attr,
 431	&dev_attr_pimpampom.attr,
 432	NULL,
 433};
 434ATTRIBUTE_GROUPS(io_subchannel_type);
 435
 436static const struct device_type io_subchannel_type = {
 437	.groups = io_subchannel_type_groups,
 438};
 439
 440int css_register_subchannel(struct subchannel *sch)
 441{
 442	int ret;
 443
 444	/* Initialize the subchannel structure */
 445	sch->dev.parent = &channel_subsystems[0]->device;
 446	sch->dev.bus = &css_bus_type;
 447	sch->dev.groups = default_subch_attr_groups;
 448
 449	if (sch->st == SUBCHANNEL_TYPE_IO)
 450		sch->dev.type = &io_subchannel_type;
 451
 452	/*
 453	 * We don't want to generate uevents for I/O subchannels that don't
 454	 * have a working ccw device behind them since they will be
 455	 * unregistered before they can be used anyway, so we delay the add
 456	 * uevent until after device recognition was successful.
 457	 * Note that we suppress the uevent for all subchannel types;
 458	 * the subchannel driver can decide itself when it wants to inform
 459	 * userspace of its existence.
 460	 */
 461	dev_set_uevent_suppress(&sch->dev, 1);
 462	css_update_ssd_info(sch);
 463	/* make it known to the system */
 464	ret = css_sch_device_register(sch);
 465	if (ret) {
 466		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
 467			      sch->schid.ssid, sch->schid.sch_no, ret);
 468		return ret;
 469	}
 470	if (!sch->driver) {
 471		/*
 472		 * No driver matched. Generate the uevent now so that
 473		 * a fitting driver module may be loaded based on the
 474		 * modalias.
 475		 */
 476		dev_set_uevent_suppress(&sch->dev, 0);
 477		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
 478	}
 479	return ret;
 480}
 481
 482static int css_probe_device(struct subchannel_id schid, struct schib *schib)
 483{
 484	struct subchannel *sch;
 485	int ret;
 486
 487	sch = css_alloc_subchannel(schid, schib);
 488	if (IS_ERR(sch))
 489		return PTR_ERR(sch);
 490
 491	ret = css_register_subchannel(sch);
 492	if (ret)
 493		put_device(&sch->dev);
 494
 495	return ret;
 496}
 497
 498static int
 499check_subchannel(struct device *dev, const void *data)
 500{
 501	struct subchannel *sch;
 502	struct subchannel_id *schid = (void *)data;
 503
 504	sch = to_subchannel(dev);
 505	return schid_equal(&sch->schid, schid);
 506}
 507
 508struct subchannel *
 509get_subchannel_by_schid(struct subchannel_id schid)
 510{
 511	struct device *dev;
 512
 513	dev = bus_find_device(&css_bus_type, NULL,
 514			      &schid, check_subchannel);
 515
 516	return dev ? to_subchannel(dev) : NULL;
 517}
 518
 519/**
 520 * css_sch_is_valid() - check if a subchannel is valid
 521 * @schib: subchannel information block for the subchannel
 522 */
 523int css_sch_is_valid(struct schib *schib)
 524{
 525	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
 526		return 0;
 527	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
 528		return 0;
 529	return 1;
 530}
 531EXPORT_SYMBOL_GPL(css_sch_is_valid);
 532
 533static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
 534{
 535	struct schib schib;
 536	int ccode;
 537
 538	if (!slow) {
 539		/* Will be done on the slow path. */
 540		return -EAGAIN;
 541	}
 542	/*
 543	 * The first subchannel that is not-operational (ccode==3)
 544	 * indicates that there aren't any more devices available.
 545	 * If stsch gets an exception, it means the current subchannel set
 546	 * is not valid.
 547	 */
 548	ccode = stsch(schid, &schib);
 549	if (ccode)
 550		return (ccode == 3) ? -ENXIO : ccode;
 551
 552	return css_probe_device(schid, &schib);
 553}
 554
 555static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
 556{
 557	int ret = 0;
 558
 559	if (sch->driver) {
 560		if (sch->driver->sch_event)
 561			ret = sch->driver->sch_event(sch, slow);
 562		else
 563			dev_dbg(&sch->dev,
 564				"Got subchannel machine check but "
 565				"no sch_event handler provided.\n");
 566	}
 567	if (ret != 0 && ret != -EAGAIN) {
 568		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
 569			      sch->schid.ssid, sch->schid.sch_no, ret);
 570	}
 571	return ret;
 572}
 573
 574static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
 575{
 576	struct subchannel *sch;
 577	int ret;
 578
 579	sch = get_subchannel_by_schid(schid);
 580	if (sch) {
 581		ret = css_evaluate_known_subchannel(sch, slow);
 582		put_device(&sch->dev);
 583	} else
 584		ret = css_evaluate_new_subchannel(schid, slow);
 585	if (ret == -EAGAIN)
 586		css_schedule_eval(schid);
 587}
 588
 589/**
 590 * css_sched_sch_todo - schedule a subchannel operation
 591 * @sch: subchannel
 592 * @todo: todo
 593 *
 594 * Schedule the operation identified by @todo to be performed on the slow path
 595 * workqueue. Do nothing if another operation with higher priority is already
 596 * scheduled. Needs to be called with subchannel lock held.
 597 */
 598void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
 599{
 600	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
 601		      sch->schid.ssid, sch->schid.sch_no, todo);
 602	if (sch->todo >= todo)
 603		return;
 604	/* Get workqueue ref. */
 605	if (!get_device(&sch->dev))
 606		return;
 607	sch->todo = todo;
 608	if (!queue_work(cio_work_q, &sch->todo_work)) {
 609		/* Already queued, release workqueue ref. */
 610		put_device(&sch->dev);
 611	}
 612}
 613EXPORT_SYMBOL_GPL(css_sched_sch_todo);
 614
 615static void css_sch_todo(struct work_struct *work)
 616{
 617	struct subchannel *sch;
 618	enum sch_todo todo;
 619	int ret;
 620
 621	sch = container_of(work, struct subchannel, todo_work);
 622	/* Find out todo. */
 623	spin_lock_irq(sch->lock);
 624	todo = sch->todo;
 625	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
 626		      sch->schid.sch_no, todo);
 627	sch->todo = SCH_TODO_NOTHING;
 628	spin_unlock_irq(sch->lock);
 629	/* Perform todo. */
 630	switch (todo) {
 631	case SCH_TODO_NOTHING:
 632		break;
 633	case SCH_TODO_EVAL:
 634		ret = css_evaluate_known_subchannel(sch, 1);
 635		if (ret == -EAGAIN) {
 636			spin_lock_irq(sch->lock);
 637			css_sched_sch_todo(sch, todo);
 638			spin_unlock_irq(sch->lock);
 639		}
 640		break;
 641	case SCH_TODO_UNREG:
 642		css_sch_device_unregister(sch);
 643		break;
 644	}
 645	/* Release workqueue ref. */
 646	put_device(&sch->dev);
 647}
 648
 649static struct idset *slow_subchannel_set;
 650static spinlock_t slow_subchannel_lock;
 651static wait_queue_head_t css_eval_wq;
 652static atomic_t css_eval_scheduled;
 653
 654static int __init slow_subchannel_init(void)
 655{
 656	spin_lock_init(&slow_subchannel_lock);
 657	atomic_set(&css_eval_scheduled, 0);
 658	init_waitqueue_head(&css_eval_wq);
 659	slow_subchannel_set = idset_sch_new();
 660	if (!slow_subchannel_set) {
 661		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
 662		return -ENOMEM;
 663	}
 664	return 0;
 665}
 666
 667static int slow_eval_known_fn(struct subchannel *sch, void *data)
 668{
 669	int eval;
 670	int rc;
 671
 672	spin_lock_irq(&slow_subchannel_lock);
 673	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
 674	idset_sch_del(slow_subchannel_set, sch->schid);
 675	spin_unlock_irq(&slow_subchannel_lock);
 676	if (eval) {
 677		rc = css_evaluate_known_subchannel(sch, 1);
 678		if (rc == -EAGAIN)
 679			css_schedule_eval(sch->schid);
 680		/*
  681		 * The loop might take a long time for platforms with lots of
 682		 * known devices. Allow scheduling here.
 683		 */
 684		cond_resched();
 685	}
 686	return 0;
 687}
 688
 689static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
 690{
 691	int eval;
 692	int rc = 0;
 693
 694	spin_lock_irq(&slow_subchannel_lock);
 695	eval = idset_sch_contains(slow_subchannel_set, schid);
 696	idset_sch_del(slow_subchannel_set, schid);
 697	spin_unlock_irq(&slow_subchannel_lock);
 698	if (eval) {
 699		rc = css_evaluate_new_subchannel(schid, 1);
 700		switch (rc) {
 701		case -EAGAIN:
 702			css_schedule_eval(schid);
 703			rc = 0;
 704			break;
 705		case -ENXIO:
 706		case -ENOMEM:
 707		case -EIO:
 708			/* These should abort looping */
 709			spin_lock_irq(&slow_subchannel_lock);
 710			idset_sch_del_subseq(slow_subchannel_set, schid);
 711			spin_unlock_irq(&slow_subchannel_lock);
 712			break;
 713		default:
 714			rc = 0;
 715		}
 716		/* Allow scheduling here since the containing loop might
 717		 * take a while.  */
 718		cond_resched();
 719	}
 720	return rc;
 721}
 722
 723static void css_slow_path_func(struct work_struct *unused)
 724{
 725	unsigned long flags;
 726
 727	CIO_TRACE_EVENT(4, "slowpath");
 728	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
 729				   NULL);
 730	spin_lock_irqsave(&slow_subchannel_lock, flags);
 731	if (idset_is_empty(slow_subchannel_set)) {
 732		atomic_set(&css_eval_scheduled, 0);
 733		wake_up(&css_eval_wq);
 734	}
 735	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 736}
 737
 738static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
 739struct workqueue_struct *cio_work_q;
 740
 741void css_schedule_eval(struct subchannel_id schid)
 742{
 743	unsigned long flags;
 744
 745	spin_lock_irqsave(&slow_subchannel_lock, flags);
 746	idset_sch_add(slow_subchannel_set, schid);
 747	atomic_set(&css_eval_scheduled, 1);
 748	queue_delayed_work(cio_work_q, &slow_path_work, 0);
 749	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 750}
 751
 752void css_schedule_eval_all(void)
 753{
 754	unsigned long flags;
 755
 756	spin_lock_irqsave(&slow_subchannel_lock, flags);
 757	idset_fill(slow_subchannel_set);
 758	atomic_set(&css_eval_scheduled, 1);
 759	queue_delayed_work(cio_work_q, &slow_path_work, 0);
 760	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 761}
 762
 763static int __unset_registered(struct device *dev, void *data)
 764{
 765	struct idset *set = data;
 766	struct subchannel *sch = to_subchannel(dev);
 767
 768	idset_sch_del(set, sch->schid);
 769	return 0;
 770}
 771
 772void css_schedule_eval_all_unreg(unsigned long delay)
 773{
 774	unsigned long flags;
 775	struct idset *unreg_set;
 776
 777	/* Find unregistered subchannels. */
 778	unreg_set = idset_sch_new();
 779	if (!unreg_set) {
 780		/* Fallback. */
 781		css_schedule_eval_all();
 782		return;
 783	}
 784	idset_fill(unreg_set);
 785	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
 786	/* Apply to slow_subchannel_set. */
 787	spin_lock_irqsave(&slow_subchannel_lock, flags);
 788	idset_add_set(slow_subchannel_set, unreg_set);
 789	atomic_set(&css_eval_scheduled, 1);
 790	queue_delayed_work(cio_work_q, &slow_path_work, delay);
 791	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 792	idset_free(unreg_set);
 793}
 794
 795void css_wait_for_slow_path(void)
 796{
 797	flush_workqueue(cio_work_q);
 798}
 799
 800/* Schedule reprobing of all unregistered subchannels. */
 801void css_schedule_reprobe(void)
 802{
 803	/* Schedule with a delay to allow merging of subsequent calls. */
 804	css_schedule_eval_all_unreg(1 * HZ);
 805}
 806EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 807
 808/*
 809 * Called from the machine check handler for subchannel report words.
 810 */
 811static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 812{
 813	struct subchannel_id mchk_schid;
 814	struct subchannel *sch;
 815
 816	if (overflow) {
 817		css_schedule_eval_all();
 818		return;
 819	}
 820	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
 821		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
 822		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
 823		      crw0->erc, crw0->rsid);
 824	if (crw1)
 825		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
 826			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
 827			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
 828			      crw1->anc, crw1->erc, crw1->rsid);
 829	init_subchannel_id(&mchk_schid);
 830	mchk_schid.sch_no = crw0->rsid;
 831	if (crw1)
 832		mchk_schid.ssid = (crw1->rsid >> 4) & 3;
 833
 834	if (crw0->erc == CRW_ERC_PMOD) {
 835		sch = get_subchannel_by_schid(mchk_schid);
 836		if (sch) {
 837			css_update_ssd_info(sch);
 838			put_device(&sch->dev);
 839		}
 840	}
 841	/*
 842	 * Since we are always presented with IPI in the CRW, we have to
 843	 * use stsch() to find out if the subchannel in question has come
 844	 * or gone.
 845	 */
 846	css_evaluate_subchannel(mchk_schid, 0);
 847}
 848
 849static void __init
 850css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
 851{
 852	struct cpuid cpu_id;
 853
 854	if (css_general_characteristics.mcss) {
 855		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
 856		css->global_pgid.pgid_high.ext_cssid.cssid =
 857			(css->cssid < 0) ? 0 : css->cssid;
 858	} else {
 859		css->global_pgid.pgid_high.cpu_addr = stap();
 860	}
 861	get_cpu_id(&cpu_id);
 862	css->global_pgid.cpu_id = cpu_id.ident;
 863	css->global_pgid.cpu_model = cpu_id.machine;
 864	css->global_pgid.tod_high = tod_high;
 865}
 866
 867static void channel_subsystem_release(struct device *dev)
 868{
 869	struct channel_subsystem *css = to_css(dev);
 870
 871	mutex_destroy(&css->mutex);
 872	kfree(css);
 873}
 874
 875static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
 876			       char *buf)
 877{
 878	struct channel_subsystem *css = to_css(dev);
 879
 880	if (css->cssid < 0)
 881		return -EINVAL;
 882
 883	return sprintf(buf, "%x\n", css->cssid);
 884}
 885static DEVICE_ATTR_RO(real_cssid);
 886
 887static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
 888			      char *buf)
 889{
 890	struct channel_subsystem *css = to_css(dev);
 891	int ret;
 892
 893	mutex_lock(&css->mutex);
 894	ret = sprintf(buf, "%x\n", css->cm_enabled);
 895	mutex_unlock(&css->mutex);
 896	return ret;
 897}
 898
 899static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
 900			       const char *buf, size_t count)
 901{
 902	struct channel_subsystem *css = to_css(dev);
 903	unsigned long val;
 904	int ret;
 905
 906	ret = kstrtoul(buf, 16, &val);
 907	if (ret)
 908		return ret;
 909	mutex_lock(&css->mutex);
 910	switch (val) {
 911	case 0:
 912		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
 913		break;
 914	case 1:
 915		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
 916		break;
 917	default:
 918		ret = -EINVAL;
 919	}
 920	mutex_unlock(&css->mutex);
 921	return ret < 0 ? ret : count;
 922}
 923static DEVICE_ATTR_RW(cm_enable);
 924
 925static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
 926			      int index)
 927{
 928	return css_chsc_characteristics.secm ? attr->mode : 0;
 929}
 930
 931static struct attribute *cssdev_attrs[] = {
 932	&dev_attr_real_cssid.attr,
 933	NULL,
 934};
 935
 936static struct attribute_group cssdev_attr_group = {
 937	.attrs = cssdev_attrs,
 938};
 939
 940static struct attribute *cssdev_cm_attrs[] = {
 941	&dev_attr_cm_enable.attr,
 942	NULL,
 943};
 944
 945static struct attribute_group cssdev_cm_attr_group = {
 946	.attrs = cssdev_cm_attrs,
 947	.is_visible = cm_enable_mode,
 948};
 949
 950static const struct attribute_group *cssdev_attr_groups[] = {
 951	&cssdev_attr_group,
 952	&cssdev_cm_attr_group,
 953	NULL,
 954};
 955
 956static int __init setup_css(int nr)
 957{
 958	struct channel_subsystem *css;
 959	int ret;
 960
 961	css = kzalloc(sizeof(*css), GFP_KERNEL);
 962	if (!css)
 963		return -ENOMEM;
 964
 965	channel_subsystems[nr] = css;
 966	dev_set_name(&css->device, "css%x", nr);
 967	css->device.groups = cssdev_attr_groups;
 968	css->device.release = channel_subsystem_release;
 969	/*
 970	 * We currently allocate notifier bits with this (using
 971	 * css->device as the device argument with the DMA API)
 972	 * and are fine with 64 bit addresses.
 973	 */
 974	css->device.coherent_dma_mask = DMA_BIT_MASK(64);
 975	css->device.dma_mask = &css->device.coherent_dma_mask;
 976
 977	mutex_init(&css->mutex);
 978	css->cssid = chsc_get_cssid(nr);
 979	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));
 980
 981	ret = device_register(&css->device);
 982	if (ret) {
 983		put_device(&css->device);
 984		goto out_err;
 985	}
 986
 987	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
 988					 GFP_KERNEL);
 989	if (!css->pseudo_subchannel) {
 990		device_unregister(&css->device);
 991		ret = -ENOMEM;
 992		goto out_err;
 993	}
 994
 995	css->pseudo_subchannel->dev.parent = &css->device;
 996	css->pseudo_subchannel->dev.release = css_subchannel_release;
 997	mutex_init(&css->pseudo_subchannel->reg_mutex);
 998	ret = css_sch_create_locks(css->pseudo_subchannel);
 999	if (ret) {
1000		kfree(css->pseudo_subchannel);
1001		device_unregister(&css->device);
1002		goto out_err;
1003	}
1004
1005	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
1006	ret = device_register(&css->pseudo_subchannel->dev);
1007	if (ret) {
1008		put_device(&css->pseudo_subchannel->dev);
1009		device_unregister(&css->device);
1010		goto out_err;
1011	}
1012
1013	return ret;
1014out_err:
1015	channel_subsystems[nr] = NULL;
1016	return ret;
1017}
1018
1019static int css_reboot_event(struct notifier_block *this,
1020			    unsigned long event,
1021			    void *ptr)
1022{
1023	struct channel_subsystem *css;
1024	int ret;
1025
1026	ret = NOTIFY_DONE;
1027	for_each_css(css) {
1028		mutex_lock(&css->mutex);
1029		if (css->cm_enabled)
1030			if (chsc_secm(css, 0))
1031				ret = NOTIFY_BAD;
1032		mutex_unlock(&css->mutex);
1033	}
1034
1035	return ret;
1036}
1037
1038static struct notifier_block css_reboot_notifier = {
1039	.notifier_call = css_reboot_event,
1040};
1041
1042/*
1043 * Since the css devices are neither on a bus nor have a class
1044 * nor have a special device type, we cannot stop/restart channel
1045 * path measurements via the normal suspend/resume callbacks, but have
1046 * to use notifiers.
1047 */
1048static int css_power_event(struct notifier_block *this, unsigned long event,
1049			   void *ptr)
1050{
1051	struct channel_subsystem *css;
1052	int ret;
1053
1054	switch (event) {
1055	case PM_HIBERNATION_PREPARE:
1056	case PM_SUSPEND_PREPARE:
1057		ret = NOTIFY_DONE;
1058		for_each_css(css) {
1059			mutex_lock(&css->mutex);
1060			if (!css->cm_enabled) {
1061				mutex_unlock(&css->mutex);
1062				continue;
1063			}
1064			ret = __chsc_do_secm(css, 0);
1065			ret = notifier_from_errno(ret);
1066			mutex_unlock(&css->mutex);
1067		}
1068		break;
1069	case PM_POST_HIBERNATION:
1070	case PM_POST_SUSPEND:
1071		ret = NOTIFY_DONE;
1072		for_each_css(css) {
1073			mutex_lock(&css->mutex);
1074			if (!css->cm_enabled) {
1075				mutex_unlock(&css->mutex);
1076				continue;
1077			}
1078			ret = __chsc_do_secm(css, 1);
1079			ret = notifier_from_errno(ret);
1080			mutex_unlock(&css->mutex);
1081		}
1082		/* search for subchannels, which appeared during hibernation */
1083		css_schedule_reprobe();
1084		break;
1085	default:
1086		ret = NOTIFY_DONE;
1087	}
1088	return ret;
1089
1090}
1091static struct notifier_block css_power_notifier = {
1092	.notifier_call = css_power_event,
1093};
1094
1095#define  CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
1096static struct gen_pool *cio_dma_pool;
1097
1098/* Currently cio supports only a single css */
1099struct device *cio_get_dma_css_dev(void)
1100{
1101	return &channel_subsystems[0]->device;
1102}
1103
1104struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
1105{
1106	struct gen_pool *gp_dma;
1107	void *cpu_addr;
1108	dma_addr_t dma_addr;
1109	int i;
1110
1111	gp_dma = gen_pool_create(3, -1);
1112	if (!gp_dma)
1113		return NULL;
1114	for (i = 0; i < nr_pages; ++i) {
1115		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
1116					      CIO_DMA_GFP);
1117		if (!cpu_addr)
1118			return gp_dma;
1119		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
1120				  dma_addr, PAGE_SIZE, -1);
1121	}
1122	return gp_dma;
1123}
1124
1125static void __gp_dma_free_dma(struct gen_pool *pool,
1126			      struct gen_pool_chunk *chunk, void *data)
1127{
1128	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
1129
1130	dma_free_coherent((struct device *) data, chunk_size,
1131			 (void *) chunk->start_addr,
1132			 (dma_addr_t) chunk->phys_addr);
1133}
1134
1135void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
1136{
1137	if (!gp_dma)
1138		return;
1139	/* this is quite ugly but no better idea */
1140	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
1141	gen_pool_destroy(gp_dma);
1142}
1143
1144static int cio_dma_pool_init(void)
1145{
1146	/* No need to free up the resources: compiled in */
1147	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
1148	if (!cio_dma_pool)
1149		return -ENOMEM;
1150	return 0;
1151}
1152
1153void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
1154			size_t size)
1155{
1156	dma_addr_t dma_addr;
1157	unsigned long addr;
1158	size_t chunk_size;
1159
1160	if (!gp_dma)
1161		return NULL;
1162	addr = gen_pool_alloc(gp_dma, size);
1163	while (!addr) {
1164		chunk_size = round_up(size, PAGE_SIZE);
1165		addr = (unsigned long) dma_alloc_coherent(dma_dev,
1166					 chunk_size, &dma_addr, CIO_DMA_GFP);
1167		if (!addr)
1168			return NULL;
1169		gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
1170		addr = gen_pool_alloc(gp_dma, size);
1171	}
1172	return (void *) addr;
1173}
1174
1175void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
1176{
1177	if (!cpu_addr)
1178		return;
1179	memset(cpu_addr, 0, size);
1180	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
1181}
1182
1183/*
1184 * Allocate dma memory from the css global pool. Intended for memory not
1185 * specific to any single device within the css. The allocated memory
1186 * is not guaranteed to be 31-bit addressable.
1187 *
1188 * Caution: Not suitable for early stuff like console.
1189 */
1190void *cio_dma_zalloc(size_t size)
1191{
1192	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
1193}
1194
1195void cio_dma_free(void *cpu_addr, size_t size)
1196{
1197	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
1198}
1199
1200/*
1201 * Now that the driver core is running, we can setup our channel subsystem.
1202 * The struct subchannel's are created during probing.
1203 */
1204static int __init css_bus_init(void)
1205{
1206	int ret, i;
1207
1208	ret = chsc_init();
1209	if (ret)
1210		return ret;
1211
1212	chsc_determine_css_characteristics();
1213	/* Try to enable MSS. */
1214	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
1215	if (ret)
1216		max_ssid = 0;
1217	else /* Success. */
1218		max_ssid = __MAX_SSID;
1219
1220	ret = slow_subchannel_init();
1221	if (ret)
1222		goto out;
1223
1224	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
1225	if (ret)
1226		goto out;
1227
1228	if ((ret = bus_register(&css_bus_type)))
1229		goto out;
1230
1231	/* Setup css structure. */
1232	for (i = 0; i <= MAX_CSS_IDX; i++) {
1233		ret = setup_css(i);
1234		if (ret)
1235			goto out_unregister;
1236	}
1237	ret = register_reboot_notifier(&css_reboot_notifier);
1238	if (ret)
1239		goto out_unregister;
1240	ret = register_pm_notifier(&css_power_notifier);
1241	if (ret)
1242		goto out_unregister_rn;
1243	ret = cio_dma_pool_init();
1244	if (ret)
1245		goto out_unregister_pmn;
1246	airq_init();
1247	css_init_done = 1;
1248
1249	/* Enable default isc for I/O subchannels. */
1250	isc_register(IO_SCH_ISC);
1251
1252	return 0;
1253out_unregister_pmn:
1254	unregister_pm_notifier(&css_power_notifier);
1255out_unregister_rn:
1256	unregister_reboot_notifier(&css_reboot_notifier);
1257out_unregister:
1258	while (i-- > 0) {
1259		struct channel_subsystem *css = channel_subsystems[i];
1260		device_unregister(&css->pseudo_subchannel->dev);
1261		device_unregister(&css->device);
1262	}
1263	bus_unregister(&css_bus_type);
1264out:
1265	crw_unregister_handler(CRW_RSC_SCH);
1266	idset_free(slow_subchannel_set);
1267	chsc_init_cleanup();
1268	pr_alert("The CSS device driver initialization failed with "
1269		 "errno=%d\n", ret);
1270	return ret;
1271}
1272
1273static void __init css_bus_cleanup(void)
1274{
1275	struct channel_subsystem *css;
1276
1277	for_each_css(css) {
1278		device_unregister(&css->pseudo_subchannel->dev);
1279		device_unregister(&css->device);
1280	}
1281	bus_unregister(&css_bus_type);
1282	crw_unregister_handler(CRW_RSC_SCH);
1283	idset_free(slow_subchannel_set);
1284	chsc_init_cleanup();
1285	isc_unregister(IO_SCH_ISC);
1286}
1287
1288static int __init channel_subsystem_init(void)
1289{
1290	int ret;
1291
1292	ret = css_bus_init();
1293	if (ret)
1294		return ret;
1295	cio_work_q = create_singlethread_workqueue("cio");
1296	if (!cio_work_q) {
1297		ret = -ENOMEM;
1298		goto out_bus;
1299	}
1300	ret = io_subchannel_init();
1301	if (ret)
1302		goto out_wq;
1303
1304	/* Register subchannels which are already in use. */
1305	cio_register_early_subchannels();
1306	/* Start initial subchannel evaluation. */
1307	css_schedule_eval_all();
1308
1309	return ret;
1310out_wq:
1311	destroy_workqueue(cio_work_q);
1312out_bus:
1313	css_bus_cleanup();
1314	return ret;
1315}
1316subsys_initcall(channel_subsystem_init);
1317
1318static int css_settle(struct device_driver *drv, void *unused)
1319{
1320	struct css_driver *cssdrv = to_cssdriver(drv);
1321
1322	if (cssdrv->settle)
1323		return cssdrv->settle();
1324	return 0;
1325}
1326
1327int css_complete_work(void)
1328{
1329	int ret;
1330
1331	/* Wait for the evaluation of subchannels to finish. */
1332	ret = wait_event_interruptible(css_eval_wq,
1333				       atomic_read(&css_eval_scheduled) == 0);
1334	if (ret)
1335		return -EINTR;
1336	flush_workqueue(cio_work_q);
1337	/* Wait for the subchannel type specific initialization to finish */
1338	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
1339}
1340
1341
1342/*
1343 * Wait for the initialization of devices to finish, to make sure we are
1344 * done with our setup if the search for the root device starts.
1345 */
1346static int __init channel_subsystem_init_sync(void)
1347{
1348	css_complete_work();
1349	return 0;
1350}
1351subsys_initcall_sync(channel_subsystem_init_sync);
1352
1353void channel_subsystem_reinit(void)
1354{
1355	struct channel_path *chp;
1356	struct chp_id chpid;
1357
1358	chsc_enable_facility(CHSC_SDA_OC_MSS);
1359	chp_id_for_each(&chpid) {
1360		chp = chpid_to_chp(chpid);
1361		if (chp)
1362			chp_update_desc(chp);
1363	}
1364	cmf_reactivate();
1365}
1366
1367#ifdef CONFIG_PROC_FS
1368static ssize_t cio_settle_write(struct file *file, const char __user *buf,
1369				size_t count, loff_t *ppos)
1370{
1371	int ret;
1372
1373	/* Handle pending CRW's. */
1374	crw_wait_for_channel_report();
1375	ret = css_complete_work();
1376
1377	return ret ? ret : count;
1378}
1379
1380static const struct proc_ops cio_settle_proc_ops = {
1381	.proc_open	= nonseekable_open,
1382	.proc_write	= cio_settle_write,
1383	.proc_lseek	= no_llseek,
1384};
1385
1386static int __init cio_settle_init(void)
1387{
1388	struct proc_dir_entry *entry;
1389
1390	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
1391	if (!entry)
1392		return -ENOMEM;
1393	return 0;
1394}
1395device_initcall(cio_settle_init);
1396#endif /*CONFIG_PROC_FS*/
1397
1398int sch_is_pseudo_sch(struct subchannel *sch)
1399{
1400	if (!sch->dev.parent)
1401		return 0;
1402	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
1403}
1404
1405static int css_bus_match(struct device *dev, struct device_driver *drv)
1406{
1407	struct subchannel *sch = to_subchannel(dev);
1408	struct css_driver *driver = to_cssdriver(drv);
1409	struct css_device_id *id;
1410
1411	/* When driver_override is set, only bind to the matching driver */
1412	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
1413		return 0;
1414
1415	for (id = driver->subchannel_type; id->match_flags; id++) {
1416		if (sch->st == id->type)
1417			return 1;
1418	}
1419
1420	return 0;
1421}
1422
1423static int css_probe(struct device *dev)
1424{
1425	struct subchannel *sch;
1426	int ret;
1427
1428	sch = to_subchannel(dev);
1429	sch->driver = to_cssdriver(dev->driver);
1430	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
1431	if (ret)
1432		sch->driver = NULL;
1433	return ret;
1434}
1435
1436static int css_remove(struct device *dev)
1437{
1438	struct subchannel *sch;
1439	int ret;
1440
1441	sch = to_subchannel(dev);
1442	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
1443	sch->driver = NULL;
1444	return ret;
1445}
1446
1447static void css_shutdown(struct device *dev)
1448{
1449	struct subchannel *sch;
1450
1451	sch = to_subchannel(dev);
1452	if (sch->driver && sch->driver->shutdown)
1453		sch->driver->shutdown(sch);
1454}
1455
1456static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
1457{
1458	struct subchannel *sch = to_subchannel(dev);
1459	int ret;
1460
1461	ret = add_uevent_var(env, "ST=%01X", sch->st);
1462	if (ret)
1463		return ret;
1464	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
1465	return ret;
1466}
1467
1468static int css_pm_prepare(struct device *dev)
1469{
1470	struct subchannel *sch = to_subchannel(dev);
1471	struct css_driver *drv;
1472
1473	if (mutex_is_locked(&sch->reg_mutex))
1474		return -EAGAIN;
1475	if (!sch->dev.driver)
1476		return 0;
1477	drv = to_cssdriver(sch->dev.driver);
1478	/* Notify drivers that they may not register children. */
1479	return drv->prepare ? drv->prepare(sch) : 0;
1480}
1481
1482static void css_pm_complete(struct device *dev)
1483{
1484	struct subchannel *sch = to_subchannel(dev);
1485	struct css_driver *drv;
1486
1487	if (!sch->dev.driver)
1488		return;
1489	drv = to_cssdriver(sch->dev.driver);
1490	if (drv->complete)
1491		drv->complete(sch);
1492}
1493
1494static int css_pm_freeze(struct device *dev)
1495{
1496	struct subchannel *sch = to_subchannel(dev);
1497	struct css_driver *drv;
1498
1499	if (!sch->dev.driver)
1500		return 0;
1501	drv = to_cssdriver(sch->dev.driver);
1502	return drv->freeze ? drv->freeze(sch) : 0;
1503}
1504
1505static int css_pm_thaw(struct device *dev)
1506{
1507	struct subchannel *sch = to_subchannel(dev);
1508	struct css_driver *drv;
1509
1510	if (!sch->dev.driver)
1511		return 0;
1512	drv = to_cssdriver(sch->dev.driver);
1513	return drv->thaw ? drv->thaw(sch) : 0;
1514}
1515
1516static int css_pm_restore(struct device *dev)
1517{
1518	struct subchannel *sch = to_subchannel(dev);
1519	struct css_driver *drv;
1520
1521	css_update_ssd_info(sch);
1522	if (!sch->dev.driver)
1523		return 0;
1524	drv = to_cssdriver(sch->dev.driver);
1525	return drv->restore ? drv->restore(sch) : 0;
1526}
1527
1528static const struct dev_pm_ops css_pm_ops = {
1529	.prepare = css_pm_prepare,
1530	.complete = css_pm_complete,
1531	.freeze = css_pm_freeze,
1532	.thaw = css_pm_thaw,
1533	.restore = css_pm_restore,
1534};
1535
1536static struct bus_type css_bus_type = {
1537	.name     = "css",
1538	.match    = css_bus_match,
1539	.probe    = css_probe,
1540	.remove   = css_remove,
1541	.shutdown = css_shutdown,
1542	.uevent   = css_uevent,
1543	.pm = &css_pm_ops,
1544};
1545
1546/**
1547 * css_driver_register - register a css driver
1548 * @cdrv: css driver to register
1549 *
1550 * This is mainly a wrapper around driver_register that sets name
1551 * and bus_type in the embedded struct device_driver correctly.
1552 */
1553int css_driver_register(struct css_driver *cdrv)
1554{
1555	cdrv->drv.bus = &css_bus_type;
1556	return driver_register(&cdrv->drv);
1557}
1558EXPORT_SYMBOL_GPL(css_driver_register);
1559
1560/**
1561 * css_driver_unregister - unregister a css driver
1562 * @cdrv: css driver to unregister
1563 *
1564 * This is a wrapper around driver_unregister.
1565 */
1566void css_driver_unregister(struct css_driver *cdrv)
1567{
1568	driver_unregister(&cdrv->drv);
1569}
1570EXPORT_SYMBOL_GPL(css_driver_unregister);
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * driver for channel subsystem
   4 *
   5 * Copyright IBM Corp. 2002, 2010
   6 *
   7 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
   8 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
   9 */
  10
  11#define KMSG_COMPONENT "cio"
  12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  13
  14#include <linux/export.h>
  15#include <linux/init.h>
  16#include <linux/device.h>
  17#include <linux/slab.h>
  18#include <linux/errno.h>
  19#include <linux/list.h>
  20#include <linux/reboot.h>
  21#include <linux/proc_fs.h>
  22#include <linux/genalloc.h>
  23#include <linux/dma-mapping.h>
  24#include <asm/isc.h>
  25#include <asm/crw.h>
  26
  27#include "css.h"
  28#include "cio.h"
  29#include "blacklist.h"
  30#include "cio_debug.h"
  31#include "ioasm.h"
  32#include "chsc.h"
  33#include "device.h"
  34#include "idset.h"
  35#include "chp.h"
  36
  37int css_init_done = 0;
  38int max_ssid;
  39
  40#define MAX_CSS_IDX 0
  41struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
  42static struct bus_type css_bus_type;
  43
  44int
  45for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
  46{
  47	struct subchannel_id schid;
  48	int ret;
  49
  50	init_subchannel_id(&schid);
  51	do {
  52		do {
  53			ret = fn(schid, data);
  54			if (ret)
  55				break;
  56		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
  57		schid.sch_no = 0;
  58	} while (schid.ssid++ < max_ssid);
  59	return ret;
  60}
  61
  62struct cb_data {
  63	void *data;
  64	struct idset *set;
  65	int (*fn_known_sch)(struct subchannel *, void *);
  66	int (*fn_unknown_sch)(struct subchannel_id, void *);
  67};
  68
  69static int call_fn_known_sch(struct device *dev, void *data)
  70{
  71	struct subchannel *sch = to_subchannel(dev);
  72	struct cb_data *cb = data;
  73	int rc = 0;
  74
  75	if (cb->set)
  76		idset_sch_del(cb->set, sch->schid);
  77	if (cb->fn_known_sch)
  78		rc = cb->fn_known_sch(sch, cb->data);
  79	return rc;
  80}
  81
  82static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
  83{
  84	struct cb_data *cb = data;
  85	int rc = 0;
  86
  87	if (idset_sch_contains(cb->set, schid))
  88		rc = cb->fn_unknown_sch(schid, cb->data);
  89	return rc;
  90}
  91
  92static int call_fn_all_sch(struct subchannel_id schid, void *data)
  93{
  94	struct cb_data *cb = data;
  95	struct subchannel *sch;
  96	int rc = 0;
  97
  98	sch = get_subchannel_by_schid(schid);
  99	if (sch) {
 100		if (cb->fn_known_sch)
 101			rc = cb->fn_known_sch(sch, cb->data);
 102		put_device(&sch->dev);
 103	} else {
 104		if (cb->fn_unknown_sch)
 105			rc = cb->fn_unknown_sch(schid, cb->data);
 106	}
 107
 108	return rc;
 109}
 110
 111int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
 112			       int (*fn_unknown)(struct subchannel_id,
 113			       void *), void *data)
 114{
 115	struct cb_data cb;
 116	int rc;
 117
 118	cb.data = data;
 119	cb.fn_known_sch = fn_known;
 120	cb.fn_unknown_sch = fn_unknown;
 121
 122	if (fn_known && !fn_unknown) {
 123		/* Skip idset allocation in case of known-only loop. */
 124		cb.set = NULL;
 125		return bus_for_each_dev(&css_bus_type, NULL, &cb,
 126					call_fn_known_sch);
 127	}
 128
 129	cb.set = idset_sch_new();
 130	if (!cb.set)
 131		/* fall back to brute force scanning in case of oom */
 132		return for_each_subchannel(call_fn_all_sch, &cb);
 133
 134	idset_fill(cb.set);
 135
 136	/* Process registered subchannels. */
 137	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
 138	if (rc)
 139		goto out;
 140	/* Process unregistered subchannels. */
 141	if (fn_unknown)
 142		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
 143out:
 144	idset_free(cb.set);
 145
 146	return rc;
 147}
 148
 149static void css_sch_todo(struct work_struct *work);
 150
 151static void css_sch_create_locks(struct subchannel *sch)
 152{
 153	spin_lock_init(&sch->lock);
 154	mutex_init(&sch->reg_mutex);
 155}
 156
 157static void css_subchannel_release(struct device *dev)
 158{
 159	struct subchannel *sch = to_subchannel(dev);
 160
 161	sch->config.intparm = 0;
 162	cio_commit_config(sch);
 163	kfree(sch->driver_override);
 164	kfree(sch);
 165}
 166
 167static int css_validate_subchannel(struct subchannel_id schid,
 168				   struct schib *schib)
 169{
 170	int err;
 171
 172	switch (schib->pmcw.st) {
 173	case SUBCHANNEL_TYPE_IO:
 174	case SUBCHANNEL_TYPE_MSG:
 175		if (!css_sch_is_valid(schib))
 176			err = -ENODEV;
 177		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
 178			CIO_MSG_EVENT(6, "Blacklisted device detected "
 179				      "at devno %04X, subchannel set %x\n",
 180				      schib->pmcw.dev, schid.ssid);
 181			err = -ENODEV;
 182		} else
 183			err = 0;
 184		break;
 185	default:
 186		err = 0;
 187	}
 188	if (err)
 189		goto out;
 190
 191	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
 192		      schid.ssid, schid.sch_no, schib->pmcw.st);
 193out:
 194	return err;
 195}
 196
 197struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
 198					struct schib *schib)
 199{
 200	struct subchannel *sch;
 201	int ret;
 202
 203	ret = css_validate_subchannel(schid, schib);
 204	if (ret < 0)
 205		return ERR_PTR(ret);
 206
 207	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
 208	if (!sch)
 209		return ERR_PTR(-ENOMEM);
 210
 211	sch->schid = schid;
 212	sch->schib = *schib;
 213	sch->st = schib->pmcw.st;
 214
 215	css_sch_create_locks(sch);
 216
 217	INIT_WORK(&sch->todo_work, css_sch_todo);
 218	sch->dev.release = &css_subchannel_release;
 219	sch->dev.dma_mask = &sch->dma_mask;
 220	device_initialize(&sch->dev);
 221	/*
 222	 * The physical addresses for some of the dma structures that can
 223	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
 224	 */
 225	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
 226	if (ret)
 227		goto err;
 228	/*
 229	 * But we don't have such restrictions imposed on the stuff that
 230	 * is handled by the streaming API.
 231	 */
 232	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
 233	if (ret)
 234		goto err;
 235
 236	return sch;
 237
 238err:
 239	kfree(sch);
 240	return ERR_PTR(ret);
 241}
 242
 243static int css_sch_device_register(struct subchannel *sch)
 244{
 245	int ret;
 246
 247	mutex_lock(&sch->reg_mutex);
 248	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
 249		     sch->schid.sch_no);
 250	ret = device_add(&sch->dev);
 251	mutex_unlock(&sch->reg_mutex);
 252	return ret;
 253}
 254
 255/**
 256 * css_sch_device_unregister - unregister a subchannel
 257 * @sch: subchannel to be unregistered
 258 */
 259void css_sch_device_unregister(struct subchannel *sch)
 260{
 261	mutex_lock(&sch->reg_mutex);
 262	if (device_is_registered(&sch->dev))
 263		device_unregister(&sch->dev);
 264	mutex_unlock(&sch->reg_mutex);
 265}
 266EXPORT_SYMBOL_GPL(css_sch_device_unregister);
 267
 268static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
 269{
 270	int i;
 271	int mask;
 272
 273	memset(ssd, 0, sizeof(struct chsc_ssd_info));
 274	ssd->path_mask = pmcw->pim;
 275	for (i = 0; i < 8; i++) {
 276		mask = 0x80 >> i;
 277		if (pmcw->pim & mask) {
 278			chp_id_init(&ssd->chpid[i]);
 279			ssd->chpid[i].id = pmcw->chpid[i];
 280		}
 281	}
 282}
 283
 284static void ssd_register_chpids(struct chsc_ssd_info *ssd)
 285{
 286	int i;
 287	int mask;
 288
 289	for (i = 0; i < 8; i++) {
 290		mask = 0x80 >> i;
 291		if (ssd->path_mask & mask)
 292			chp_new(ssd->chpid[i]);
 293	}
 294}
 295
 296void css_update_ssd_info(struct subchannel *sch)
 297{
 298	int ret;
 299
 300	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
 301	if (ret)
 302		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
 303
 304	ssd_register_chpids(&sch->ssd_info);
 305}
 306
 307static ssize_t type_show(struct device *dev, struct device_attribute *attr,
 308			 char *buf)
 309{
 310	struct subchannel *sch = to_subchannel(dev);
 311
 312	return sprintf(buf, "%01x\n", sch->st);
 313}
 314
 315static DEVICE_ATTR_RO(type);
 316
 317static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 318			     char *buf)
 319{
 320	struct subchannel *sch = to_subchannel(dev);
 321
 322	return sprintf(buf, "css:t%01X\n", sch->st);
 323}
 324
 325static DEVICE_ATTR_RO(modalias);
 326
 327static ssize_t driver_override_store(struct device *dev,
 328				     struct device_attribute *attr,
 329				     const char *buf, size_t count)
 330{
 331	struct subchannel *sch = to_subchannel(dev);
 332	int ret;
 333
 334	ret = driver_set_override(dev, &sch->driver_override, buf, count);
 335	if (ret)
 336		return ret;
 337
 338	return count;
 339}
 340
 341static ssize_t driver_override_show(struct device *dev,
 342				    struct device_attribute *attr, char *buf)
 343{
 344	struct subchannel *sch = to_subchannel(dev);
 345	ssize_t len;
 346
 347	device_lock(dev);
 348	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
 349	device_unlock(dev);
 350	return len;
 351}
 352static DEVICE_ATTR_RW(driver_override);
 353
 354static struct attribute *subch_attrs[] = {
 355	&dev_attr_type.attr,
 356	&dev_attr_modalias.attr,
 357	&dev_attr_driver_override.attr,
 358	NULL,
 359};
 360
 361static struct attribute_group subch_attr_group = {
 362	.attrs = subch_attrs,
 363};
 364
 365static const struct attribute_group *default_subch_attr_groups[] = {
 366	&subch_attr_group,
 367	NULL,
 368};
 369
 370static ssize_t chpids_show(struct device *dev,
 371			   struct device_attribute *attr,
 372			   char *buf)
 373{
 374	struct subchannel *sch = to_subchannel(dev);
 375	struct chsc_ssd_info *ssd = &sch->ssd_info;
 376	ssize_t ret = 0;
 377	int mask;
 378	int chp;
 379
 380	for (chp = 0; chp < 8; chp++) {
 381		mask = 0x80 >> chp;
 382		if (ssd->path_mask & mask)
 383			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
 384		else
 385			ret += sprintf(buf + ret, "00 ");
 386	}
 387	ret += sprintf(buf + ret, "\n");
 388	return ret;
 389}
 390static DEVICE_ATTR_RO(chpids);
 391
 392static ssize_t pimpampom_show(struct device *dev,
 393			      struct device_attribute *attr,
 394			      char *buf)
 395{
 396	struct subchannel *sch = to_subchannel(dev);
 397	struct pmcw *pmcw = &sch->schib.pmcw;
 398
 399	return sprintf(buf, "%02x %02x %02x\n",
 400		       pmcw->pim, pmcw->pam, pmcw->pom);
 401}
 402static DEVICE_ATTR_RO(pimpampom);
 403
 404static ssize_t dev_busid_show(struct device *dev,
 405			      struct device_attribute *attr,
 406			      char *buf)
 407{
 408	struct subchannel *sch = to_subchannel(dev);
 409	struct pmcw *pmcw = &sch->schib.pmcw;
 410
 411	if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
 412	    (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
 413		return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
 414				  pmcw->dev);
 415	else
 416		return sysfs_emit(buf, "none\n");
 417}
 418static DEVICE_ATTR_RO(dev_busid);
 419
 420static struct attribute *io_subchannel_type_attrs[] = {
 421	&dev_attr_chpids.attr,
 422	&dev_attr_pimpampom.attr,
 423	&dev_attr_dev_busid.attr,
 424	NULL,
 425};
 426ATTRIBUTE_GROUPS(io_subchannel_type);
 427
 428static const struct device_type io_subchannel_type = {
 429	.groups = io_subchannel_type_groups,
 430};
 431
 432int css_register_subchannel(struct subchannel *sch)
 433{
 434	int ret;
 435
 436	/* Initialize the subchannel structure */
 437	sch->dev.parent = &channel_subsystems[0]->device;
 438	sch->dev.bus = &css_bus_type;
 439	sch->dev.groups = default_subch_attr_groups;
 440
 441	if (sch->st == SUBCHANNEL_TYPE_IO)
 442		sch->dev.type = &io_subchannel_type;
 443
 444	css_update_ssd_info(sch);
 445	/* make it known to the system */
 446	ret = css_sch_device_register(sch);
 447	if (ret) {
 448		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
 449			      sch->schid.ssid, sch->schid.sch_no, ret);
 450		return ret;
 451	}
 452	return ret;
 453}
 454
 455static int css_probe_device(struct subchannel_id schid, struct schib *schib)
 456{
 457	struct subchannel *sch;
 458	int ret;
 459
 460	sch = css_alloc_subchannel(schid, schib);
 461	if (IS_ERR(sch))
 462		return PTR_ERR(sch);
 463
 464	ret = css_register_subchannel(sch);
 465	if (ret)
 466		put_device(&sch->dev);
 467
 468	return ret;
 469}
 470
 471static int
 472check_subchannel(struct device *dev, const void *data)
 473{
 474	struct subchannel *sch;
 475	struct subchannel_id *schid = (void *)data;
 476
 477	sch = to_subchannel(dev);
 478	return schid_equal(&sch->schid, schid);
 479}
 480
 481struct subchannel *
 482get_subchannel_by_schid(struct subchannel_id schid)
 483{
 484	struct device *dev;
 485
 486	dev = bus_find_device(&css_bus_type, NULL,
 487			      &schid, check_subchannel);
 488
 489	return dev ? to_subchannel(dev) : NULL;
 490}
 491
 492/**
 493 * css_sch_is_valid() - check if a subchannel is valid
 494 * @schib: subchannel information block for the subchannel
 495 */
 496int css_sch_is_valid(struct schib *schib)
 497{
 498	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
 499		return 0;
 500	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
 501		return 0;
 502	return 1;
 503}
 504EXPORT_SYMBOL_GPL(css_sch_is_valid);
 505
 506static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
 507{
 508	struct schib schib;
 509	int ccode;
 510
 511	if (!slow) {
 512		/* Will be done on the slow path. */
 513		return -EAGAIN;
 514	}
 515	/*
 516	 * The first subchannel that is not-operational (ccode==3)
 517	 * indicates that there aren't any more devices available.
 518	 * If stsch gets an exception, it means the current subchannel set
 519	 * is not valid.
 520	 */
 521	ccode = stsch(schid, &schib);
 522	if (ccode)
 523		return (ccode == 3) ? -ENXIO : ccode;
 524
 525	return css_probe_device(schid, &schib);
 526}
 527
 528static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
 529{
 530	int ret = 0;
 531
 532	if (sch->driver) {
 533		if (sch->driver->sch_event)
 534			ret = sch->driver->sch_event(sch, slow);
 535		else
 536			dev_dbg(&sch->dev,
 537				"Got subchannel machine check but "
 538				"no sch_event handler provided.\n");
 539	}
 540	if (ret != 0 && ret != -EAGAIN) {
 541		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
 542			      sch->schid.ssid, sch->schid.sch_no, ret);
 543	}
 544	return ret;
 545}
 546
 547static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
 548{
 549	struct subchannel *sch;
 550	int ret;
 551
 552	sch = get_subchannel_by_schid(schid);
 553	if (sch) {
 554		ret = css_evaluate_known_subchannel(sch, slow);
 555		put_device(&sch->dev);
 556	} else
 557		ret = css_evaluate_new_subchannel(schid, slow);
 558	if (ret == -EAGAIN)
 559		css_schedule_eval(schid);
 560}
 561
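/*
 * Added note: this is the common entry point of subchannel evaluation.
 * A known subchannel is handed to its driver's sch_event() callback; an
 * unknown one is probed with stsch() and, if valid, registered through
 * css_probe_device(). Whenever -EAGAIN comes back, the schid is put onto
 * the slow path via css_schedule_eval() and retried from there.
 */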
 562/**
 563 * css_sched_sch_todo - schedule a subchannel operation
 564 * @sch: subchannel
  565 * @todo: todo value specifying the operation to be performed
 566 *
 567 * Schedule the operation identified by @todo to be performed on the slow path
 568 * workqueue. Do nothing if another operation with higher priority is already
 569 * scheduled. Needs to be called with subchannel lock held.
 570 */
 571void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
 572{
 573	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
 574		      sch->schid.ssid, sch->schid.sch_no, todo);
 575	if (sch->todo >= todo)
 576		return;
 577	/* Get workqueue ref. */
 578	if (!get_device(&sch->dev))
 579		return;
 580	sch->todo = todo;
 581	if (!queue_work(cio_work_q, &sch->todo_work)) {
 582		/* Already queued, release workqueue ref. */
 583		put_device(&sch->dev);
 584	}
 585}
 586EXPORT_SYMBOL_GPL(css_sched_sch_todo);
 587
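/*
 * Illustrative usage sketch (not part of the original source): callers
 * schedule a todo while holding the subchannel lock, e.g.
 *
 *	spin_lock_irq(&sch->lock);
 *	css_sched_sch_todo(sch, SCH_TODO_UNREG);
 *	spin_unlock_irq(&sch->lock);
 *
 * The device reference obtained above is dropped again at the end of
 * css_sch_todo(), or right away if the work was already queued.
 */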
 588static void css_sch_todo(struct work_struct *work)
 589{
 590	struct subchannel *sch;
 591	enum sch_todo todo;
 592	int ret;
 593
 594	sch = container_of(work, struct subchannel, todo_work);
 595	/* Find out todo. */
 596	spin_lock_irq(&sch->lock);
 597	todo = sch->todo;
 598	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
 599		      sch->schid.sch_no, todo);
 600	sch->todo = SCH_TODO_NOTHING;
 601	spin_unlock_irq(&sch->lock);
 602	/* Perform todo. */
 603	switch (todo) {
 604	case SCH_TODO_NOTHING:
 605		break;
 606	case SCH_TODO_EVAL:
 607		ret = css_evaluate_known_subchannel(sch, 1);
 608		if (ret == -EAGAIN) {
 609			spin_lock_irq(&sch->lock);
 610			css_sched_sch_todo(sch, todo);
 611			spin_unlock_irq(&sch->lock);
 612		}
 613		break;
 614	case SCH_TODO_UNREG:
 615		css_sch_device_unregister(sch);
 616		break;
 617	}
 618	/* Release workqueue ref. */
 619	put_device(&sch->dev);
 620}
 621
 622static struct idset *slow_subchannel_set;
 623static DEFINE_SPINLOCK(slow_subchannel_lock);
 624static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
 625static atomic_t css_eval_scheduled;
 626
 627static int __init slow_subchannel_init(void)
 628{
 629	atomic_set(&css_eval_scheduled, 0);
 630	slow_subchannel_set = idset_sch_new();
 631	if (!slow_subchannel_set) {
 632		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
 633		return -ENOMEM;
 634	}
 635	return 0;
 636}
 637
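/*
 * Added note: slow_subchannel_set is an idset (bitmap) of subchannel IDs
 * that still need evaluation, protected by slow_subchannel_lock. Whenever
 * IDs are added, css_eval_scheduled is set and slow_path_work is queued;
 * once the set is drained, css_slow_path_func() clears the flag and wakes
 * css_eval_wq, which css_complete_work() below waits on.
 */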
 638static int slow_eval_known_fn(struct subchannel *sch, void *data)
 639{
 640	int eval;
 641	int rc;
 642
 643	spin_lock_irq(&slow_subchannel_lock);
 644	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
 645	idset_sch_del(slow_subchannel_set, sch->schid);
 646	spin_unlock_irq(&slow_subchannel_lock);
 647	if (eval) {
 648		rc = css_evaluate_known_subchannel(sch, 1);
 649		if (rc == -EAGAIN)
 650			css_schedule_eval(sch->schid);
  651		/*
  652		 * The loop might take a long time on platforms with many
  653		 * known devices. Allow scheduling here.
  654		 */
 655		cond_resched();
 656	}
 657	return 0;
 658}
 659
 660static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
 661{
 662	int eval;
 663	int rc = 0;
 664
 665	spin_lock_irq(&slow_subchannel_lock);
 666	eval = idset_sch_contains(slow_subchannel_set, schid);
 667	idset_sch_del(slow_subchannel_set, schid);
 668	spin_unlock_irq(&slow_subchannel_lock);
 669	if (eval) {
 670		rc = css_evaluate_new_subchannel(schid, 1);
 671		switch (rc) {
 672		case -EAGAIN:
 673			css_schedule_eval(schid);
 674			rc = 0;
 675			break;
 676		case -ENXIO:
 677		case -ENOMEM:
 678		case -EIO:
 679			/* These should abort looping */
 680			spin_lock_irq(&slow_subchannel_lock);
 681			idset_sch_del_subseq(slow_subchannel_set, schid);
 682			spin_unlock_irq(&slow_subchannel_lock);
 683			break;
 684		default:
 685			rc = 0;
 686		}
 687		/* Allow scheduling here since the containing loop might
 688		 * take a while.  */
 689		cond_resched();
 690	}
 691	return rc;
 692}
 693
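/*
 * Added note: -ENXIO/-ENOMEM/-EIO abort the scan of the remaining IDs.
 * idset_sch_del_subseq() drops the subsequent subchannel numbers from the
 * set, since e.g. a cc 3 from stsch() (mapped to -ENXIO above) means that
 * no further devices exist in this subchannel set.
 */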
 694static void css_slow_path_func(struct work_struct *unused)
 695{
 696	unsigned long flags;
 697
 698	CIO_TRACE_EVENT(4, "slowpath");
 699	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
 700				   NULL);
 701	spin_lock_irqsave(&slow_subchannel_lock, flags);
 702	if (idset_is_empty(slow_subchannel_set)) {
 703		atomic_set(&css_eval_scheduled, 0);
 704		wake_up(&css_eval_wq);
 705	}
 706	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 707}
 708
 709static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
 710struct workqueue_struct *cio_work_q;
 711
 712void css_schedule_eval(struct subchannel_id schid)
 713{
 714	unsigned long flags;
 715
 716	spin_lock_irqsave(&slow_subchannel_lock, flags);
 717	idset_sch_add(slow_subchannel_set, schid);
 718	atomic_set(&css_eval_scheduled, 1);
 719	queue_delayed_work(cio_work_q, &slow_path_work, 0);
 720	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 721}
 722
 723void css_schedule_eval_all(void)
 724{
 725	unsigned long flags;
 726
 727	spin_lock_irqsave(&slow_subchannel_lock, flags);
 728	idset_fill(slow_subchannel_set);
 729	atomic_set(&css_eval_scheduled, 1);
 730	queue_delayed_work(cio_work_q, &slow_path_work, 0);
 731	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 732}
 733
 734static int __unset_validpath(struct device *dev, void *data)
 735{
 736	struct idset *set = data;
 737	struct subchannel *sch = to_subchannel(dev);
 738	struct pmcw *pmcw = &sch->schib.pmcw;
 739
  740	/* Only consider subchannels that do not have an operational device
  741	 * attached. A subchannel still has a usable device if at least one of
  742	 * its paths is available (PAM), operational (POM) and not varied
  743	 * offline (OPM); such subchannels are removed from the set here.
  744	 */
 745	if (sch->st == SUBCHANNEL_TYPE_IO &&
 746	    (sch->opm & pmcw->pam & pmcw->pom))
 747		idset_sch_del(set, sch->schid);
 748
 749	return 0;
 750}
 751
 752static int __unset_online(struct device *dev, void *data)
 753{
 754	struct idset *set = data;
 755	struct subchannel *sch = to_subchannel(dev);
 756
 757	if (sch->st == SUBCHANNEL_TYPE_IO && sch->config.ena)
 758		idset_sch_del(set, sch->schid);
 759
 760	return 0;
 761}
 762
 763void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
 764{
 765	unsigned long flags;
 766	struct idset *set;
 767
 768	/* Find unregistered subchannels. */
 769	set = idset_sch_new();
 770	if (!set) {
 771		/* Fallback. */
 772		css_schedule_eval_all();
 773		return;
 774	}
 775	idset_fill(set);
 776	switch (cond) {
 777	case CSS_EVAL_NO_PATH:
 778		bus_for_each_dev(&css_bus_type, NULL, set, __unset_validpath);
 779		break;
 780	case CSS_EVAL_NOT_ONLINE:
 781		bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
 782		break;
 783	default:
 784		break;
 785	}
 786
 787	/* Apply to slow_subchannel_set. */
 788	spin_lock_irqsave(&slow_subchannel_lock, flags);
 789	idset_add_set(slow_subchannel_set, set);
 790	atomic_set(&css_eval_scheduled, 1);
 791	queue_delayed_work(cio_work_q, &slow_path_work, delay);
 792	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 793	idset_free(set);
 794}
 795
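/*
 * Added note: css_schedule_eval_cond() starts from a completely filled
 * idset and lets __unset_validpath()/__unset_online() remove every
 * subchannel that does not match the condition; only the remaining IDs
 * are then merged into slow_subchannel_set and evaluated after @delay.
 */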
 796void css_wait_for_slow_path(void)
 797{
 798	flush_workqueue(cio_work_q);
 799}
 800
 801/* Schedule reprobing of all subchannels with no valid operational path. */
 802void css_schedule_reprobe(void)
 803{
 804	/* Schedule with a delay to allow merging of subsequent calls. */
 805	css_schedule_eval_cond(CSS_EVAL_NO_PATH, 1 * HZ);
 806}
 807EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 808
 809/*
 810 * Called from the machine check handler for subchannel report words.
 811 */
 812static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 813{
 814	struct subchannel_id mchk_schid;
 815	struct subchannel *sch;
 816
 817	if (overflow) {
 818		css_schedule_eval_all();
 819		return;
 820	}
 821	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
 822		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
 823		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
 824		      crw0->erc, crw0->rsid);
 825	if (crw1)
 826		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
 827			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
 828			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
 829			      crw1->anc, crw1->erc, crw1->rsid);
 830	init_subchannel_id(&mchk_schid);
 831	mchk_schid.sch_no = crw0->rsid;
 832	if (crw1)
 833		mchk_schid.ssid = (crw1->rsid >> 4) & 3;
 834
 835	if (crw0->erc == CRW_ERC_PMOD) {
 836		sch = get_subchannel_by_schid(mchk_schid);
 837		if (sch) {
 838			css_update_ssd_info(sch);
 839			put_device(&sch->dev);
 840		}
 841	}
 842	/*
 843	 * Since we are always presented with IPI in the CRW, we have to
 844	 * use stsch() to find out if the subchannel in question has come
 845	 * or gone.
 846	 */
 847	css_evaluate_subchannel(mchk_schid, 0);
 848}
 849
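/*
 * Added note: crw0 carries the subchannel number in its rsid field; if a
 * chained crw1 is present, part of its rsid supplies the ssid. For
 * CRW_ERC_PMOD only the cached ssd info of the subchannel is refreshed.
 * In all cases the subchannel is re-evaluated afterwards, because the CRW
 * alone does not tell whether the device has appeared or disappeared.
 */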
 850static void __init
 851css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
 852{
 853	struct cpuid cpu_id;
 854
 855	if (css_general_characteristics.mcss) {
 856		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
 857		css->global_pgid.pgid_high.ext_cssid.cssid =
 858			css->id_valid ? css->cssid : 0;
 859	} else {
 860		css->global_pgid.pgid_high.cpu_addr = stap();
 861	}
 862	get_cpu_id(&cpu_id);
 863	css->global_pgid.cpu_id = cpu_id.ident;
 864	css->global_pgid.cpu_model = cpu_id.machine;
 865	css->global_pgid.tod_high = tod_high;
 866}
 867
 868static void channel_subsystem_release(struct device *dev)
 869{
 870	struct channel_subsystem *css = to_css(dev);
 871
 872	mutex_destroy(&css->mutex);
 873	kfree(css);
 874}
 875
 876static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
 877			       char *buf)
 878{
 879	struct channel_subsystem *css = to_css(dev);
 880
 881	if (!css->id_valid)
 882		return -EINVAL;
 883
 884	return sprintf(buf, "%x\n", css->cssid);
 885}
 886static DEVICE_ATTR_RO(real_cssid);
 887
 888static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
 889			    const char *buf, size_t count)
 890{
 891	CIO_TRACE_EVENT(4, "usr-rescan");
 892
 893	css_schedule_eval_all();
 894	css_complete_work();
 895
 896	return count;
 897}
 898static DEVICE_ATTR_WO(rescan);
 899
 900static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
 901			      char *buf)
 902{
 903	struct channel_subsystem *css = to_css(dev);
 904	int ret;
 905
 906	mutex_lock(&css->mutex);
 907	ret = sprintf(buf, "%x\n", css->cm_enabled);
 908	mutex_unlock(&css->mutex);
 909	return ret;
 910}
 911
 912static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
 913			       const char *buf, size_t count)
 914{
 915	struct channel_subsystem *css = to_css(dev);
 916	unsigned long val;
 917	int ret;
 918
 919	ret = kstrtoul(buf, 16, &val);
 920	if (ret)
 921		return ret;
 922	mutex_lock(&css->mutex);
 923	switch (val) {
 924	case 0:
 925		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
 926		break;
 927	case 1:
 928		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
 929		break;
 930	default:
 931		ret = -EINVAL;
 932	}
 933	mutex_unlock(&css->mutex);
 934	return ret < 0 ? ret : count;
 935}
 936static DEVICE_ATTR_RW(cm_enable);
 937
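/*
 * Illustrative usage (the sysfs path is an assumption based on the css%x
 * device name set up in setup_css() below):
 *
 *	# echo 1 > /sys/devices/css0/cm_enable
 *	# echo 0 > /sys/devices/css0/cm_enable
 *
 * The attribute is only made visible when the CHSC characteristics report
 * SECM support, see cm_enable_mode() below.
 */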
 938static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
 939			      int index)
 940{
 941	return css_chsc_characteristics.secm ? attr->mode : 0;
 942}
 943
 944static struct attribute *cssdev_attrs[] = {
 945	&dev_attr_real_cssid.attr,
 946	&dev_attr_rescan.attr,
 947	NULL,
 948};
 949
 950static struct attribute_group cssdev_attr_group = {
 951	.attrs = cssdev_attrs,
 952};
 953
 954static struct attribute *cssdev_cm_attrs[] = {
 955	&dev_attr_cm_enable.attr,
 956	NULL,
 957};
 958
 959static struct attribute_group cssdev_cm_attr_group = {
 960	.attrs = cssdev_cm_attrs,
 961	.is_visible = cm_enable_mode,
 962};
 963
 964static const struct attribute_group *cssdev_attr_groups[] = {
 965	&cssdev_attr_group,
 966	&cssdev_cm_attr_group,
 967	NULL,
 968};
 969
 970static int __init setup_css(int nr)
 971{
 972	struct channel_subsystem *css;
 973	int ret;
 974
 975	css = kzalloc(sizeof(*css), GFP_KERNEL);
 976	if (!css)
 977		return -ENOMEM;
 978
 979	channel_subsystems[nr] = css;
 980	dev_set_name(&css->device, "css%x", nr);
 981	css->device.groups = cssdev_attr_groups;
 982	css->device.release = channel_subsystem_release;
 983	/*
 984	 * We currently allocate notifier bits with this (using
 985	 * css->device as the device argument with the DMA API)
  986	 * and are fine with 64-bit addresses.
 987	 */
 988	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
 989	if (ret) {
 990		kfree(css);
 991		goto out_err;
 992	}
 993
 994	mutex_init(&css->mutex);
 995	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
 996	if (!ret) {
 997		css->id_valid = true;
 998		pr_info("Partition identifier %01x.%01x\n", css->cssid,
 999			css->iid);
1000	}
1001	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));
1002
1003	ret = device_register(&css->device);
1004	if (ret) {
1005		put_device(&css->device);
1006		goto out_err;
1007	}
1008
1009	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
1010					 GFP_KERNEL);
1011	if (!css->pseudo_subchannel) {
1012		device_unregister(&css->device);
1013		ret = -ENOMEM;
1014		goto out_err;
1015	}
1016
1017	css->pseudo_subchannel->dev.parent = &css->device;
1018	css->pseudo_subchannel->dev.release = css_subchannel_release;
1019	mutex_init(&css->pseudo_subchannel->reg_mutex);
1020	css_sch_create_locks(css->pseudo_subchannel);
1021
1022	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
1023	ret = device_register(&css->pseudo_subchannel->dev);
1024	if (ret) {
1025		put_device(&css->pseudo_subchannel->dev);
1026		device_unregister(&css->device);
1027		goto out_err;
1028	}
1029
1030	return ret;
1031out_err:
1032	channel_subsystems[nr] = NULL;
1033	return ret;
1034}
1035
1036static int css_reboot_event(struct notifier_block *this,
1037			    unsigned long event,
1038			    void *ptr)
1039{
1040	struct channel_subsystem *css;
1041	int ret;
1042
1043	ret = NOTIFY_DONE;
1044	for_each_css(css) {
1045		mutex_lock(&css->mutex);
1046		if (css->cm_enabled)
1047			if (chsc_secm(css, 0))
1048				ret = NOTIFY_BAD;
1049		mutex_unlock(&css->mutex);
1050	}
1051
1052	return ret;
1053}
1054
1055static struct notifier_block css_reboot_notifier = {
1056	.notifier_call = css_reboot_event,
1057};
1058
1059#define  CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
1060static struct gen_pool *cio_dma_pool;
1061
1062/* Currently cio supports only a single css */
1063struct device *cio_get_dma_css_dev(void)
1064{
1065	return &channel_subsystems[0]->device;
1066}
1067
1068struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
1069{
1070	struct gen_pool *gp_dma;
1071	void *cpu_addr;
1072	dma_addr_t dma_addr;
1073	int i;
1074
1075	gp_dma = gen_pool_create(3, -1);
1076	if (!gp_dma)
1077		return NULL;
1078	for (i = 0; i < nr_pages; ++i) {
1079		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
1080					      CIO_DMA_GFP);
1081		if (!cpu_addr)
1082			return gp_dma;
1083		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
1084				  dma_addr, PAGE_SIZE, -1);
1085	}
1086	return gp_dma;
1087}
1088
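/*
 * Added note: gen_pool_create(3, -1) creates a pool with a minimum
 * allocation order of 2^3 = 8 bytes on any NUMA node. The pool is grown
 * page by page with coherent DMA memory; if one of the allocations fails,
 * the smaller pool built so far is returned instead of NULL.
 */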
1089static void __gp_dma_free_dma(struct gen_pool *pool,
1090			      struct gen_pool_chunk *chunk, void *data)
1091{
1092	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
1093
1094	dma_free_coherent((struct device *) data, chunk_size,
1095			 (void *) chunk->start_addr,
1096			 (dma_addr_t) chunk->phys_addr);
1097}
1098
1099void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
1100{
1101	if (!gp_dma)
1102		return;
 1103	/* This is quite ugly, but there is no better way to free each chunk. */
1104	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
1105	gen_pool_destroy(gp_dma);
1106}
1107
1108static int cio_dma_pool_init(void)
1109{
 1110	/* No need to ever free this pool: the css core is compiled in. */
1111	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
1112	if (!cio_dma_pool)
1113		return -ENOMEM;
1114	return 0;
1115}
1116
1117void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
1118			size_t size)
1119{
1120	dma_addr_t dma_addr;
1121	unsigned long addr;
1122	size_t chunk_size;
1123
1124	if (!gp_dma)
1125		return NULL;
1126	addr = gen_pool_alloc(gp_dma, size);
1127	while (!addr) {
1128		chunk_size = round_up(size, PAGE_SIZE);
1129		addr = (unsigned long) dma_alloc_coherent(dma_dev,
1130					 chunk_size, &dma_addr, CIO_DMA_GFP);
1131		if (!addr)
1132			return NULL;
1133		gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
1134		addr = gen_pool_alloc(gp_dma, size);
1135	}
1136	return (void *) addr;
1137}
1138
1139void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
1140{
1141	if (!cpu_addr)
1142		return;
1143	memset(cpu_addr, 0, size);
1144	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
1145}
1146
1147/*
1148 * Allocate dma memory from the css global pool. Intended for memory not
1149 * specific to any single device within the css. The allocated memory
1150 * is not guaranteed to be 31-bit addressable.
1151 *
 1152 * Caution: Not suitable for early callers such as the console.
1153 */
1154void *cio_dma_zalloc(size_t size)
1155{
1156	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
1157}
1158
1159void cio_dma_free(void *cpu_addr, size_t size)
1160{
1161	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
1162}
1163
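/*
 * Illustrative only (the structure name is made up): a user of the global
 * css DMA pool allocates a zeroed buffer and frees it with the same size:
 *
 *	struct my_ctrl_block *cb = cio_dma_zalloc(sizeof(*cb));
 *	if (!cb)
 *		return -ENOMEM;
 *	...
 *	cio_dma_free(cb, sizeof(*cb));
 */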
1164/*
1165 * Now that the driver core is running, we can setup our channel subsystem.
1166 * The struct subchannel's are created during probing.
1167 */
1168static int __init css_bus_init(void)
1169{
1170	int ret, i;
1171
1172	ret = chsc_init();
1173	if (ret)
1174		return ret;
1175
1176	chsc_determine_css_characteristics();
1177	/* Try to enable MSS. */
1178	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
1179	if (ret)
1180		max_ssid = 0;
1181	else /* Success. */
1182		max_ssid = __MAX_SSID;
1183
1184	ret = slow_subchannel_init();
1185	if (ret)
1186		goto out;
1187
1188	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
1189	if (ret)
1190		goto out;
1191
1192	if ((ret = bus_register(&css_bus_type)))
1193		goto out;
1194
1195	/* Setup css structure. */
1196	for (i = 0; i <= MAX_CSS_IDX; i++) {
1197		ret = setup_css(i);
1198		if (ret)
1199			goto out_unregister;
1200	}
1201	ret = register_reboot_notifier(&css_reboot_notifier);
1202	if (ret)
1203		goto out_unregister;
1204	ret = cio_dma_pool_init();
1205	if (ret)
1206		goto out_unregister_rn;
1207	airq_init();
1208	css_init_done = 1;
1209
1210	/* Enable default isc for I/O subchannels. */
1211	isc_register(IO_SCH_ISC);
1212
1213	return 0;
1214out_unregister_rn:
1215	unregister_reboot_notifier(&css_reboot_notifier);
1216out_unregister:
1217	while (i-- > 0) {
1218		struct channel_subsystem *css = channel_subsystems[i];
1219		device_unregister(&css->pseudo_subchannel->dev);
1220		device_unregister(&css->device);
1221	}
1222	bus_unregister(&css_bus_type);
1223out:
1224	crw_unregister_handler(CRW_RSC_SCH);
1225	idset_free(slow_subchannel_set);
1226	chsc_init_cleanup();
1227	pr_alert("The CSS device driver initialization failed with "
1228		 "errno=%d\n", ret);
1229	return ret;
1230}
1231
1232static void __init css_bus_cleanup(void)
1233{
1234	struct channel_subsystem *css;
1235
1236	for_each_css(css) {
1237		device_unregister(&css->pseudo_subchannel->dev);
1238		device_unregister(&css->device);
1239	}
1240	bus_unregister(&css_bus_type);
1241	crw_unregister_handler(CRW_RSC_SCH);
1242	idset_free(slow_subchannel_set);
1243	chsc_init_cleanup();
1244	isc_unregister(IO_SCH_ISC);
1245}
1246
1247static int __init channel_subsystem_init(void)
1248{
1249	int ret;
1250
1251	ret = css_bus_init();
1252	if (ret)
1253		return ret;
1254	cio_work_q = create_singlethread_workqueue("cio");
1255	if (!cio_work_q) {
1256		ret = -ENOMEM;
1257		goto out_bus;
1258	}
1259	ret = io_subchannel_init();
1260	if (ret)
1261		goto out_wq;
1262
1263	/* Register subchannels which are already in use. */
1264	cio_register_early_subchannels();
1265	/* Start initial subchannel evaluation. */
1266	css_schedule_eval_all();
1267
1268	return ret;
1269out_wq:
1270	destroy_workqueue(cio_work_q);
1271out_bus:
1272	css_bus_cleanup();
1273	return ret;
1274}
1275subsys_initcall(channel_subsystem_init);
1276
1277static int css_settle(struct device_driver *drv, void *unused)
1278{
1279	struct css_driver *cssdrv = to_cssdriver(drv);
1280
1281	if (cssdrv->settle)
1282		return cssdrv->settle();
1283	return 0;
1284}
1285
1286int css_complete_work(void)
1287{
1288	int ret;
1289
1290	/* Wait for the evaluation of subchannels to finish. */
1291	ret = wait_event_interruptible(css_eval_wq,
1292				       atomic_read(&css_eval_scheduled) == 0);
1293	if (ret)
1294		return -EINTR;
1295	flush_workqueue(cio_work_q);
1296	/* Wait for the subchannel type specific initialization to finish */
1297	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
1298}
1299
1300
1301/*
1302 * Wait for the initialization of devices to finish, to make sure we are
1303 * done with our setup if the search for the root device starts.
1304 */
1305static int __init channel_subsystem_init_sync(void)
1306{
1307	css_complete_work();
1308	return 0;
1309}
1310subsys_initcall_sync(channel_subsystem_init_sync);
1311
1312#ifdef CONFIG_PROC_FS
1313static ssize_t cio_settle_write(struct file *file, const char __user *buf,
1314				size_t count, loff_t *ppos)
1315{
1316	int ret;
1317
 1318	/* Handle pending CRWs. */
1319	crw_wait_for_channel_report();
1320	ret = css_complete_work();
1321
1322	return ret ? ret : count;
1323}
1324
1325static const struct proc_ops cio_settle_proc_ops = {
1326	.proc_open	= nonseekable_open,
1327	.proc_write	= cio_settle_write,
1328	.proc_lseek	= no_llseek,
1329};
1330
1331static int __init cio_settle_init(void)
1332{
1333	struct proc_dir_entry *entry;
1334
1335	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
1336	if (!entry)
1337		return -ENOMEM;
1338	return 0;
1339}
1340device_initcall(cio_settle_init);
1341#endif /*CONFIG_PROC_FS*/
1342
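/*
 * Illustrative usage (not part of the original source): userspace can
 * wait for subchannel evaluation to settle by writing to the procfs file
 * created above:
 *
 *	# echo 1 > /proc/cio_settle
 *
 * The write returns only after pending channel report words have been
 * processed and css_complete_work() has finished.
 */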
1343int sch_is_pseudo_sch(struct subchannel *sch)
1344{
1345	if (!sch->dev.parent)
1346		return 0;
1347	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
1348}
1349
1350static int css_bus_match(struct device *dev, struct device_driver *drv)
1351{
1352	struct subchannel *sch = to_subchannel(dev);
1353	struct css_driver *driver = to_cssdriver(drv);
1354	struct css_device_id *id;
1355
1356	/* When driver_override is set, only bind to the matching driver */
1357	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
1358		return 0;
1359
1360	for (id = driver->subchannel_type; id->match_flags; id++) {
1361		if (sch->st == id->type)
1362			return 1;
1363	}
1364
1365	return 0;
1366}
1367
1368static int css_probe(struct device *dev)
1369{
1370	struct subchannel *sch;
1371	int ret;
1372
1373	sch = to_subchannel(dev);
1374	sch->driver = to_cssdriver(dev->driver);
1375	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
1376	if (ret)
1377		sch->driver = NULL;
1378	return ret;
1379}
1380
1381static void css_remove(struct device *dev)
1382{
1383	struct subchannel *sch;
1384
1385	sch = to_subchannel(dev);
1386	if (sch->driver->remove)
1387		sch->driver->remove(sch);
1388	sch->driver = NULL;
1389}
1390
1391static void css_shutdown(struct device *dev)
1392{
1393	struct subchannel *sch;
1394
1395	sch = to_subchannel(dev);
1396	if (sch->driver && sch->driver->shutdown)
1397		sch->driver->shutdown(sch);
1398}
1399
1400static int css_uevent(const struct device *dev, struct kobj_uevent_env *env)
1401{
1402	const struct subchannel *sch = to_subchannel(dev);
1403	int ret;
1404
1405	ret = add_uevent_var(env, "ST=%01X", sch->st);
1406	if (ret)
1407		return ret;
1408	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
1409	return ret;
1410}
1411
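/*
 * Added note: the resulting uevent carries ST=<type> and a modalias of
 * the form "css:tX", with X being the subchannel type in hex; udev can
 * match on this alias to autoload the appropriate subchannel driver.
 */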
1412static struct bus_type css_bus_type = {
1413	.name     = "css",
1414	.match    = css_bus_match,
1415	.probe    = css_probe,
1416	.remove   = css_remove,
1417	.shutdown = css_shutdown,
1418	.uevent   = css_uevent,
1419};
1420
1421/**
1422 * css_driver_register - register a css driver
1423 * @cdrv: css driver to register
1424 *
1425 * This is mainly a wrapper around driver_register that sets name
1426 * and bus_type in the embedded struct device_driver correctly.
1427 */
1428int css_driver_register(struct css_driver *cdrv)
1429{
1430	cdrv->drv.bus = &css_bus_type;
1431	return driver_register(&cdrv->drv);
1432}
1433EXPORT_SYMBOL_GPL(css_driver_register);
1434
1435/**
1436 * css_driver_unregister - unregister a css driver
1437 * @cdrv: css driver to unregister
1438 *
1439 * This is a wrapper around driver_unregister.
1440 */
1441void css_driver_unregister(struct css_driver *cdrv)
1442{
1443	driver_unregister(&cdrv->drv);
1444}
1445EXPORT_SYMBOL_GPL(css_driver_unregister);
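/*
 * Illustrative sketch of how a subchannel driver might use this API (all
 * names and the match_flags value are made up; the real css_driver and
 * css_device_id definitions live in css.h):
 *
 *	static struct css_device_id foo_subchannel_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },	// end of list
 *	};
 *
 *	static struct css_driver foo_driver = {
 *		.drv = {
 *			.name	= "foo",
 *		},
 *		.subchannel_type = foo_subchannel_ids,
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *
 *	rc = css_driver_register(&foo_driver);
 */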