Linux Audio

Check our new training course

Loading...
v3.1
 
   1/*
   2 * linux/drivers/s390/crypto/ap_bus.c
   3 *
   4 * Copyright (C) 2006 IBM Corporation
   5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
   6 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
   7 *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
   8 *	      Felix Beck <felix.beck@de.ibm.com>
   9 *	      Holger Dengler <hd@linux.vnet.ibm.com>
 
  10 *
  11 * Adjunct processor bus.
  12 *
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of the GNU General Public License as published by
  15 * the Free Software Foundation; either version 2, or (at your option)
  16 * any later version.
  17 *
  18 * This program is distributed in the hope that it will be useful,
  19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  21 * GNU General Public License for more details.
  22 *
  23 * You should have received a copy of the GNU General Public License
  24 * along with this program; if not, write to the Free Software
  25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  26 */
  27
  28#define KMSG_COMPONENT "ap"
  29#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  30
  31#include <linux/kernel_stat.h>
  32#include <linux/module.h>
  33#include <linux/init.h>
  34#include <linux/delay.h>
  35#include <linux/err.h>
 
  36#include <linux/interrupt.h>
  37#include <linux/workqueue.h>
  38#include <linux/slab.h>
  39#include <linux/notifier.h>
  40#include <linux/kthread.h>
  41#include <linux/mutex.h>
  42#include <asm/reset.h>
  43#include <asm/airq.h>
  44#include <linux/atomic.h>
  45#include <asm/system.h>
  46#include <asm/isc.h>
  47#include <linux/hrtimer.h>
  48#include <linux/ktime.h>
 
 
 
 
 
  49
  50#include "ap_bus.h"
  51
  52/* Some prototypes. */
  53static void ap_scan_bus(struct work_struct *);
  54static void ap_poll_all(unsigned long);
  55static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
  56static int ap_poll_thread_start(void);
  57static void ap_poll_thread_stop(void);
  58static void ap_request_timeout(unsigned long);
  59static inline void ap_schedule_poll_timer(void);
  60static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
  61static int ap_device_remove(struct device *dev);
  62static int ap_device_probe(struct device *dev);
  63static void ap_interrupt_handler(void *unused1, void *unused2);
  64static void ap_reset(struct ap_device *ap_dev);
  65static void ap_config_timeout(unsigned long ptr);
  66static int ap_select_domain(void);
  67
  68/*
  69 * Module description.
  70 */
  71MODULE_AUTHOR("IBM Corporation");
  72MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
  73		   "Copyright 2006 IBM Corporation");
  74MODULE_LICENSE("GPL");
  75
  76/*
  77 * Module parameter
  78 */
  79int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
  80module_param_named(domain, ap_domain_index, int, 0000);
 
  81MODULE_PARM_DESC(domain, "domain index for ap devices");
  82EXPORT_SYMBOL(ap_domain_index);
  83
  84static int ap_thread_flag = 0;
  85module_param_named(poll_thread, ap_thread_flag, int, 0000);
  86MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
  87
  88static struct device *ap_root_device = NULL;
  89static DEFINE_SPINLOCK(ap_device_list_lock);
  90static LIST_HEAD(ap_device_list);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  91
  92/*
  93 * Workqueue & timer for bus rescan.
  94 */
  95static struct workqueue_struct *ap_work_queue;
  96static struct timer_list ap_config_timer;
  97static int ap_config_time = AP_CONFIG_TIME;
  98static DECLARE_WORK(ap_config_work, ap_scan_bus);
 
  99
 100/*
 101 * Tasklet & timer for AP request polling and interrupts
 102 */
 103static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
 104static atomic_t ap_poll_requests = ATOMIC_INIT(0);
 105static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
 106static struct task_struct *ap_poll_kthread = NULL;
 107static DEFINE_MUTEX(ap_poll_thread_mutex);
 108static DEFINE_SPINLOCK(ap_poll_timer_lock);
 109static void *ap_interrupt_indicator;
 110static struct hrtimer ap_poll_timer;
 111/* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
 112 * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/
 
 
 113static unsigned long long poll_timeout = 250000;
 114
 115/* Suspend flag */
 116static int ap_suspend_flag;
 117/* Flag to check if domain was set through module parameter domain=. This is
 118 * important when supsend and resume is done in a z/VM environment where the
 119 * domain might change. */
 120static int user_set_domain = 0;
 121static struct bus_type ap_bus_type;
 122
 123/**
 124 * ap_using_interrupts() - Returns non-zero if interrupt support is
 125 * available.
 126 */
 127static inline int ap_using_interrupts(void)
 128{
 129	return ap_interrupt_indicator != NULL;
 130}
 
 131
/**
 * ap_instructions_available() - Test if AP instructions are available.
 *
 * Returns 0 if the AP instructions are installed.
 */
static inline int ap_instructions_available(void)
{
	/* Probe queue 0 of AP 0 just to see whether PQAP executes at all. */
	register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
	register unsigned long reg1 asm ("1") = -ENODEV;
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		"   .long 0xb2af0000\n"		/* PQAP(TAPQ) */
		"0: la    %1,0\n"		/* reached only if PQAP executed: result 0 */
		"1:\n"
		EX_TABLE(0b, 1b)		/* on a fault skip the la, keep -ENODEV */
		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
	return reg1;
}
 151
 152/**
 153 * ap_interrupts_available(): Test if AP interrupts are available.
 154 *
 155 * Returns 1 if AP interrupts are available.
 156 */
 157static int ap_interrupts_available(void)
 158{
 159	return test_facility(2) && test_facility(65);
 160}
 161
/**
 * ap_test_queue(): Test adjunct processor queue.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	register unsigned long reg0 asm ("0") = qid;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	/* TAPQ puts the device type and the queue depth into GR2:
	 * depth in the low byte, type in the byte above bit 24. */
	*device_type = (int) (reg2 >> 24);
	*queue_depth = (int) (reg2 & 0xff);
	return reg1;
}
 183
/**
 * ap_reset_queue(): Reset adjunct processor queue.
 * @qid: The AP queue number
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
{
	/* 0x01000000 selects the RAPQ function code of PQAP. */
	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		".long 0xb2af0000"		/* PQAP(RAPQ) */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	return reg1;
}
 201
 202#ifdef CONFIG_64BIT
/**
 * ap_queue_interruption_control(): Enable interruption for a specific AP.
 * @qid: The AP queue number
 * @ind: The notification indicator byte
 *
 * Returns AP queue status.
 */
static inline struct ap_queue_status
ap_queue_interruption_control(ap_qid_t qid, void *ind)
{
	/* 0x03000000 selects the AQIC function code of PQAP. */
	register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
	/* Flag word requesting interruption enablement on isc AP_ISC. */
	register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
	register struct ap_queue_status reg1_out asm ("1");
	register void *reg2 asm ("2") = ind;
	asm volatile(
		".long 0xb2af0000"		/* PQAP(AQIC) */
		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
		:
		: "cc" );
	return reg1_out;
}
 224#endif
 225
 226#ifdef CONFIG_64BIT
/* __ap_query_functions(): Issue PQAP(TAPQ) with the "test facilities"
 * bit (1UL << 23 in GR0) set so the AP function bits are returned in
 * GR2 in addition to the queue status.
 * @qid: The AP queue number
 * @functions: Receives the AP function bits on return.
 *
 * Returns the AP queue status.  If the instruction faults, the
 * exception table fixup leaves reg1 at its preset value
 * AP_QUEUE_STATUS_INVALID so the caller can detect the failure.
 */
static inline struct ap_queue_status
__ap_query_functions(ap_qid_t qid, unsigned int *functions)
{
	register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
	register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID;
	register unsigned long reg2 asm ("2");

	asm volatile(
		".long 0xb2af0000\n"		/* PQAP(TAPQ) */
		"0:\n"
		EX_TABLE(0b, 0b)
		: "+d" (reg0), "+d" (reg1), "=d" (reg2)
		:
		: "cc");

	/* The function bits are in the upper word of GR2. */
	*functions = (unsigned int)(reg2 >> 32);
	return reg1;
}
 245#endif
 246
 247/**
 248 * ap_query_functions(): Query supported functions.
 249 * @qid: The AP queue number
 250 * @functions: Pointer to functions field.
 251 *
 252 * Returns
 253 *   0	     on success.
 254 *   -ENODEV  if queue not valid.
 255 *   -EBUSY   if device busy.
 256 *   -EINVAL  if query function is not supported
 257 */
 258static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
 259{
 260#ifdef CONFIG_64BIT
 261	struct ap_queue_status status;
 262	int i;
 263	status = __ap_query_functions(qid, functions);
 264
 265	for (i = 0; i < AP_MAX_RESET; i++) {
 266		if (ap_queue_status_invalid_test(&status))
 267			return -ENODEV;
 268
 269		switch (status.response_code) {
 270		case AP_RESPONSE_NORMAL:
 271			return 0;
 272		case AP_RESPONSE_RESET_IN_PROGRESS:
 273		case AP_RESPONSE_BUSY:
 274			break;
 275		case AP_RESPONSE_Q_NOT_AVAIL:
 276		case AP_RESPONSE_DECONFIGURED:
 277		case AP_RESPONSE_CHECKSTOPPED:
 278		case AP_RESPONSE_INVALID_ADDRESS:
 279			return -ENODEV;
 280		case AP_RESPONSE_OTHERWISE_CHANGED:
 281			break;
 282		default:
 283			break;
 284		}
 285		if (i < AP_MAX_RESET - 1) {
 286			udelay(5);
 287			status = __ap_query_functions(qid, functions);
 288		}
 289	}
 290	return -EBUSY;
 291#else
 292	return -EINVAL;
 293#endif
 294}
 295
 296/**
 297 * ap_4096_commands_availablen(): Check for availability of 4096 bit RSA
 298 * support.
 299 * @qid: The AP queue number
 300 *
 301 * Returns 1 if 4096 bit RSA keys are support fo the AP, returns 0 if not.
 302 */
 303int ap_4096_commands_available(ap_qid_t qid)
 304{
 305	unsigned int functions;
 306
 307	if (ap_query_functions(qid, &functions))
 308		return 0;
 309
 310	return test_ap_facility(functions, 1) &&
 311	       test_ap_facility(functions, 2);
 312}
 313EXPORT_SYMBOL(ap_4096_commands_available);
 314
/**
 * ap_queue_enable_interruption(): Enable interruption on an AP.
 * @qid: The AP queue number
 * @ind: the notification indicator byte
 *
 * Enables interruption on AP queue via ap_queue_interruption_control(). Based
 * on the return value it waits a while and tests the AP queue if interrupts
 * have been switched on using ap_test_queue().
 *
 * Returns 0 once int_enabled is seen, -ENODEV if the queue is gone,
 * -EBUSY if enabling did not complete within AP_MAX_RESET attempts,
 * -EINVAL on non-64-bit builds.
 */
static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
{
#ifdef CONFIG_64BIT
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	status = ap_queue_interruption_control(qid, ind);

	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.int_enabled)
				return 0;
			break;	/* not enabled yet - re-test below */
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			break;	/* transient - re-test below */
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			return -ENODEV;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			if (status.int_enabled)
				return 0;
			break;
		default:
			break;
		}
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			/* Re-read the queue status via TAPQ. */
			status = ap_test_queue(qid, &t_depth, &t_device_type);
		}
	}
	return rc;
#else
	return -EINVAL;
#endif
}
 364
/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
	  unsigned int special)
{
	/* Tell the compiler the whole message buffer is read by the asm. */
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = (unsigned long) msg;
	register unsigned long reg3 asm ("3") = (unsigned long) length;
	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
	register unsigned long reg5 asm ("5") = (unsigned int) psmid;

	if (special == 1)
		reg0 |= 0x400000UL;	/* request the "special" processing bit */

	asm volatile (
		"0: .long 0xb2ad0042\n"		/* NQAP */
		"   brc   2,0b"			/* CC 2: segment boundary, repeat */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
		: "cc" );
	return reg1;
}
 401
 402int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
 403{
 404	struct ap_queue_status status;
 405
 406	status = __ap_send(qid, psmid, msg, length, 0);
 407	switch (status.response_code) {
 408	case AP_RESPONSE_NORMAL:
 409		return 0;
 410	case AP_RESPONSE_Q_FULL:
 411	case AP_RESPONSE_RESET_IN_PROGRESS:
 412		return -EBUSY;
 413	case AP_RESPONSE_REQ_FAC_NOT_INST:
 414		return -EINVAL;
 415	default:	/* Device is gone. */
 416		return -ENODEV;
 417	}
 418}
 419EXPORT_SYMBOL(ap_send);
 420
/**
 * __ap_recv(): Receive message from adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: Pointer to program supplied message identifier
 * @msg: The message text
 * @length: The message length
 *
 * Returns AP queue status structure.
 * Condition code 1 on DQAP means the receive has taken place
 * but only partially.	The response is incomplete, hence the
 * DQAP is repeated.
 * Condition code 2 on DQAP also means the receive is incomplete,
 * this time because a segment boundary was reached. Again, the
 * DQAP is repeated.
 * Note that gpr2 is used by the DQAP instruction to keep track of
 * any 'residual' length, in case the instruction gets interrupted.
 * Hence it gets zeroed before the instruction.
 */
static inline struct ap_queue_status
__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	/* Tell the compiler the whole reply buffer is written by the asm. */
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm("2") = 0UL;
	register unsigned long reg4 asm("4") = (unsigned long) msg;
	register unsigned long reg5 asm("5") = (unsigned long) length;
	register unsigned long reg6 asm("6") = 0UL;
	register unsigned long reg7 asm("7") = 0UL;


	asm volatile(
		"0: .long 0xb2ae0064\n"		/* DQAP */
		"   brc   6,0b\n"		/* CC 1 or 2: repeat the DQAP */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
		"+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
		"=m" (*(msgblock *) msg) : : "cc" );
	/* DQAP returns the psmid halves in gpr6 (high) and gpr7 (low). */
	*psmid = (((unsigned long long) reg6) << 32) + reg7;
	return reg1;
}
 
 461
 462int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
 
 
 
 
 
 
 463{
 464	struct ap_queue_status status;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 465
 466	status = __ap_recv(qid, psmid, msg, length);
 
 467	switch (status.response_code) {
 468	case AP_RESPONSE_NORMAL:
 469		return 0;
 470	case AP_RESPONSE_NO_PENDING_REPLY:
 471		if (status.queue_empty)
 472			return -ENOENT;
 473		return -EBUSY;
 474	case AP_RESPONSE_RESET_IN_PROGRESS:
 475		return -EBUSY;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 476	default:
 477		return -ENODEV;
 
 
 
 478	}
 479}
 480EXPORT_SYMBOL(ap_recv);
 481
 482/**
 483 * ap_query_queue(): Check if an AP queue is available.
 484 * @qid: The AP queue number
 485 * @queue_depth: Pointer to queue depth value
 486 * @device_type: Pointer to device type value
 487 *
 488 * The test is repeated for AP_MAX_RESET times.
 489 */
 490static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
 491{
 492	struct ap_queue_status status;
 493	int t_depth, t_device_type, rc, i;
 494
 495	rc = -EBUSY;
 496	for (i = 0; i < AP_MAX_RESET; i++) {
 497		status = ap_test_queue(qid, &t_depth, &t_device_type);
 498		switch (status.response_code) {
 499		case AP_RESPONSE_NORMAL:
 500			*queue_depth = t_depth + 1;
 501			*device_type = t_device_type;
 502			rc = 0;
 503			break;
 504		case AP_RESPONSE_Q_NOT_AVAIL:
 505			rc = -ENODEV;
 506			break;
 507		case AP_RESPONSE_RESET_IN_PROGRESS:
 508			break;
 509		case AP_RESPONSE_DECONFIGURED:
 510			rc = -ENODEV;
 511			break;
 512		case AP_RESPONSE_CHECKSTOPPED:
 513			rc = -ENODEV;
 514			break;
 515		case AP_RESPONSE_INVALID_ADDRESS:
 516			rc = -ENODEV;
 517			break;
 518		case AP_RESPONSE_OTHERWISE_CHANGED:
 519			break;
 520		case AP_RESPONSE_BUSY:
 521			break;
 522		default:
 523			BUG();
 524		}
 525		if (rc != -EBUSY)
 526			break;
 527		if (i < AP_MAX_RESET - 1)
 528			udelay(5);
 
 
 
 
 
 
 
 
 
 529	}
 530	return rc;
 531}
 532
/**
 * ap_init_queue(): Reset an AP queue.
 * @qid: The AP queue number
 *
 * Reset an AP queue and wait for it to become available again.
 * If interrupts are in use, also enables interruption on the queue;
 * a queue that cannot be enabled for interruption is discarded.
 */
static int ap_init_queue(ap_qid_t qid)
{
	struct ap_queue_status status;
	int rc, dummy, i;

	rc = -ENODEV;
	status = ap_reset_queue(qid);
	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			/* Reset is complete only once the queue is empty. */
			if (status.queue_empty)
				rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
			i = AP_MAX_RESET;	/* return with -ENODEV */
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			rc = -EBUSY;
			/* fall through - keep polling like BUSY */
		case AP_RESPONSE_BUSY:
		default:
			break;
		}
		/* -ENODEV/-EBUSY mean "keep polling"; rc == 0 ends the loop. */
		if (rc != -ENODEV && rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &dummy, &dummy);
		}
	}
	if (rc == 0 && ap_using_interrupts()) {
		rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator);
		/* If interruption mode is supported by the machine,
		* but an AP can not be enabled for interruption then
		* the AP will be discarded.    */
		if (rc)
			pr_err("Registering adapter interrupts for "
			       "AP %d failed\n", AP_QID_DEVICE(qid));
	}
	return rc;
}
 581
 582/**
 583 * ap_increase_queue_count(): Arm request timeout.
 584 * @ap_dev: Pointer to an AP device.
 585 *
 586 * Arm request timeout if an AP device was idle and a new request is submitted.
 587 */
 588static void ap_increase_queue_count(struct ap_device *ap_dev)
 589{
 590	int timeout = ap_dev->drv->request_timeout;
 
 
 591
 592	ap_dev->queue_count++;
 593	if (ap_dev->queue_count == 1) {
 594		mod_timer(&ap_dev->timeout, jiffies + timeout);
 595		ap_dev->reset = AP_RESET_ARMED;
 596	}
 
 
 
 597}
 598
 599/**
 600 * ap_decrease_queue_count(): Decrease queue count.
 601 * @ap_dev: Pointer to an AP device.
 602 *
 603 * If AP device is still alive, re-schedule request timeout if there are still
 604 * pending requests.
 605 */
 606static void ap_decrease_queue_count(struct ap_device *ap_dev)
 607{
 608	int timeout = ap_dev->drv->request_timeout;
 
 
 609
 610	ap_dev->queue_count--;
 611	if (ap_dev->queue_count > 0)
 612		mod_timer(&ap_dev->timeout, jiffies + timeout);
 613	else
 614		/*
 615		 * The timeout timer should to be disabled now - since
 616		 * del_timer_sync() is very expensive, we just tell via the
 617		 * reset flag to ignore the pending timeout timer.
 618		 */
 619		ap_dev->reset = AP_RESET_IGNORE;
 
 
 
 
 
 
 620}
 621
 622/*
 623 * AP device related attributes.
 624 */
 625static ssize_t ap_hwtype_show(struct device *dev,
 626			      struct device_attribute *attr, char *buf)
 627{
 628	struct ap_device *ap_dev = to_ap_dev(dev);
 629	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
 
 
 
 
 
 
 
 
 
 
 630}
 631
 632static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
 633static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
 634			     char *buf)
 
 
 
 
 
 
 
 
 635{
 636	struct ap_device *ap_dev = to_ap_dev(dev);
 637	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 638}
 639
 640static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
 641static ssize_t ap_request_count_show(struct device *dev,
 642				     struct device_attribute *attr,
 643				     char *buf)
 644{
 645	struct ap_device *ap_dev = to_ap_dev(dev);
 646	int rc;
 647
 648	spin_lock_bh(&ap_dev->lock);
 649	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
 650	spin_unlock_bh(&ap_dev->lock);
 
 
 
 
 
 651	return rc;
 652}
 653
 654static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
 655
 656static ssize_t ap_modalias_show(struct device *dev,
 657				struct device_attribute *attr, char *buf)
 658{
 659	return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
 
 
 
 
 
 660}
 661
 662static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
 663
/* Default sysfs attributes attached to every AP device. */
static struct attribute *ap_dev_attrs[] = {
	&dev_attr_hwtype.attr,
	&dev_attr_depth.attr,
	&dev_attr_request_count.attr,
	&dev_attr_modalias.attr,
	NULL
};
static struct attribute_group ap_dev_attr_group = {
	.attrs = ap_dev_attrs
};
 674
 675/**
 676 * ap_bus_match()
 677 * @dev: Pointer to device
 678 * @drv: Pointer to device_driver
 679 *
 680 * AP bus driver registration/unregistration.
 681 */
 682static int ap_bus_match(struct device *dev, struct device_driver *drv)
 683{
 684	struct ap_device *ap_dev = to_ap_dev(dev);
 685	struct ap_driver *ap_drv = to_ap_drv(drv);
 686	struct ap_device_id *id;
 687
 688	/*
 689	 * Compare device type of the device with the list of
 690	 * supported types of the device_driver.
 691	 */
 692	for (id = ap_drv->ids; id->match_flags; id++) {
 693		if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
 694		    (id->dev_type != ap_dev->device_type))
 695			continue;
 696		return 1;
 
 
 
 
 697	}
 698	return 0;
 699}
 700
 701/**
 702 * ap_uevent(): Uevent function for AP devices.
 703 * @dev: Pointer to device
 704 * @env: Pointer to kobj_uevent_env
 705 *
 706 * It sets up a single environment variable DEV_TYPE which contains the
 707 * hardware device type.
 708 */
 709static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
 710{
 
 711	struct ap_device *ap_dev = to_ap_dev(dev);
 712	int retval = 0;
 713
 714	if (!ap_dev)
 715		return -ENODEV;
 
 716
 717	/* Set up DEV_TYPE environment variable. */
 718	retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
 719	if (retval)
 720		return retval;
 721
 722	/* Add MODALIAS= */
 723	retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 724
 725	return retval;
 
 
 
 
 
 
 
 
 
 
 
 726}
 727
/* Bus-level suspend callback: quiesce bus-global scanning/polling once
 * (on the first device suspended), then drain and unregister this device. */
static int ap_bus_suspend(struct device *dev, pm_message_t state)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	unsigned long flags;

	/* Global teardown happens only once per suspend cycle. */
	if (!ap_suspend_flag) {
		ap_suspend_flag = 1;

		/* Disable scanning for devices, thus we do not want to scan
		 * for them after removing.
		 */
		del_timer_sync(&ap_config_timer);
		if (ap_work_queue != NULL) {
			destroy_workqueue(ap_work_queue);
			ap_work_queue = NULL;
		}

		tasklet_disable(&ap_tasklet);
	}
	/* Poll on the device until all requests are finished. */
	do {
		flags = 0;
		spin_lock_bh(&ap_dev->lock);
		__ap_poll_device(ap_dev, &flags);
		spin_unlock_bh(&ap_dev->lock);
		/* NOTE(review): flags bits 0/1 appear to be set by
		 * __ap_poll_device() while work remains - confirm there. */
	} while ((flags & 1) || (flags & 2));

	spin_lock_bh(&ap_dev->lock);
	ap_dev->unregistered = 1;
	spin_unlock_bh(&ap_dev->lock);

	return 0;
}
 761
/* Bus-level resume callback: restart bus-global scanning/polling once
 * (on the first device resumed) and re-target this device's qid at the
 * current domain. */
static int ap_bus_resume(struct device *dev)
{
	int rc = 0;
	struct ap_device *ap_dev = to_ap_dev(dev);

	/* Global restart happens only once per resume cycle. */
	if (ap_suspend_flag) {
		ap_suspend_flag = 0;
		if (!ap_interrupts_available())
			ap_interrupt_indicator = NULL;
		/* Re-select the domain unless it was pinned via domain=. */
		if (!user_set_domain) {
			ap_domain_index = -1;
			ap_select_domain();
		}
		init_timer(&ap_config_timer);
		ap_config_timer.function = ap_config_timeout;
		ap_config_timer.data = 0;
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
		ap_work_queue = create_singlethread_workqueue("kapwork");
		if (!ap_work_queue)
			return -ENOMEM;
		tasklet_enable(&ap_tasklet);
		if (!ap_using_interrupts())
			ap_schedule_poll_timer();
		else
			tasklet_schedule(&ap_tasklet);
		if (ap_thread_flag)
			rc = ap_poll_thread_start();
	}
	/* The domain may have changed across suspend (e.g. under z/VM);
	 * rebuild the qid with the current domain index if necessary. */
	if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
		spin_lock_bh(&ap_dev->lock);
		ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
				       ap_domain_index);
		spin_unlock_bh(&ap_dev->lock);
	}
	queue_work(ap_work_queue, &ap_config_work);

	return rc;
}
 
 801
/* AP bus operations: match/uevent for driver binding plus legacy
 * bus-level suspend/resume callbacks. */
static struct bus_type ap_bus_type = {
	.name = "ap",
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
	.suspend = ap_bus_suspend,
	.resume = ap_bus_resume
};
 809
 810static int ap_device_probe(struct device *dev)
 811{
 812	struct ap_device *ap_dev = to_ap_dev(dev);
 813	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
 814	int rc;
 815
 816	ap_dev->drv = ap_drv;
 817	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
 818	if (!rc) {
 819		spin_lock_bh(&ap_device_list_lock);
 820		list_add(&ap_dev->list, &ap_device_list);
 821		spin_unlock_bh(&ap_device_list_lock);
 822	}
 823	return rc;
 
 824}
 825
 826/**
 827 * __ap_flush_queue(): Flush requests.
 828 * @ap_dev: Pointer to the AP device
 829 *
 830 * Flush all requests from the request/pending queue of an AP device.
 
 
 
 
 
 
 
 
 
 831 */
 832static void __ap_flush_queue(struct ap_device *ap_dev)
 833{
 834	struct ap_message *ap_msg, *next;
 835
 836	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
 837		list_del_init(&ap_msg->list);
 838		ap_dev->pendingq_count--;
 839		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
 
 
 
 
 
 840	}
 841	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
 842		list_del_init(&ap_msg->list);
 843		ap_dev->requestq_count--;
 844		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 845	}
 
 
 846}
 847
/* Flush all requests of an AP device, serialized against the poll
 * tasklet / bottom halves via the device lock. */
void ap_flush_queue(struct ap_device *ap_dev)
{
	spin_lock_bh(&ap_dev->lock);
	__ap_flush_queue(ap_dev);
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_flush_queue);
 855
/* Unbind an AP device from its driver: cancel outstanding requests,
 * hide the device from the poll machinery, then let the driver clean up. */
static int ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = ap_dev->drv;

	/* Complete all pending requests with -ENODEV and stop the
	 * request timeout timer before the driver callback runs. */
	ap_flush_queue(ap_dev);
	del_timer_sync(&ap_dev->timeout);
	/* Take the device off the list the poll code iterates. */
	spin_lock_bh(&ap_device_list_lock);
	list_del_init(&ap_dev->list);
	spin_unlock_bh(&ap_device_list_lock);
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);
	/* Drop this device's share of the global in-flight counter. */
	spin_lock_bh(&ap_dev->lock);
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	spin_unlock_bh(&ap_dev->lock);
	return 0;
}
 873
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 874int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
 875		       char *name)
 876{
 877	struct device_driver *drv = &ap_drv->driver;
 878
 879	drv->bus = &ap_bus_type;
 880	drv->probe = ap_device_probe;
 881	drv->remove = ap_device_remove;
 882	drv->owner = owner;
 883	drv->name = name;
 884	return driver_register(drv);
 885}
 886EXPORT_SYMBOL(ap_driver_register);
 887
/* Unregister an AP driver from the driver core. */
void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);
 893
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 894/*
 895 * AP bus attributes.
 896 */
 
/* sysfs: currently selected AP domain index (-1 while none selected). */
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}
 901
 902static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
 
 
 
 
 
 
 
 
 
 
 
 
 903
/* sysfs: bus rescan interval in seconds. */
static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}
 908
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 909static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
 910{
 911	return snprintf(buf, PAGE_SIZE, "%d\n",
 912			ap_using_interrupts() ? 1 : 0);
 913}
 914
 915static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);
 
 
 
 
 
 916
 917static ssize_t ap_config_time_store(struct bus_type *bus,
 918				    const char *buf, size_t count)
 919{
 920	int time;
 921
 922	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
 923		return -EINVAL;
 924	ap_config_time = time;
 925	if (!timer_pending(&ap_config_timer) ||
 926	    !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
 927		ap_config_timer.expires = jiffies + ap_config_time * HZ;
 928		add_timer(&ap_config_timer);
 929	}
 930	return count;
 931}
 932
 933static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
 934
 935static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
 936{
 937	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
 938}
 939
 940static ssize_t ap_poll_thread_store(struct bus_type *bus,
 941				    const char *buf, size_t count)
 942{
 943	int flag, rc;
 944
 945	if (sscanf(buf, "%d\n", &flag) != 1)
 946		return -EINVAL;
 947	if (flag) {
 948		rc = ap_poll_thread_start();
 949		if (rc)
 950			return rc;
 951	}
 952	else
 953		ap_poll_thread_stop();
 954	return count;
 955}
 956
 957static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
 958
/* sysfs: high resolution poll timer interval in nanoseconds. */
static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
}
 963
/* sysfs: set the high resolution poll timer interval (nanoseconds,
 * 1..120s) and re-arm the poll hrtimer with the new period. */
static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
				  size_t count)
{
	unsigned long long time;
	ktime_t hr_time;

	/* 120 seconds = maximum poll interval */
	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
	    time > 120000000000ULL)
		return -EINVAL;
	poll_timeout = time;
	hr_time = ktime_set(0, poll_timeout);

	/* If the timer is not queued, or cannot simply be forwarded to
	 * the new expiry, (re)start it explicitly. */
	if (!hrtimer_is_queued(&ap_poll_timer) ||
	    !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
		hrtimer_set_expires(&ap_poll_timer, hr_time);
		hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
	}
	return count;
}
 984
 985static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
 986
/* Attributes published at the AP bus level in sysfs. */
static struct bus_attribute *const ap_bus_attrs[] = {
	&bus_attr_ap_domain,
	&bus_attr_config_time,
	&bus_attr_poll_thread,
	&bus_attr_ap_interrupts,
	&bus_attr_poll_timeout,
	NULL,
};
 995
 996/**
 997 * ap_select_domain(): Select an AP domain.
 998 *
 999 * Pick one of the 16 AP domains.
1000 */
1001static int ap_select_domain(void)
1002{
1003	int queue_depth, device_type, count, max_count, best_domain;
1004	int rc, i, j;
1005
1006	/*
1007	 * We want to use a single domain. Either the one specified with
1008	 * the "domain=" parameter or the domain with the maximum number
1009	 * of devices.
1010	 */
1011	if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
1012		/* Domain has already been selected. */
1013		return 0;
1014	best_domain = -1;
1015	max_count = 0;
1016	for (i = 0; i < AP_DOMAINS; i++) {
1017		count = 0;
1018		for (j = 0; j < AP_DEVICES; j++) {
1019			ap_qid_t qid = AP_MKQID(j, i);
1020			rc = ap_query_queue(qid, &queue_depth, &device_type);
1021			if (rc)
1022				continue;
1023			count++;
1024		}
1025		if (count > max_count) {
1026			max_count = count;
1027			best_domain = i;
1028		}
1029	}
1030	if (best_domain >= 0){
1031		ap_domain_index = best_domain;
1032		return 0;
1033	}
1034	return -ENODEV;
1035}
1036
/**
 * ap_probe_device_type(): Find the device type of an AP.
 * @ap_dev: pointer to the AP device.
 *
 * Find the device type if query queue returned a device type of 0.
 * A canned test request is sent to the queue; a reply beginning with
 * the bytes 0x00 0x86 classifies the card as PCICC, any other reply
 * as PCICA.
 *
 * Returns 0 on success, -ENOMEM if the reply page could not be
 * allocated, -ENODEV if sending failed or no matching reply arrived
 * within 6 x 300ms.
 */
static int ap_probe_device_type(struct ap_device *ap_dev)
{
	/* Hard-coded test request ("CCA-APPL" is visible in the data). */
	static unsigned char msg[] = {
		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
	};
	struct ap_queue_status status;
	unsigned long long psmid;
	char *reply;
	int rc, i;

	reply = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reply) {
		rc = -ENOMEM;
		goto out;
	}

	/* Send with a recognizable program-supplied message id. */
	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
			   msg, sizeof(msg), 0);
	if (status.response_code != AP_RESPONSE_NORMAL) {
		rc = -ENODEV;
		goto out_free;
	}

	/* Wait for the test message to complete. */
	for (i = 0; i < 6; i++) {
		mdelay(300);
		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
		if (status.response_code == AP_RESPONSE_NORMAL &&
		    psmid == 0x0102030405060708ULL)
			break;
	}
	if (i < 6) {
		/* Got an answer. */
		if (reply[0] == 0x00 && reply[1] == 0x86)
			ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
		else
			ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
		rc = 0;
	} else
		rc = -ENODEV;

out_free:
	free_page((unsigned long) reply);
out:
	return rc;
}
1133
/*
 * ap_interrupt_handler(): Adapter interrupt handler for the AP bus.
 *
 * Accounts the interrupt and defers the actual queue polling to the
 * AP tasklet.
 */
static void ap_interrupt_handler(void *unused1, void *unused2)
{
	kstat_cpu(smp_processor_id()).irqs[IOINT_APB]++;
	tasklet_schedule(&ap_tasklet);
}
1139
1140/**
1141 * __ap_scan_bus(): Scan the AP bus.
1142 * @dev: Pointer to device
1143 * @data: Pointer to data
1144 *
1145 * Scan the AP bus for new devices.
1146 */
1147static int __ap_scan_bus(struct device *dev, void *data)
1148{
1149	return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
1150}
1151
/*
 * ap_device_release(): Release callback of the embedded struct device.
 * @dev: Pointer to the embedded struct device
 *
 * Frees the containing struct ap_device once the last reference to
 * the device is dropped.
 */
static void ap_device_release(struct device *dev)
{
	kfree(to_ap_dev(dev));
}
1158
/*
 * ap_scan_bus(): Scan the AP bus for devices.
 * @unused: Unused workqueue argument.
 *
 * Walks all AP device numbers of the selected domain. Known devices
 * that stopped responding (or were marked unregistered) are removed
 * from the bus; newly responding queues get a struct ap_device
 * allocated, typed and registered including its sysfs attributes.
 */
static void ap_scan_bus(struct work_struct *unused)
{
	struct ap_device *ap_dev;
	struct device *dev;
	ap_qid_t qid;
	int queue_depth, device_type;
	unsigned int device_functions;
	int rc, i;

	if (ap_select_domain() != 0)
		return;	/* No usable domain, nothing to scan. */
	for (i = 0; i < AP_DEVICES; i++) {
		qid = AP_MKQID(i, ap_domain_index);
		/* Is this queue already registered on the bus? */
		dev = bus_find_device(&ap_bus_type, NULL,
				      (void *)(unsigned long)qid,
				      __ap_scan_bus);
		rc = ap_query_queue(qid, &queue_depth, &device_type);
		if (dev) {
			if (rc == -EBUSY) {
				/* Queue busy (e.g. resetting): wait and
				 * query once more. */
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(AP_RESET_TIMEOUT);
				rc = ap_query_queue(qid, &queue_depth,
						    &device_type);
			}
			ap_dev = to_ap_dev(dev);
			spin_lock_bh(&ap_dev->lock);
			if (rc || ap_dev->unregistered) {
				spin_unlock_bh(&ap_dev->lock);
				/* If only marked unregistered, retry this
				 * device number in the next iteration. */
				if (ap_dev->unregistered)
					i--;
				device_unregister(dev);
				put_device(dev);
				continue;
			}
			spin_unlock_bh(&ap_dev->lock);
			put_device(dev);
			continue;
		}
		if (rc)
			continue;	/* Queue does not respond. */
		rc = ap_init_queue(qid);
		if (rc)
			continue;
		/* New responding queue: create its bus device. */
		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
		if (!ap_dev)
			break;
		ap_dev->qid = qid;
		ap_dev->queue_depth = queue_depth;
		ap_dev->unregistered = 1;
		spin_lock_init(&ap_dev->lock);
		INIT_LIST_HEAD(&ap_dev->pendingq);
		INIT_LIST_HEAD(&ap_dev->requestq);
		INIT_LIST_HEAD(&ap_dev->list);
		setup_timer(&ap_dev->timeout, ap_request_timeout,
			    (unsigned long) ap_dev);
		switch (device_type) {
		case 0:
			/* Type 0: old card, probe the real type. */
			if (ap_probe_device_type(ap_dev)) {
				kfree(ap_dev);
				continue;
			}
			break;
		case 10:
			/* Type 10: CEX3 family, distinguish coprocessor
			 * vs. accelerator via the facility bits. */
			if (ap_query_functions(qid, &device_functions)) {
				kfree(ap_dev);
				continue;
			}
			if (test_ap_facility(device_functions, 3))
				ap_dev->device_type = AP_DEVICE_TYPE_CEX3C;
			else if (test_ap_facility(device_functions, 4))
				ap_dev->device_type = AP_DEVICE_TYPE_CEX3A;
			else {
				kfree(ap_dev);
				continue;
			}
			break;
		default:
			ap_dev->device_type = device_type;
		}

		ap_dev->device.bus = &ap_bus_type;
		ap_dev->device.parent = ap_root_device;
		if (dev_set_name(&ap_dev->device, "card%02x",
				 AP_QID_DEVICE(ap_dev->qid))) {
			kfree(ap_dev);
			continue;
		}
		ap_dev->device.release = ap_device_release;
		rc = device_register(&ap_dev->device);
		if (rc) {
			/* device_register() failed: drop the reference,
			 * the release callback frees ap_dev. */
			put_device(&ap_dev->device);
			continue;
		}
		/* Add device attributes. */
		rc = sysfs_create_group(&ap_dev->device.kobj,
					&ap_dev_attr_group);
		if (!rc) {
			spin_lock_bh(&ap_dev->lock);
			ap_dev->unregistered = 0;
			spin_unlock_bh(&ap_dev->lock);
		}
		else
			device_unregister(&ap_dev->device);
	}
}
1264
/*
 * ap_config_timeout(): Bus rescan timer callback.
 * @ptr: Unused timer argument.
 *
 * Queues the bus scan work and re-arms the timer for the next
 * ap_config_time interval.
 */
static void
ap_config_timeout(unsigned long ptr)
{
	queue_work(ap_work_queue, &ap_config_work);
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);
}
1272
/**
 * ap_schedule_poll_timer(): Schedule poll timer.
 *
 * Set up the timer to run the poll tasklet. Does nothing if adapter
 * interrupts are in use or the bus is suspended, or if the timer is
 * already queued. Serialized by ap_poll_timer_lock.
 */
static inline void ap_schedule_poll_timer(void)
{
	ktime_t hr_time;

	spin_lock_bh(&ap_poll_timer_lock);
	if (ap_using_interrupts() || ap_suspend_flag)
		goto out;
	if (hrtimer_is_queued(&ap_poll_timer))
		goto out;
	/* Timer already expired: push it poll_timeout ns into the
	 * future and restart it. */
	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
		hr_time = ktime_set(0, poll_timeout);
		hrtimer_forward_now(&ap_poll_timer, hr_time);
		hrtimer_restart(&ap_poll_timer);
	}
out:
	spin_unlock_bh(&ap_poll_timer_lock);
}
1295
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/**
 * ap_poll_read(): Receive pending reply messages from an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Called with ap_dev->lock held. Returns 0 if the device is still
 * present, -ENODEV if not.
 */
static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->queue_count <= 0)
		return 0;	/* Nothing in flight on this queue. */
	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
			   ap_dev->reply->message, ap_dev->reply->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_dec(&ap_poll_requests);
		ap_decrease_queue_count(ap_dev);
		/* Find the matching request by psmid and deliver the
		 * reply; replies without a match are discarded. */
		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
			if (ap_msg->psmid != ap_dev->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			ap_dev->pendingq_count--;
			ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
			break;
		}
		if (ap_dev->queue_count > 0)
			*flags |= 1;	/* More replies may be waiting. */
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty) {
			/* The card shouldn't forget requests but who knows. */
			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
			ap_dev->queue_count = 0;
			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
			ap_dev->requestq_count += ap_dev->pendingq_count;
			ap_dev->pendingq_count = 0;
		} else
			*flags |= 2;	/* Replies outstanding: arm timer. */
		break;
	default:
		return -ENODEV;
	}
	return 0;
}
1344
/**
 * ap_poll_write(): Send messages from the request queue to an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Called with ap_dev->lock held. Returns 0 if the device is still
 * present, -EINVAL for a rejected message, -ENODEV if the device
 * is gone.
 */
static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	/* Nothing queued, or no room on the hardware queue. */
	if (ap_dev->requestq_count <= 0 ||
	    ap_dev->queue_count >= ap_dev->queue_depth)
		return 0;
	/* Start the next request on the queue. */
	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
	status = __ap_send(ap_dev->qid, ap_msg->psmid,
			   ap_msg->message, ap_msg->length, ap_msg->special);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_inc(&ap_poll_requests);
		ap_increase_queue_count(ap_dev);
		/* Request is now in flight; track it on the pending queue. */
		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
		ap_dev->requestq_count--;
		ap_dev->pendingq_count++;
		if (ap_dev->queue_count < ap_dev->queue_depth &&
		    ap_dev->requestq_count > 0)
			*flags |= 1;	/* More requests can be sent now. */
		*flags |= 2;		/* Reply outstanding: arm poll timer. */
		break;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		*flags |= 2;	/* Transient condition: retry via timer. */
		break;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:
		return -ENODEV;
	}
	return 0;
}
1389
/**
 * ap_poll_queue(): Poll AP device for pending replies and send new messages.
 * @ap_dev: pointer to the bus device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * First drains finished replies, then pushes queued requests. Returns
 * the first non-zero result of ap_poll_read()/ap_poll_write(), which
 * signals a dead device (-ENODEV) or rejected message.
 */
static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
{
	int rc = ap_poll_read(ap_dev, flags);

	return rc ? rc : ap_poll_write(ap_dev, flags);
}
1409
/**
 * __ap_queue_message(): Queue a message to a device.
 * @ap_dev: pointer to the AP device
 * @ap_msg: the message to be queued
 *
 * Called with ap_dev->lock held (see ap_queue_message). Sends the
 * message immediately if there is no backlog and the hardware queue
 * has room, otherwise appends it to the software request queue.
 * Returns 0 if the message was sent, -EBUSY if it was only queued,
 * -EINVAL/-ENODEV on failure (the driver's receive callback is
 * notified in the failure cases).
 */
static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_queue_status status;

	if (list_empty(&ap_dev->requestq) &&
	    ap_dev->queue_count < ap_dev->queue_depth) {
		/* No backlog and room on the queue: send directly. */
		status = __ap_send(ap_dev->qid, ap_msg->psmid,
				   ap_msg->message, ap_msg->length,
				   ap_msg->special);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
			atomic_inc(&ap_poll_requests);
			ap_dev->pendingq_count++;
			ap_increase_queue_count(ap_dev);
			ap_dev->total_request_count++;
			break;
		case AP_RESPONSE_Q_FULL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			/* Transient condition: keep the message queued. */
			list_add_tail(&ap_msg->list, &ap_dev->requestq);
			ap_dev->requestq_count++;
			ap_dev->total_request_count++;
			return -EBUSY;
		case AP_RESPONSE_REQ_FAC_NOT_INST:
		case AP_RESPONSE_MESSAGE_TOO_BIG:
			/* Per-message error: fail just this message. */
			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
			return -EINVAL;
		default:	/* Device is gone. */
			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
			return -ENODEV;
		}
	} else {
		/* Backlog present or queue full: append for later. */
		list_add_tail(&ap_msg->list, &ap_dev->requestq);
		ap_dev->requestq_count++;
		ap_dev->total_request_count++;
		return -EBUSY;
	}
	ap_schedule_poll_timer();
	return 0;
}
1457
1458void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
 
 
 
 
1459{
1460	unsigned long flags;
1461	int rc;
1462
1463	spin_lock_bh(&ap_dev->lock);
1464	if (!ap_dev->unregistered) {
1465		/* Make room on the queue by polling for finished requests. */
1466		rc = ap_poll_queue(ap_dev, &flags);
1467		if (!rc)
1468			rc = __ap_queue_message(ap_dev, ap_msg);
1469		if (!rc)
1470			wake_up(&ap_poll_wait);
1471		if (rc == -ENODEV)
1472			ap_dev->unregistered = 1;
1473	} else {
1474		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1475		rc = -ENODEV;
1476	}
1477	spin_unlock_bh(&ap_dev->lock);
1478	if (rc == -ENODEV)
1479		device_unregister(&ap_dev->device);
1480}
1481EXPORT_SYMBOL(ap_queue_message);
1482
/**
 * ap_cancel_message(): Cancel a crypto request.
 * @ap_dev: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&ap_dev->lock);
	if (!list_empty(&ap_msg->list)) {
		/* The message sits on either pendingq or requestq; scan
		 * pendingq by psmid to decide which counter to fix up
		 * before unlinking the message. */
		list_for_each_entry(tmp, &ap_dev->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				ap_dev->pendingq_count--;
				goto found;
			}
		ap_dev->requestq_count--;
	found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_cancel_message);
1511
/**
 * ap_poll_timeout(): AP receive polling for finished AP requests.
 * @unused: Unused pointer.
 *
 * Schedules the AP tasklet using a high resolution timer.
 */
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
	tasklet_schedule(&ap_tasklet);
	/* One-shot: ap_schedule_poll_timer() re-arms it when needed. */
	return HRTIMER_NORESTART;
}
1523
/**
 * ap_reset(): Reset a not responding AP device.
 * @ap_dev: Pointer to the AP device
 *
 * Reset a not responding AP device and move all requests from the
 * pending queue to the request queue. Called with ap_dev->lock held
 * (see __ap_poll_device).
 */
static void ap_reset(struct ap_device *ap_dev)
{
	int rc;

	ap_dev->reset = AP_RESET_IGNORE;	/* Consume the reset request. */
	/* Forget everything believed to be in flight on the card. */
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	ap_dev->queue_count = 0;
	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
	ap_dev->requestq_count += ap_dev->pendingq_count;
	ap_dev->pendingq_count = 0;
	rc = ap_init_queue(ap_dev->qid);
	if (rc == -ENODEV)
		ap_dev->unregistered = 1;	/* Device is gone for good. */
}
1545
1546static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
1547{
1548	if (!ap_dev->unregistered) {
1549		if (ap_poll_queue(ap_dev, flags))
1550			ap_dev->unregistered = 1;
1551		if (ap_dev->reset == AP_RESET_DO)
1552			ap_reset(ap_dev);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1553	}
1554	return 0;
1555}
1556
/**
 * ap_poll_all(): Poll all AP devices.
 * @dummy: Unused variable
 *
 * Poll all AP devices on the bus in a round robin fashion. Continue
 * polling until bit 2^0 of the control flags is not set. If bit 2^1
 * of the control flags has been set arm the poll timer.
 */
static void ap_poll_all(unsigned long dummy)
{
	unsigned long flags;
	struct ap_device *ap_dev;

	/* Reset the indicator if interrupts are used, so that new
	 * interrupts can be received. Doing it at the beginning of the
	 * tasklet is important so that no requests on any AP get lost.
	 */
	if (ap_using_interrupts())
		xchg((u8 *)ap_interrupt_indicator, 0);
	do {
		flags = 0;
		spin_lock(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock(&ap_device_list_lock);
	} while (flags & 1);	/* Some device wants another poll round. */
	if (flags & 2)
		ap_schedule_poll_timer();	/* Replies still outstanding. */
}
1589
/**
 * ap_poll_thread(): Thread that polls for finished requests.
 * @data: Unused pointer
 *
 * AP bus poll thread. The purpose of this thread is to poll for
 * finished requests in a loop if there is a "free" cpu - that is
 * a cpu that doesn't have anything better to do. The polling stops
 * as soon as there is another task or if all messages have been
 * delivered.
 */
static int ap_poll_thread(void *data)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int requests;
	struct ap_device *ap_dev;

	set_user_nice(current, 19);	/* Lowest priority: run only when idle. */
	while (1) {
		if (ap_suspend_flag)
			return 0;
		if (need_resched()) {
			/* Another task wants the cpu: yield and retry. */
			schedule();
			continue;
		}
		/* Register on the wait queue BEFORE checking the stop
		 * condition and the request count, so a concurrent
		 * wake_up(&ap_poll_wait) is not lost. */
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		requests = atomic_read(&ap_poll_requests);
		if (requests <= 0)
			schedule();	/* Nothing in flight: sleep until woken. */
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ap_poll_wait, &wait);

		flags = 0;
		spin_lock_bh(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock_bh(&ap_device_list_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ap_poll_wait, &wait);
	return 0;
}
1638
1639static int ap_poll_thread_start(void)
1640{
1641	int rc;
1642
1643	if (ap_using_interrupts() || ap_suspend_flag)
1644		return 0;
1645	mutex_lock(&ap_poll_thread_mutex);
1646	if (!ap_poll_kthread) {
1647		ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
1648		rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
1649		if (rc)
1650			ap_poll_kthread = NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1651	}
1652	else
1653		rc = 0;
1654	mutex_unlock(&ap_poll_thread_mutex);
1655	return rc;
1656}
1657
/*
 * ap_poll_thread_stop(): Stop the AP bus poll thread if it is running.
 *
 * Serialized against ap_poll_thread_start() by ap_poll_thread_mutex.
 */
static void ap_poll_thread_stop(void)
{
	mutex_lock(&ap_poll_thread_mutex);
	if (ap_poll_kthread) {
		kthread_stop(ap_poll_kthread);
		ap_poll_kthread = NULL;
	}
	mutex_unlock(&ap_poll_thread_mutex);
}
1667
1668/**
1669 * ap_request_timeout(): Handling of request timeouts
1670 * @data: Holds the AP device.
1671 *
1672 * Handles request timeouts.
1673 */
1674static void ap_request_timeout(unsigned long data)
1675{
1676	struct ap_device *ap_dev = (struct ap_device *) data;
1677
1678	if (ap_dev->reset == AP_RESET_ARMED) {
1679		ap_dev->reset = AP_RESET_DO;
1680
1681		if (ap_using_interrupts())
1682			tasklet_schedule(&ap_tasklet);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1683	}
 
 
 
 
 
 
 
 
1684}
1685
1686static void ap_reset_domain(void)
1687{
1688	int i;
1689
1690	if (ap_domain_index != -1)
1691		for (i = 0; i < AP_DEVICES; i++)
1692			ap_reset_queue(AP_MKQID(i, ap_domain_index));
1693}
1694
1695static void ap_reset_all(void)
1696{
1697	int i, j;
 
 
 
1698
1699	for (i = 0; i < AP_DOMAINS; i++)
1700		for (j = 0; j < AP_DEVICES; j++)
1701			ap_reset_queue(AP_MKQID(j, i));
1702}
1703
/* Callback registered with the s390 reset machinery (register_reset_call). */
static struct reset_call ap_reset_call = {
	.fn = ap_reset_all,
};
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1707
/**
 * ap_module_init(): The module initialization code.
 *
 * Initializes the module: validates the domain parameter, probes AP
 * instruction and interrupt support, registers the bus and root
 * device, starts the bus scan work queue and timers, and optionally
 * the poll thread. Returns 0 on success or a negative errno; partial
 * initialization is rolled back on error.
 */
int __init ap_module_init(void)
{
	int rc, i;

	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
		pr_warning("%d is not a valid cryptographic domain\n",
			   ap_domain_index);
		return -EINVAL;
	}
	/* In resume callback we need to know if the user had set the domain.
	 * If so, we can not just reset it.
	 */
	if (ap_domain_index >= 0)
		user_set_domain = 1;

	if (ap_instructions_available() != 0) {
		pr_warning("The hardware system does not support "
			   "AP instructions\n");
		return -ENODEV;
	}
	if (ap_interrupts_available()) {
		/* Try adapter interrupts; fall back silently to polling
		 * if registration fails. */
		isc_register(AP_ISC);
		ap_interrupt_indicator = s390_register_adapter_interrupt(
			&ap_interrupt_handler, NULL, AP_ISC);
		if (IS_ERR(ap_interrupt_indicator)) {
			ap_interrupt_indicator = NULL;
			isc_unregister(AP_ISC);
		}
	}

	register_reset_call(&ap_reset_call);

	/* Create /sys/bus/ap. */
	rc = bus_register(&ap_bus_type);
	if (rc)
		goto out;
	for (i = 0; ap_bus_attrs[i]; i++) {
		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
		if (rc)
			goto out_bus;
	}

	/* Create /sys/devices/ap. */
	ap_root_device = root_device_register("ap");
	rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
	if (rc)
		goto out_bus;

	ap_work_queue = create_singlethread_workqueue("kapwork");
	if (!ap_work_queue) {
		rc = -ENOMEM;
		goto out_root;
	}

	/* Do an initial synchronous scan if a domain is usable. */
	if (ap_select_domain() == 0)
		ap_scan_bus(NULL);

	/* Setup the AP bus rescan timer. */
	init_timer(&ap_config_timer);
	ap_config_timer.function = ap_config_timeout;
	ap_config_timer.data = 0;
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);

	/* Setup the high resultion poll timer.
	 * If we are running under z/VM adjust polling to z/VM polling rate.
	 */
	if (MACHINE_IS_VM)
		poll_timeout = 1500000;
	spin_lock_init(&ap_poll_timer_lock);
	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ap_poll_timer.function = ap_poll_timeout;

	/* Start the low priority AP bus poll thread. */
	if (ap_thread_flag) {
		rc = ap_poll_thread_start();
		if (rc)
			goto out_work;
	}

	return 0;

/* Error exits: undo the completed steps in reverse order. */
out_work:
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
out_root:
	root_device_unregister(ap_root_device);
out_bus:
	while (i--)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
out:
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts()) {
		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
		isc_unregister(AP_ISC);
	}
	return rc;
}
1813
/* Match helper for bus_find_device() that matches every device. */
static int __ap_match_all(struct device *dev, void *data)
{
	return 1;
}
1818
/**
 * ap_modules_exit(): The module termination code
 *
 * Terminates the module: resets the domain queues, stops all timers,
 * work and the poll thread, unregisters every AP device and finally
 * the bus and interrupt resources.
 */
void ap_module_exit(void)
{
	int i;
	struct ap_device *ap_dev;
	struct device *dev;

	ap_reset_domain();
	ap_poll_thread_stop();
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
	tasklet_kill(&ap_tasklet);
	root_device_unregister(ap_root_device);
	/* Unregister all remaining AP devices; put_device drops the
	 * reference taken by bus_find_device. */
	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
		    __ap_match_all)))
	{
		device_unregister(dev);
		put_device(dev);
	}
	for (i = 0; ap_bus_attrs[i]; i++)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts()) {
		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
		isc_unregister(AP_ISC);
	}
}
1851
1852#ifndef CONFIG_ZCRYPT_MONOLITHIC
1853module_init(ap_module_init);
1854module_exit(ap_module_exit);
1855#endif
/* ==== Second rendered copy of this file follows, taken from v5.14.15 ==== */
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Copyright IBM Corp. 2006, 2021
 
 
   4 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
   5 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
   6 *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
   7 *	      Felix Beck <felix.beck@de.ibm.com>
   8 *	      Holger Dengler <hd@linux.vnet.ibm.com>
   9 *	      Harald Freudenberger <freude@linux.ibm.com>
  10 *
  11 * Adjunct processor bus.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  12 */
  13
  14#define KMSG_COMPONENT "ap"
  15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  16
  17#include <linux/kernel_stat.h>
  18#include <linux/moduleparam.h>
  19#include <linux/init.h>
  20#include <linux/delay.h>
  21#include <linux/err.h>
  22#include <linux/freezer.h>
  23#include <linux/interrupt.h>
  24#include <linux/workqueue.h>
  25#include <linux/slab.h>
  26#include <linux/notifier.h>
  27#include <linux/kthread.h>
  28#include <linux/mutex.h>
 
  29#include <asm/airq.h>
  30#include <linux/atomic.h>
 
  31#include <asm/isc.h>
  32#include <linux/hrtimer.h>
  33#include <linux/ktime.h>
  34#include <asm/facility.h>
  35#include <linux/crypto.h>
  36#include <linux/mod_devicetable.h>
  37#include <linux/debugfs.h>
  38#include <linux/ctype.h>
  39
  40#include "ap_bus.h"
  41#include "ap_debug.h"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  42
  43/*
  44 * Module parameters; note though this file itself isn't modular.
  45 */
  46int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
  47static DEFINE_SPINLOCK(ap_domain_lock);
  48module_param_named(domain, ap_domain_index, int, 0440);
  49MODULE_PARM_DESC(domain, "domain index for ap devices");
  50EXPORT_SYMBOL(ap_domain_index);
  51
  52static int ap_thread_flag;
  53module_param_named(poll_thread, ap_thread_flag, int, 0440);
  54MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
  55
  56static char *apm_str;
  57module_param_named(apmask, apm_str, charp, 0440);
  58MODULE_PARM_DESC(apmask, "AP bus adapter mask.");
  59
  60static char *aqm_str;
  61module_param_named(aqmask, aqm_str, charp, 0440);
  62MODULE_PARM_DESC(aqmask, "AP bus domain mask.");
  63
  64atomic_t ap_max_msg_size = ATOMIC_INIT(AP_DEFAULT_MAX_MSG_SIZE);
  65EXPORT_SYMBOL(ap_max_msg_size);
  66
  67static struct device *ap_root_device;
  68
  69/* Hashtable of all queue devices on the AP bus */
  70DEFINE_HASHTABLE(ap_queues, 8);
  71/* lock used for the ap_queues hashtable */
  72DEFINE_SPINLOCK(ap_queues_lock);
  73
  74/* Default permissions (ioctl, card and domain masking) */
  75struct ap_perms ap_perms;
  76EXPORT_SYMBOL(ap_perms);
  77DEFINE_MUTEX(ap_perms_mutex);
  78EXPORT_SYMBOL(ap_perms_mutex);
  79
  80/* # of bus scans since init */
  81static atomic64_t ap_scan_bus_count;
  82
  83/* # of bindings complete since init */
  84static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0);
  85
  86/* completion for initial APQN bindings complete */
  87static DECLARE_COMPLETION(ap_init_apqn_bindings_complete);
  88
  89static struct ap_config_info *ap_qci_info;
  90
  91/*
  92 * AP bus related debug feature things.
  93 */
  94debug_info_t *ap_dbf_info;
  95
  96/*
  97 * Workqueue timer for bus rescan.
  98 */
 
  99static struct timer_list ap_config_timer;
 100static int ap_config_time = AP_CONFIG_TIME;
 101static void ap_scan_bus(struct work_struct *);
 102static DECLARE_WORK(ap_scan_work, ap_scan_bus);
 103
 104/*
 105 * Tasklet & timer for AP request polling and interrupts
 106 */
 107static void ap_tasklet_fn(unsigned long);
 108static DECLARE_TASKLET_OLD(ap_tasklet, ap_tasklet_fn);
 109static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
 110static struct task_struct *ap_poll_kthread;
 111static DEFINE_MUTEX(ap_poll_thread_mutex);
 112static DEFINE_SPINLOCK(ap_poll_timer_lock);
 
 113static struct hrtimer ap_poll_timer;
 114/*
 115 * In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
 116 * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.
 117 */
 118static unsigned long long poll_timeout = 250000;
 119
 120/* Maximum domain id, if not given via qci */
 121static int ap_max_domain_id = 15;
 122/* Maximum adapter id, if not given via qci */
 123static int ap_max_adapter_id = 63;
 124
 
 125static struct bus_type ap_bus_type;
 126
 127/* Adapter interrupt definitions */
 128static void ap_interrupt_handler(struct airq_struct *airq, bool floating);
 129
 130static bool ap_irq_flag;
 131
 132static struct airq_struct ap_airq = {
 133	.handler = ap_interrupt_handler,
 134	.isc = AP_ISC,
 135};
 136
 137/**
 138 * ap_airq_ptr() - Get the address of the adapter interrupt indicator
 139 *
 140 * Returns the address of the local-summary-indicator of the adapter
 141 * interrupt handler for AP, or NULL if adapter interrupts are not
 142 * available.
 143 */
 144void *ap_airq_ptr(void)
 145{
 146	if (ap_irq_flag)
 147		return ap_airq.lsi_ptr;
 148	return NULL;
 
 
 
 
 
 
 
 
 149}
 150
/**
 * ap_interrupts_available(): Test if AP interrupts are available.
 *
 * Returns 1 if AP interrupts are available.
 */
static int ap_interrupts_available(void)
{
	/* Facility bit 65: AP-interruption facility is installed. */
	return test_facility(65);
}
 160
/**
 * ap_qci_available(): Test if AP configuration
 * information can be queried via QCI subfunction.
 *
 * Returns 1 if subfunction PQAP(QCI) is available.
 */
static int ap_qci_available(void)
{
	/* Facility bit 12: AP query configuration information. */
	return test_facility(12);
}
 171
 172/**
 173 * ap_apft_available(): Test if AP facilities test (APFT)
 174 * facility is available.
 175 *
 176 * Returns 1 if APFT is is available.
 177 */
 178static int ap_apft_available(void)
 179{
 180	return test_facility(15);
 
 
 
 
 
 
 
 181}
 182
/*
 * ap_qact_available(): Test if the PQAP(QACT) subfunction is available.
 *
 * Returns 1 if the QACT subfunction is available.
 */
static inline int ap_qact_available(void)
{
	/* QACT support is reported via the QCI info; without QCI assume no */
	if (ap_qci_info)
		return ap_qci_info->qact;
	return 0;
}

/*
 * ap_fetch_qci_info(): Fetch cryptographic config info
 * @info: buffer to receive the QCI information, must not be NULL
 *
 * Returns the ap configuration info fetched via PQAP(QCI).
 * On success 0 is returned, on failure a negative errno
 * is returned, e.g. if the PQAP(QCI) instruction is not
 * available, the return value will be -EOPNOTSUPP.
 */
static inline int ap_fetch_qci_info(struct ap_config_info *info)
{
	if (!ap_qci_available())
		return -EOPNOTSUPP;
	if (!info)
		return -EINVAL;
	return ap_qci(info);
}

/**
 * ap_init_qci_info(): Allocate and query qci config info.
 * Does also update the static variables ap_max_domain_id
 * and ap_max_adapter_id if this info is available.
 */
static void __init ap_init_qci_info(void)
{
	if (!ap_qci_available()) {
		AP_DBF_INFO("%s QCI not supported\n", __func__);
		return;
	}

	ap_qci_info = kzalloc(sizeof(*ap_qci_info), GFP_KERNEL);
	if (!ap_qci_info)
		return;	/* out of memory: continue with the defaults */
	if (ap_fetch_qci_info(ap_qci_info) != 0) {
		kfree(ap_qci_info);
		ap_qci_info = NULL;
		return;
	}
	AP_DBF_INFO("%s successful fetched initial qci info\n", __func__);

	/* apxa set: the qci info carries the machine's real max ids */
	if (ap_qci_info->apxa) {
		if (ap_qci_info->Na) {
			ap_max_adapter_id = ap_qci_info->Na;
			AP_DBF_INFO("%s new ap_max_adapter_id is %d\n",
				    __func__, ap_max_adapter_id);
		}
		if (ap_qci_info->Nd) {
			ap_max_domain_id = ap_qci_info->Nd;
			AP_DBF_INFO("%s new ap_max_domain_id is %d\n",
				    __func__, ap_max_domain_id);
		}
	}
}
 248
 249/*
 250 * ap_test_config(): helper function to extract the nrth bit
 251 *		     within the unsigned int array field.
 
 
 
 252 */
 253static inline int ap_test_config(unsigned int *field, unsigned int nr)
 254{
 255	return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
 
 
 
 
 
 
 256}
 
 257
 258/*
 259 * ap_test_config_card_id(): Test, whether an AP card ID is configured.
 
 
 260 *
 261 * Returns 0 if the card is not configured
 262 *	   1 if the card is configured or
 263 *	     if the configuration information is not available
 264 */
 265static inline int ap_test_config_card_id(unsigned int id)
 266{
 267	if (id > ap_max_adapter_id)
 268		return 0;
 269	if (ap_qci_info)
 270		return ap_test_config(ap_qci_info->apm, id);
 271	return 1;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 272}
 273
 274/*
 275 * ap_test_config_usage_domain(): Test, whether an AP usage domain
 276 * is configured.
 
 
 
 
 277 *
 278 * Returns 0 if the usage domain is not configured
 279 *	   1 if the usage domain is configured or
 280 *	     if the configuration information is not available
 281 */
 282int ap_test_config_usage_domain(unsigned int domain)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 283{
 284	if (domain > ap_max_domain_id)
 
 
 
 
 285		return 0;
 286	if (ap_qci_info)
 287		return ap_test_config(ap_qci_info->aqm, domain);
 288	return 1;
 
 
 
 
 
 289}
 290EXPORT_SYMBOL(ap_test_config_usage_domain);
 291
 292/*
 293 * ap_test_config_ctrl_domain(): Test, whether an AP control domain
 294 * is configured.
 295 * @domain AP control domain ID
 
 
 296 *
 297 * Returns 1 if the control domain is configured
 298 *	   0 in all other cases
 299 */
 300int ap_test_config_ctrl_domain(unsigned int domain)
 301{
 302	if (!ap_qci_info || domain > ap_max_domain_id)
 303		return 0;
 304	return ap_test_config(ap_qci_info->adm, domain);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 305}
 306EXPORT_SYMBOL(ap_test_config_ctrl_domain);
 307
/*
 * ap_queue_info(): Check and get AP queue info.
 * @qid: APQN to test
 * @q_type: on success, AP device type from TAPQ GR2
 * @q_fac: on success, facility bits (amended for CEX2/CEX3, see below)
 * @q_depth: on success, queue depth
 * @q_ml: on success, max message length indicator
 * @q_decfg: on success, true if the queue is deconfigured
 *
 * Returns true if TAPQ succeeded and the info is filled or
 * false otherwise.
 */
static bool ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
			  int *q_depth, int *q_ml, bool *q_decfg)
{
	struct ap_queue_status status;
	/* layout of TAPQ general register 2 */
	union {
		unsigned long value;
		struct {
			unsigned int fac   : 32; /* facility bits */
			unsigned int at	   :  8; /* ap type */
			unsigned int _res1 :  8;
			unsigned int _res2 :  4;
			unsigned int ml	   :  4; /* apxl ml */
			unsigned int _res3 :  4;
			unsigned int qd	   :  4; /* queue depth */
		} tapq_gr2;
	} tapq_info;

	tapq_info.value = 0;

	/* make sure we don't run into a specification exception */
	if (AP_QID_CARD(qid) > ap_max_adapter_id ||
	    AP_QID_QUEUE(qid) > ap_max_domain_id)
		return false;

	/* call TAPQ on this APQN */
	status = ap_test_queue(qid, ap_apft_available(), &tapq_info.value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_BUSY:
		/*
		 * According to the architecture in all these cases the
		 * info should be filled. All bits 0 is not possible as
		 * there is at least one of the mode bits set.
		 */
		if (WARN_ON_ONCE(!tapq_info.value))
			return false;
		*q_type = tapq_info.tapq_gr2.at;
		*q_fac = tapq_info.tapq_gr2.fac;
		*q_depth = tapq_info.tapq_gr2.qd;
		*q_ml = tapq_info.tapq_gr2.ml;
		*q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
		switch (*q_type) {
			/* For CEX2 and CEX3 the available functions
			 * are not reflected by the facilities bits.
			 * Instead it is coded into the type. So here
			 * modify the function bits based on the type.
			 */
		case AP_DEVICE_TYPE_CEX2A:
		case AP_DEVICE_TYPE_CEX3A:
			*q_fac |= 0x08000000;
			break;
		case AP_DEVICE_TYPE_CEX2C:
		case AP_DEVICE_TYPE_CEX3C:
			*q_fac |= 0x10000000;
			break;
		default:
			break;
		}
		return true;
	default:
		/*
		 * A response code which indicates, there is no info available.
		 */
		return false;
	}
}
 
 382
/*
 * ap_wait(): Rearm the AP poll machinery according to the state
 * machine's wait hint: rely on interrupts, wake the poll thread,
 * or (re)arm the high resolution poll timer.
 * @wait: wait hint as returned by ap_sm_event()/ap_sm_event_loop()
 */
void ap_wait(enum ap_sm_wait wait)
{
	ktime_t hr_time;

	switch (wait) {
	case AP_SM_WAIT_AGAIN:
	case AP_SM_WAIT_INTERRUPT:
		/* interrupts will drive further processing, nothing to do */
		if (ap_irq_flag)
			break;
		if (ap_poll_kthread) {
			wake_up(&ap_poll_wait);
			break;
		}
		fallthrough;
	case AP_SM_WAIT_TIMEOUT:
		/* arm the poll timer unless it is already pending */
		spin_lock_bh(&ap_poll_timer_lock);
		if (!hrtimer_is_queued(&ap_poll_timer)) {
			hr_time = poll_timeout;
			hrtimer_forward_now(&ap_poll_timer, hr_time);
			hrtimer_restart(&ap_poll_timer);
		}
		spin_unlock_bh(&ap_poll_timer_lock);
		break;
	case AP_SM_WAIT_NONE:
	default:
		break;
	}
}
 411
/**
 * ap_request_timeout(): Handling of request timeouts
 * @t: timer making this callback
 *
 * Handles request timeouts.
 */
void ap_request_timeout(struct timer_list *t)
{
	struct ap_queue *aq = from_timer(aq, t, timeout);

	/* feed the timeout event into the queue's state machine */
	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_TIMEOUT));
	spin_unlock_bh(&aq->lock);
}

/**
 * ap_poll_timeout(): AP receive polling for finished AP requests.
 * @unused: Unused pointer.
 *
 * Schedules the AP tasklet using a high resolution timer.
 */
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
	tasklet_schedule(&ap_tasklet);
	return HRTIMER_NORESTART;
}

/**
 * ap_interrupt_handler() - Schedule ap_tasklet on interrupt
 * @airq: pointer to adapter interrupt descriptor
 * @floating: unused; all AP interrupts are handled identically
 */
static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
{
	inc_irq_stat(IRQIO_APB);
	tasklet_schedule(&ap_tasklet);
}
 448
/**
 * ap_tasklet_fn(): Tasklet to poll all AP devices.
 * @dummy: Unused variable
 *
 * Poll all AP devices on the bus.
 */
static void ap_tasklet_fn(unsigned long dummy)
{
	int bkt;
	struct ap_queue *aq;
	enum ap_sm_wait wait = AP_SM_WAIT_NONE;

	/* Reset the indicator if interrupts are used. Thus new interrupts can
	 * be received. Doing this right at the beginning of the tasklet is
	 * important so that no interrupt (and thus no request) gets lost.
	 */
	if (ap_irq_flag)
		xchg(ap_airq.lsi_ptr, 0);

	/* poll every known queue and keep the smallest wait hint */
	spin_lock_bh(&ap_queues_lock);
	hash_for_each(ap_queues, bkt, aq, hnode) {
		spin_lock_bh(&aq->lock);
		wait = min(wait, ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
		spin_unlock_bh(&aq->lock);
	}
	spin_unlock_bh(&ap_queues_lock);

	ap_wait(wait);
}

/*
 * ap_pending_requests(): Check if any queue has outstanding requests.
 *
 * Returns 1 if so, 0 otherwise.
 */
static int ap_pending_requests(void)
{
	int bkt;
	struct ap_queue *aq;

	spin_lock_bh(&ap_queues_lock);
	hash_for_each(ap_queues, bkt, aq, hnode) {
		if (aq->queue_count == 0)
			continue;
		/* found one - drop the lock and report */
		spin_unlock_bh(&ap_queues_lock);
		return 1;
	}
	spin_unlock_bh(&ap_queues_lock);
	return 0;
}

/**
 * ap_poll_thread(): Thread that polls for finished requests.
 * @data: Unused pointer
 *
 * AP bus poll thread. The purpose of this thread is to poll for
 * finished requests in a loop if there is a "free" cpu - that is
 * a cpu that doesn't have anything better to do. The polling stops
 * as soon as there is another task or if all messages have been
 * delivered.
 */
static int ap_poll_thread(void *data)
{
	DECLARE_WAITQUEUE(wait, current);

	set_user_nice(current, MAX_NICE);
	set_freezable();
	while (!kthread_should_stop()) {
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		/* sleep until there is work or somebody wakes us */
		if (!ap_pending_requests()) {
			schedule();
			try_to_freeze();
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ap_poll_wait, &wait);
		/* back off as soon as another task wants the cpu */
		if (need_resched()) {
			schedule();
			try_to_freeze();
			continue;
		}
		ap_tasklet_fn(0);
	}

	return 0;
}
 530
 531static int ap_poll_thread_start(void)
 
 
 
 532{
 
 533	int rc;
 534
 535	if (ap_irq_flag || ap_poll_kthread)
 536		return 0;
 537	mutex_lock(&ap_poll_thread_mutex);
 538	ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
 539	rc = PTR_ERR_OR_ZERO(ap_poll_kthread);
 540	if (rc)
 541		ap_poll_kthread = NULL;
 542	mutex_unlock(&ap_poll_thread_mutex);
 543	return rc;
 544}
 545
 546static void ap_poll_thread_stop(void)
 
 
 
 547{
 548	if (!ap_poll_kthread)
 549		return;
 550	mutex_lock(&ap_poll_thread_mutex);
 551	kthread_stop(ap_poll_kthread);
 552	ap_poll_kthread = NULL;
 553	mutex_unlock(&ap_poll_thread_mutex);
 554}
 555
/* Card devices are direct children of the bus root; queue devices hang
 * below a card device.
 */
#define is_card_dev(x) ((x)->parent == ap_root_device)
#define is_queue_dev(x) ((x)->parent != ap_root_device)
 
 
 
 
 
 
 
 
 
 
 558
 559/**
 560 * ap_bus_match()
 561 * @dev: Pointer to device
 562 * @drv: Pointer to device_driver
 563 *
 564 * AP bus driver registration/unregistration.
 565 */
 566static int ap_bus_match(struct device *dev, struct device_driver *drv)
 567{
 
 568	struct ap_driver *ap_drv = to_ap_drv(drv);
 569	struct ap_device_id *id;
 570
 571	/*
 572	 * Compare device type of the device with the list of
 573	 * supported types of the device_driver.
 574	 */
 575	for (id = ap_drv->ids; id->match_flags; id++) {
 576		if (is_card_dev(dev) &&
 577		    id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE &&
 578		    id->dev_type == to_ap_dev(dev)->device_type)
 579			return 1;
 580		if (is_queue_dev(dev) &&
 581		    id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE &&
 582		    id->dev_type == to_ap_dev(dev)->device_type)
 583			return 1;
 584	}
 585	return 0;
 586}
 587
/**
 * ap_uevent(): Uevent function for AP devices.
 * @dev: Pointer to device
 * @env: Pointer to kobj_uevent_env
 *
 * Card devices get DEV_TYPE, MODALIAS and MODE variables, queue
 * devices only a MODE variable derived from their card.
 */
static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	int rc = 0;
	struct ap_device *ap_dev = to_ap_dev(dev);

	/* Uevents from ap bus core don't need extensions to the env */
	if (dev == ap_root_device)
		return 0;

	if (is_card_dev(dev)) {
		struct ap_card *ac = to_ap_card(&ap_dev->device);

		/* Set up DEV_TYPE environment variable. */
		rc = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
		if (rc)
			return rc;
		/* Add MODALIAS= */
		rc = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
		if (rc)
			return rc;

		/* Add MODE=<accel|cca|ep11> */
		if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL))
			rc = add_uevent_var(env, "MODE=accel");
		else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
			rc = add_uevent_var(env, "MODE=cca");
		else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
			rc = add_uevent_var(env, "MODE=ep11");
		if (rc)
			return rc;
	} else {
		struct ap_queue *aq = to_ap_queue(&ap_dev->device);

		/* Add MODE=<accel|cca|ep11>, taken from the owning card */
		if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL))
			rc = add_uevent_var(env, "MODE=accel");
		else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
			rc = add_uevent_var(env, "MODE=cca");
		else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
			rc = add_uevent_var(env, "MODE=ep11");
		if (rc)
			return rc;
	}

	return 0;
}
 642
/* Notify user space that the initial ap bus scan has finished */
static void ap_send_init_scan_done_uevent(void)
{
	char *envp[] = { "INITSCAN=done", NULL };

	kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
}

/* Notify user space that all APQNs are now bound to device drivers */
static void ap_send_bindings_complete_uevent(void)
{
	char buf[32];
	char *envp[] = { "BINDINGS=complete", buf, NULL };

	/* COMPLETECOUNT lets user space distinguish repeated completions */
	snprintf(buf, sizeof(buf), "COMPLETECOUNT=%llu",
		 atomic64_inc_return(&ap_bindings_complete_count));
	kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
}

/* Emit a CONFIG=0|1 change uevent for the given ap device */
void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg)
{
	char buf[16];
	char *envp[] = { buf, NULL };

	snprintf(buf, sizeof(buf), "CONFIG=%d", cfg ? 1 : 0);

	kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(ap_send_config_uevent);

/* Emit an ONLINE=0|1 change uevent for the given ap device */
void ap_send_online_uevent(struct ap_device *ap_dev, int online)
{
	char buf[16];
	char *envp[] = { buf, NULL };

	snprintf(buf, sizeof(buf), "ONLINE=%d", online ? 1 : 0);

	kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(ap_send_online_uevent);
 681
 682/*
 683 * calc # of bound APQNs
 684 */
 685
 686struct __ap_calc_ctrs {
 687	unsigned int apqns;
 688	unsigned int bound;
 689};
 690
 691static int __ap_calc_helper(struct device *dev, void *arg)
 692{
 693	struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *) arg;
 
 
 694
 695	if (is_queue_dev(dev)) {
 696		pctrs->apqns++;
 697		if ((to_ap_dev(dev))->drv)
 698			pctrs->bound++;
 
 
 699	}
 700
 701	return 0;
 702}
 703
 704static void ap_calc_bound_apqns(unsigned int *apqns, unsigned int *bound)
 705{
 706	struct __ap_calc_ctrs ctrs;
 707
 708	memset(&ctrs, 0, sizeof(ctrs));
 709	bus_for_each_dev(&ap_bus_type, NULL, (void *) &ctrs, __ap_calc_helper);
 710
 711	*apqns = ctrs.apqns;
 712	*bound = ctrs.bound;
 713}
 714
 715/*
 716 * After initial ap bus scan do check if all existing APQNs are
 717 * bound to device drivers.
 718 */
 719static void ap_check_bindings_complete(void)
 720{
 721	unsigned int apqns, bound;
 722
 723	if (atomic64_read(&ap_scan_bus_count) >= 1) {
 724		ap_calc_bound_apqns(&apqns, &bound);
 725		if (bound == apqns) {
 726			if (!completion_done(&ap_init_apqn_bindings_complete)) {
 727				complete_all(&ap_init_apqn_bindings_complete);
 728				AP_DBF(DBF_INFO, "%s complete\n", __func__);
 729			}
 730			ap_send_bindings_complete_uevent();
 731		}
 732	}
 733}
 734
/*
 * Interface to wait for the AP bus to have done one initial ap bus
 * scan and all detected APQNs have been bound to device drivers.
 * If these both conditions are not fulfilled, this function blocks
 * on a condition with wait_for_completion_interruptible_timeout().
 * If these both conditions are fulfilled (before the timeout hits)
 * the return value is 0. If the timeout (in jiffies) hits instead
 * -ETIME is returned. On failures negative return values are
 * returned to the caller.
 */
int ap_wait_init_apqn_bindings_complete(unsigned long timeout)
{
	long l;

	if (completion_done(&ap_init_apqn_bindings_complete))
		return 0;

	/* timeout == 0 means wait without any time limit */
	if (timeout)
		l = wait_for_completion_interruptible_timeout(
			&ap_init_apqn_bindings_complete, timeout);
	else
		l = wait_for_completion_interruptible(
			&ap_init_apqn_bindings_complete);
	if (l < 0)
		return l == -ERESTARTSYS ? -EINTR : l;
	else if (l == 0 && timeout)
		return -ETIME;

	return 0;
}
EXPORT_SYMBOL(ap_wait_init_apqn_bindings_complete);

/* Bus iterator callback: unregister queue devices of the given card id */
static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
{
	if (is_queue_dev(dev) &&
	    AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long) data)
		device_unregister(dev);
	return 0;
}
 774
 775static int __ap_revise_reserved(struct device *dev, void *dummy)
 776{
 777	int rc, card, queue, devres, drvres;
 778
 779	if (is_queue_dev(dev)) {
 780		card = AP_QID_CARD(to_ap_queue(dev)->qid);
 781		queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
 782		mutex_lock(&ap_perms_mutex);
 783		devres = test_bit_inv(card, ap_perms.apm)
 784			&& test_bit_inv(queue, ap_perms.aqm);
 785		mutex_unlock(&ap_perms_mutex);
 786		drvres = to_ap_drv(dev->driver)->flags
 787			& AP_DRIVER_FLAG_DEFAULT;
 788		if (!!devres != !!drvres) {
 789			AP_DBF_DBG("reprobing queue=%02x.%04x\n",
 790				   card, queue);
 791			rc = device_reprobe(dev);
 792		}
 793	}
 794
 795	return 0;
 796}
 797
/* Re-evaluate the driver binding of every device on the ap bus */
static void ap_bus_revise_bindings(void)
{
	bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_revise_reserved);
}
 802
 803int ap_owned_by_def_drv(int card, int queue)
 804{
 805	int rc = 0;
 806
 807	if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
 808		return -EINVAL;
 809
 810	mutex_lock(&ap_perms_mutex);
 811
 812	if (test_bit_inv(card, ap_perms.apm)
 813	    && test_bit_inv(queue, ap_perms.aqm))
 814		rc = 1;
 815
 816	mutex_unlock(&ap_perms_mutex);
 817
 818	return rc;
 819}
 820EXPORT_SYMBOL(ap_owned_by_def_drv);
 821
/*
 * ap_apqn_in_matrix_owned_by_def_drv(): Test if at least one APQN
 * in the given matrix (apm x aqm) is reserved for the default drivers.
 * @apm: adapter bit mask
 * @aqm: domain bit mask
 *
 * Returns 1 if such an APQN exists, 0 otherwise.
 */
int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
				       unsigned long *aqm)
{
	int card, queue, rc = 0;

	mutex_lock(&ap_perms_mutex);

	/* the !rc in both loop conditions ends the scan on the first hit */
	for (card = 0; !rc && card < AP_DEVICES; card++)
		if (test_bit_inv(card, apm) &&
		    test_bit_inv(card, ap_perms.apm))
			for (queue = 0; !rc && queue < AP_DOMAINS; queue++)
				if (test_bit_inv(queue, aqm) &&
				    test_bit_inv(queue, ap_perms.aqm))
					rc = 1;

	mutex_unlock(&ap_perms_mutex);

	return rc;
}
EXPORT_SYMBOL(ap_apqn_in_matrix_owned_by_def_drv);
 842
static int ap_device_probe(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
	int card, queue, devres, drvres, rc = -ENODEV;

	/* reference is dropped in ap_device_remove() or below on failure */
	if (!get_device(dev))
		return rc;

	if (is_queue_dev(dev)) {
		/*
		 * If the apqn is marked as reserved/used by ap bus and
		 * default drivers, only probe with drivers with the default
		 * flag set. If it is not marked, only probe with drivers
		 * with the default flag not set.
		 */
		card = AP_QID_CARD(to_ap_queue(dev)->qid);
		queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
		mutex_lock(&ap_perms_mutex);
		devres = test_bit_inv(card, ap_perms.apm)
			&& test_bit_inv(queue, ap_perms.aqm);
		mutex_unlock(&ap_perms_mutex);
		drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
		if (!!devres != !!drvres)
			goto out;
	}

	/* Add queue/card to list of active queues/cards */
	spin_lock_bh(&ap_queues_lock);
	if (is_queue_dev(dev))
		hash_add(ap_queues, &to_ap_queue(dev)->hnode,
			 to_ap_queue(dev)->qid);
	spin_unlock_bh(&ap_queues_lock);

	ap_dev->drv = ap_drv;
	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;

	if (rc) {
		/* undo the hash_add from above on probe failure */
		spin_lock_bh(&ap_queues_lock);
		if (is_queue_dev(dev))
			hash_del(&to_ap_queue(dev)->hnode);
		spin_unlock_bh(&ap_queues_lock);
		ap_dev->drv = NULL;
	} else
		ap_check_bindings_complete();

out:
	if (rc)
		put_device(dev);
	return rc;
}

static int ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = ap_dev->drv;

	/* prepare ap queue device removal */
	if (is_queue_dev(dev))
		ap_queue_prepare_remove(to_ap_queue(dev));

	/* driver's chance to clean up gracefully */
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);

	/* now do the ap queue device remove */
	if (is_queue_dev(dev))
		ap_queue_remove(to_ap_queue(dev));

	/* Remove queue/card from list of active queues/cards */
	spin_lock_bh(&ap_queues_lock);
	if (is_queue_dev(dev))
		hash_del(&to_ap_queue(dev)->hnode);
	spin_unlock_bh(&ap_queues_lock);
	ap_dev->drv = NULL;

	/* drop the reference taken in ap_device_probe() */
	put_device(dev);

	return 0;
}
 923
 924struct ap_queue *ap_get_qdev(ap_qid_t qid)
 925{
 926	int bkt;
 927	struct ap_queue *aq;
 928
 929	spin_lock_bh(&ap_queues_lock);
 930	hash_for_each(ap_queues, bkt, aq, hnode) {
 931		if (aq->qid == qid) {
 932			get_device(&aq->ap_dev.device);
 933			spin_unlock_bh(&ap_queues_lock);
 934			return aq;
 935		}
 936	}
 937	spin_unlock_bh(&ap_queues_lock);
 938
 939	return NULL;
 940}
 941EXPORT_SYMBOL(ap_get_qdev);
 942
/*
 * ap_driver_register(): Register an ap driver with the ap bus.
 * @ap_drv: the ap driver to register
 * @owner: owning module
 * @name: driver name
 *
 * Returns the result of driver_register().
 */
int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
	struct device_driver *drv = &ap_drv->driver;

	drv->bus = &ap_bus_type;
	drv->owner = owner;
	drv->name = name;
	return driver_register(drv);
}
EXPORT_SYMBOL(ap_driver_register);

/* Counterpart to ap_driver_register() */
void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);

/*
 * ap_bus_force_rescan(): Trigger an immediate bus rescan and wait
 * for it to finish.
 */
void ap_bus_force_rescan(void)
{
	/* processing a asynchronous bus rescan */
	del_timer(&ap_config_timer);
	queue_work(system_long_wq, &ap_scan_work);
	flush_work(&ap_scan_work);
}
EXPORT_SYMBOL(ap_bus_force_rescan);

/*
 * A config change has happened, force an ap bus rescan.
 */
void ap_bus_cfg_chg(void)
{
	AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__);

	ap_bus_force_rescan();
}
 979
/*
 * hex2bitmap() - parse hex mask string and set bitmap.
 * Valid strings are "0x012345678" with at least one valid hex number.
 * Rest of the bitmap to the right is padded with 0. No spaces allowed
 * within the string, the leading 0x may be omitted.
 * Returns the bitmask with exactly the bits set as given by the hex
 * string (both in big endian order).
 */
static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
{
	int i, n, b;

	/* bits needs to be a multiple of 8 */
	if (bits & 0x07)
		return -EINVAL;

	/* skip an optional "0x" (or a bare "x") prefix */
	if (str[0] == '0' && str[1] == 'x')
		str++;
	if (*str == 'x')
		str++;

	/* each hex digit sets up to 4 bits, most significant bit first */
	for (i = 0; isxdigit(*str) && i < bits; str++) {
		b = hex_to_bin(*str);
		for (n = 0; n < 4; n++)
			if (b & (0x08 >> n))
				set_bit_inv(i + n, bitmap);
		i += 4;
	}

	/* only an optional trailing newline may follow the digits */
	if (*str == '\n')
		str++;
	if (*str)
		return -EINVAL;
	return 0;
}

/*
 * modify_bitmap() - parse bitmask argument and modify an existing
 * bit mask accordingly. A concatenation (done with ',') of these
 * terms is recognized:
 *   +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>]
 * <bitnr> may be any valid number (hex, decimal or octal) in the range
 * 0...bits-1; the leading + or - is required. Here are some examples:
 *   +0-15,+32,-128,-0xFF
 *   -0-255,+1-16,+0x128
 *   +1,+2,+3,+4,-5,-7-10
 * Returns the new bitmap after all changes have been applied. Every
 * positive value in the string will set a bit and every negative value
 * in the string will clear a bit. As a bit may be touched more than once,
 * the last 'operation' wins:
 * +0-255,-128 = first bits 0-255 will be set, then bit 128 will be
 * cleared again. All other bits are unmodified.
 */
static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
{
	int a, i, z;
	char *np, sign;

	/* bits needs to be a multiple of 8 */
	if (bits & 0x07)
		return -EINVAL;

	while (*str) {
		sign = *str++;
		if (sign != '+' && sign != '-')
			return -EINVAL;
		/* parse "<a>" or "<a>-<z>" into the inclusive range [a, z] */
		a = z = simple_strtoul(str, &np, 0);
		if (str == np || a >= bits)
			return -EINVAL;
		str = np;
		if (*str == '-') {
			z = simple_strtoul(++str, &np, 0);
			if (str == np || a > z || z >= bits)
				return -EINVAL;
			str = np;
		}
		for (i = a; i <= z; i++)
			if (sign == '+')
				set_bit_inv(i, bitmap);
			else
				clear_bit_inv(i, bitmap);
		while (*str == ',' || *str == '\n')
			str++;
	}

	return 0;
}

/*
 * ap_parse_mask_str() - parse a mask string (absolute hex value or
 * relative +/- terms) and apply it to the given bitmap.
 * @str: string to parse, see hex2bitmap() and modify_bitmap()
 * @bitmap: bitmap to update, only written on a successful parse
 * @bits: size of the bitmap in bits, must be a multiple of 8
 * @lock: mutex held while the bitmap is read and written
 *
 * Returns 0 on success or a negative errno.
 */
int ap_parse_mask_str(const char *str,
		      unsigned long *bitmap, int bits,
		      struct mutex *lock)
{
	unsigned long *newmap, size;
	int rc;

	/* bits needs to be a multiple of 8 */
	if (bits & 0x07)
		return -EINVAL;

	size = BITS_TO_LONGS(bits)*sizeof(unsigned long);
	newmap = kmalloc(size, GFP_KERNEL);
	if (!newmap)
		return -ENOMEM;
	if (mutex_lock_interruptible(lock)) {
		kfree(newmap);
		return -ERESTARTSYS;
	}

	/* work on a copy so the bitmap stays untouched on parse errors */
	if (*str == '+' || *str == '-') {
		memcpy(newmap, bitmap, size);
		rc = modify_bitmap(str, newmap, bits);
	} else {
		memset(newmap, 0, size);
		rc = hex2bitmap(str, newmap, bits);
	}
	if (rc == 0)
		memcpy(bitmap, newmap, size);
	mutex_unlock(lock);
	kfree(newmap);
	return rc;
}
EXPORT_SYMBOL(ap_parse_mask_str);
1102
/*
 * AP bus attributes.
 */

/* Show the default AP domain (ap_domain_index) */
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}

/* Set a new default AP domain; it must be in range and usable per ap_perms */
static ssize_t ap_domain_store(struct bus_type *bus,
			       const char *buf, size_t count)
{
	int domain;

	if (sscanf(buf, "%i\n", &domain) != 1 ||
	    domain < 0 || domain > ap_max_domain_id ||
	    !test_bit_inv(domain, ap_perms.aqm))
		return -EINVAL;

	spin_lock_bh(&ap_domain_lock);
	ap_domain_index = domain;
	spin_unlock_bh(&ap_domain_lock);

	AP_DBF_INFO("stored new default domain=%d\n", domain);

	return count;
}

static BUS_ATTR_RW(ap_domain);

/* Show the control domain mask from the QCI info as 256 hex bits */
static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
{
	if (!ap_qci_info)	/* QCI not supported */
		return scnprintf(buf, PAGE_SIZE, "not supported\n");

	return scnprintf(buf, PAGE_SIZE,
			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
			 ap_qci_info->adm[0], ap_qci_info->adm[1],
			 ap_qci_info->adm[2], ap_qci_info->adm[3],
			 ap_qci_info->adm[4], ap_qci_info->adm[5],
			 ap_qci_info->adm[6], ap_qci_info->adm[7]);
}

static BUS_ATTR_RO(ap_control_domain_mask);

/* Show the usage domain mask from the QCI info as 256 hex bits */
static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
{
	if (!ap_qci_info)	/* QCI not supported */
		return scnprintf(buf, PAGE_SIZE, "not supported\n");

	return scnprintf(buf, PAGE_SIZE,
			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
			 ap_qci_info->aqm[0], ap_qci_info->aqm[1],
			 ap_qci_info->aqm[2], ap_qci_info->aqm[3],
			 ap_qci_info->aqm[4], ap_qci_info->aqm[5],
			 ap_qci_info->aqm[6], ap_qci_info->aqm[7]);
}

static BUS_ATTR_RO(ap_usage_domain_mask);

/* Show the adapter mask from the QCI info as 256 hex bits */
static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf)
{
	if (!ap_qci_info)	/* QCI not supported */
		return scnprintf(buf, PAGE_SIZE, "not supported\n");

	return scnprintf(buf, PAGE_SIZE,
			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
			 ap_qci_info->apm[0], ap_qci_info->apm[1],
			 ap_qci_info->apm[2], ap_qci_info->apm[3],
			 ap_qci_info->apm[4], ap_qci_info->apm[5],
			 ap_qci_info->apm[6], ap_qci_info->apm[7]);
}

static BUS_ATTR_RO(ap_adapter_mask);

/* Show whether adapter interrupts (1) or polling (0) is in use */
static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 ap_irq_flag ? 1 : 0);
}

static BUS_ATTR_RO(ap_interrupts);
1185
/* Show the ap bus rescan interval in seconds */
static ssize_t config_time_show(struct bus_type *bus, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}

/* Set the rescan interval (5..120 seconds) and rearm the scan timer */
static ssize_t config_time_store(struct bus_type *bus,
				 const char *buf, size_t count)
{
	int time;

	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
		return -EINVAL;
	ap_config_time = time;
	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
	return count;
}

static BUS_ATTR_RW(config_time);

/* Show whether the AP poll thread is running */
static ssize_t poll_thread_show(struct bus_type *bus, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
}

/* Start (non-zero) or stop (0) the AP poll thread */
static ssize_t poll_thread_store(struct bus_type *bus,
				 const char *buf, size_t count)
{
	int flag, rc;

	if (sscanf(buf, "%d\n", &flag) != 1)
		return -EINVAL;
	if (flag) {
		rc = ap_poll_thread_start();
		if (rc)
			count = rc;	/* negative rc is returned as error */
	} else
		ap_poll_thread_stop();
	return count;
}

static BUS_ATTR_RW(poll_thread);

/* Show the poll timer interval in nanoseconds */
static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
}

/* Set the poll timer interval (1ns..120s) and reprogram the timer */
static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
				  size_t count)
{
	unsigned long long time;
	ktime_t hr_time;

	/* 120 seconds = maximum poll interval */
	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
	    time > 120000000000ULL)
		return -EINVAL;
	poll_timeout = time;
	hr_time = poll_timeout;

	spin_lock_bh(&ap_poll_timer_lock);
	hrtimer_cancel(&ap_poll_timer);
	hrtimer_set_expires(&ap_poll_timer, hr_time);
	hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
	spin_unlock_bh(&ap_poll_timer_lock);

	return count;
}

static BUS_ATTR_RW(poll_timeout);
 
 
 
 
 
 
 
 
 
1256
/* Show the maximum domain id (default or as reported via QCI) */
static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_domain_id);
}

static BUS_ATTR_RO(ap_max_domain_id);

/* Show the maximum adapter id (default or as reported via QCI) */
static ssize_t ap_max_adapter_id_show(struct bus_type *bus, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_adapter_id);
}

static BUS_ATTR_RO(ap_max_adapter_id);
 
 
 
 
 
1270
1271static ssize_t apmask_show(struct bus_type *bus, char *buf)
1272{
1273	int rc;
1274
1275	if (mutex_lock_interruptible(&ap_perms_mutex))
1276		return -ERESTARTSYS;
1277	rc = scnprintf(buf, PAGE_SIZE,
1278		       "0x%016lx%016lx%016lx%016lx\n",
1279		       ap_perms.apm[0], ap_perms.apm[1],
1280		       ap_perms.apm[2], ap_perms.apm[3]);
1281	mutex_unlock(&ap_perms_mutex);
 
 
 
 
 
 
1282
 
 
 
1283	return rc;
1284}
1285
1286static ssize_t apmask_store(struct bus_type *bus, const char *buf,
1287			    size_t count)
1288{
1289	int rc;
 
 
1290
1291	rc = ap_parse_mask_str(buf, ap_perms.apm, AP_DEVICES, &ap_perms_mutex);
1292	if (rc)
1293		return rc;
1294
1295	ap_bus_revise_bindings();
1296
1297	return count;
 
 
 
1298}
1299
1300static BUS_ATTR_RW(apmask);
1301
1302static ssize_t aqmask_show(struct bus_type *bus, char *buf)
1303{
1304	int rc;
1305
1306	if (mutex_lock_interruptible(&ap_perms_mutex))
1307		return -ERESTARTSYS;
1308	rc = scnprintf(buf, PAGE_SIZE,
1309		       "0x%016lx%016lx%016lx%016lx\n",
1310		       ap_perms.aqm[0], ap_perms.aqm[1],
1311		       ap_perms.aqm[2], ap_perms.aqm[3]);
1312	mutex_unlock(&ap_perms_mutex);
1313
1314	return rc;
1315}
1316
1317static ssize_t aqmask_store(struct bus_type *bus, const char *buf,
1318			    size_t count)
1319{
1320	int rc;
 
 
 
 
 
1321
1322	rc = ap_parse_mask_str(buf, ap_perms.aqm, AP_DOMAINS, &ap_perms_mutex);
1323	if (rc)
1324		return rc;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1325
1326	ap_bus_revise_bindings();
1327
1328	return count;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1329}
1330
1331static BUS_ATTR_RW(aqmask);
1332
1333static ssize_t scans_show(struct bus_type *bus, char *buf)
1334{
1335	return scnprintf(buf, PAGE_SIZE, "%llu\n",
1336			 atomic64_read(&ap_scan_bus_count));
 
1337}
1338
1339static BUS_ATTR_RO(scans);
1340
1341static ssize_t bindings_show(struct bus_type *bus, char *buf)
 
 
 
1342{
1343	int rc;
1344	unsigned int apqns, n;
1345
1346	ap_calc_bound_apqns(&apqns, &n);
1347	if (atomic64_read(&ap_scan_bus_count) >= 1 && n == apqns)
1348		rc = scnprintf(buf, PAGE_SIZE, "%u/%u (complete)\n", n, apqns);
1349	else
1350		rc = scnprintf(buf, PAGE_SIZE, "%u/%u\n", n, apqns);
1351
1352	return rc;
 
 
 
 
 
1353}
1354
1355static BUS_ATTR_RO(bindings);
1356
/* All attributes exported by the AP bus under /sys/bus/ap/. */
static struct attribute *ap_bus_attrs[] = {
	&bus_attr_ap_domain.attr,
	&bus_attr_ap_control_domain_mask.attr,
	&bus_attr_ap_usage_domain_mask.attr,
	&bus_attr_ap_adapter_mask.attr,
	&bus_attr_config_time.attr,
	&bus_attr_poll_thread.attr,
	&bus_attr_ap_interrupts.attr,
	&bus_attr_poll_timeout.attr,
	&bus_attr_ap_max_domain_id.attr,
	&bus_attr_ap_max_adapter_id.attr,
	&bus_attr_apmask.attr,
	&bus_attr_aqmask.attr,
	&bus_attr_scans.attr,
	&bus_attr_bindings.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ap_bus);
1375
/* The one and only AP bus type, registered as /sys/bus/ap. */
static struct bus_type ap_bus_type = {
	.name = "ap",
	.bus_groups = ap_bus_groups,
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
	.probe = ap_device_probe,
	.remove = ap_device_remove,
};
1384
1385/**
1386 * ap_select_domain(): Select an AP domain if possible and we haven't
1387 * already done so before.
 
 
 
 
1388 */
1389static void ap_select_domain(void)
1390{
1391	struct ap_queue_status status;
1392	int card, dom;
1393
1394	/*
1395	 * Choose the default domain. Either the one specified with
1396	 * the "domain=" parameter or the first domain with at least
1397	 * one valid APQN.
1398	 */
1399	spin_lock_bh(&ap_domain_lock);
1400	if (ap_domain_index >= 0) {
1401		/* Domain has already been selected. */
1402		goto out;
1403	}
1404	for (dom = 0; dom <= ap_max_domain_id; dom++) {
1405		if (!ap_test_config_usage_domain(dom) ||
1406		    !test_bit_inv(dom, ap_perms.aqm))
1407			continue;
1408		for (card = 0; card <= ap_max_adapter_id; card++) {
1409			if (!ap_test_config_card_id(card) ||
1410			    !test_bit_inv(card, ap_perms.apm))
1411				continue;
1412			status = ap_test_queue(AP_MKQID(card, dom),
1413					       ap_apft_available(),
1414					       NULL);
1415			if (status.response_code == AP_RESPONSE_NORMAL)
1416				break;
1417		}
1418		if (card <= ap_max_adapter_id)
1419			break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1420	}
1421	if (dom <= ap_max_domain_id) {
1422		ap_domain_index = dom;
1423		AP_DBF_INFO("%s new default domain is %d\n",
1424			    __func__, ap_domain_index);
1425	}
1426out:
1427	spin_unlock_bh(&ap_domain_lock);
1428}
1429
1430/*
1431 * This function checks the type and returns either 0 for not
1432 * supported or the highest compatible type value (which may
1433 * include the input type value).
1434 */
1435static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
1436{
1437	int comp_type = 0;
1438
1439	/* < CEX2A is not supported */
1440	if (rawtype < AP_DEVICE_TYPE_CEX2A) {
1441		AP_DBF_WARN("get_comp_type queue=%02x.%04x unsupported type %d\n",
1442			    AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
 
 
1443		return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1444	}
1445	/* up to CEX7 known and fully supported */
1446	if (rawtype <= AP_DEVICE_TYPE_CEX7)
1447		return rawtype;
1448	/*
1449	 * unknown new type > CEX7, check for compatibility
1450	 * to the highest known and supported type which is
1451	 * currently CEX7 with the help of the QACT function.
1452	 */
1453	if (ap_qact_available()) {
1454		struct ap_queue_status status;
1455		union ap_qact_ap_info apinfo = {0};
1456
1457		apinfo.mode = (func >> 26) & 0x07;
1458		apinfo.cat = AP_DEVICE_TYPE_CEX7;
1459		status = ap_qact(qid, 0, &apinfo);
1460		if (status.response_code == AP_RESPONSE_NORMAL
1461		    && apinfo.cat >= AP_DEVICE_TYPE_CEX2A
1462		    && apinfo.cat <= AP_DEVICE_TYPE_CEX7)
1463			comp_type = apinfo.cat;
1464	}
1465	if (!comp_type)
1466		AP_DBF_WARN("get_comp_type queue=%02x.%04x unable to map type %d\n",
1467			    AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
1468	else if (comp_type != rawtype)
1469		AP_DBF_INFO("get_comp_type queue=%02x.%04x map type %d to %d\n",
1470			    AP_QID_CARD(qid), AP_QID_QUEUE(qid),
1471			    rawtype, comp_type);
1472	return comp_type;
1473}
1474
1475/*
1476 * Helper function to be used with bus_find_dev
1477 * matches for the card device with the given id
 
 
 
 
 
 
1478 */
1479static int __match_card_device_with_id(struct device *dev, const void *data)
1480{
1481	return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *) data;
 
 
 
 
 
1482}
1483
1484/*
1485 * Helper function to be used with bus_find_dev
1486 * matches for the queue device with a given qid
 
 
 
1487 */
1488static int __match_queue_device_with_qid(struct device *dev, const void *data)
1489{
1490	return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1491}
1492
1493/*
1494 * Helper function to be used with bus_find_dev
1495 * matches any queue device with given queue id
1496 */
1497static int __match_queue_device_with_queue_id(struct device *dev, const void *data)
1498{
1499	return is_queue_dev(dev)
1500		&& AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long) data;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1501}
 
1502
1503/*
1504 * Helper function for ap_scan_bus().
1505 * Remove card device and associated queue devices.
 
 
1506 */
1507static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac)
1508{
1509	bus_for_each_dev(&ap_bus_type, NULL,
1510			 (void *)(long) ac->id,
1511			 __ap_queue_devices_with_id_unregister);
1512	device_unregister(&ac->ap_dev.device);
1513}
1514
1515/*
1516 * Helper function for ap_scan_bus().
1517 * Does the scan bus job for all the domains within
1518 * a valid adapter given by an ap_card ptr.
 
 
1519 */
1520static inline void ap_scan_domains(struct ap_card *ac)
1521{
1522	bool decfg;
1523	ap_qid_t qid;
1524	unsigned int func;
1525	struct device *dev;
1526	struct ap_queue *aq;
1527	int rc, dom, depth, type, ml;
1528
1529	/*
1530	 * Go through the configuration for the domains and compare them
1531	 * to the existing queue devices. Also take care of the config
1532	 * and error state for the queue devices.
1533	 */
1534
1535	for (dom = 0; dom <= ap_max_domain_id; dom++) {
1536		qid = AP_MKQID(ac->id, dom);
1537		dev = bus_find_device(&ap_bus_type, NULL,
1538				      (void *)(long) qid,
1539				      __match_queue_device_with_qid);
1540		aq = dev ? to_ap_queue(dev) : NULL;
1541		if (!ap_test_config_usage_domain(dom)) {
1542			if (dev) {
1543				AP_DBF_INFO("%s(%d,%d) not in config any more, rm queue device\n",
1544					    __func__, ac->id, dom);
1545				device_unregister(dev);
1546				put_device(dev);
1547			}
1548			continue;
1549		}
1550		/* domain is valid, get info from this APQN */
1551		if (!ap_queue_info(qid, &type, &func, &depth, &ml, &decfg)) {
1552			if (aq) {
1553				AP_DBF_INFO(
1554					"%s(%d,%d) ap_queue_info() not successful, rm queue device\n",
1555					__func__, ac->id, dom);
1556				device_unregister(dev);
1557				put_device(dev);
1558			}
1559			continue;
1560		}
1561		/* if no queue device exists, create a new one */
1562		if (!aq) {
1563			aq = ap_queue_create(qid, ac->ap_dev.device_type);
1564			if (!aq) {
1565				AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n",
1566					    __func__, ac->id, dom);
1567				continue;
1568			}
1569			aq->card = ac;
1570			aq->config = !decfg;
1571			dev = &aq->ap_dev.device;
1572			dev->bus = &ap_bus_type;
1573			dev->parent = &ac->ap_dev.device;
1574			dev_set_name(dev, "%02x.%04x", ac->id, dom);
1575			/* register queue device */
1576			rc = device_register(dev);
1577			if (rc) {
1578				AP_DBF_WARN("%s(%d,%d) device_register() failed\n",
1579					    __func__, ac->id, dom);
1580				goto put_dev_and_continue;
1581			}
1582			/* get it and thus adjust reference counter */
1583			get_device(dev);
1584			if (decfg)
1585				AP_DBF_INFO("%s(%d,%d) new (decfg) queue device created\n",
1586					    __func__, ac->id, dom);
1587			else
1588				AP_DBF_INFO("%s(%d,%d) new queue device created\n",
1589					    __func__, ac->id, dom);
1590			goto put_dev_and_continue;
1591		}
1592		/* Check config state on the already existing queue device */
1593		spin_lock_bh(&aq->lock);
1594		if (decfg && aq->config) {
1595			/* config off this queue device */
1596			aq->config = false;
1597			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
1598				aq->dev_state = AP_DEV_STATE_ERROR;
1599				aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
1600			}
1601			spin_unlock_bh(&aq->lock);
1602			AP_DBF_INFO("%s(%d,%d) queue device config off\n",
1603				    __func__, ac->id, dom);
1604			ap_send_config_uevent(&aq->ap_dev, aq->config);
1605			/* 'receive' pending messages with -EAGAIN */
1606			ap_flush_queue(aq);
1607			goto put_dev_and_continue;
1608		}
1609		if (!decfg && !aq->config) {
1610			/* config on this queue device */
1611			aq->config = true;
1612			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
1613				aq->dev_state = AP_DEV_STATE_OPERATING;
1614				aq->sm_state = AP_SM_STATE_RESET_START;
1615			}
1616			spin_unlock_bh(&aq->lock);
1617			AP_DBF_INFO("%s(%d,%d) queue device config on\n",
1618				    __func__, ac->id, dom);
1619			ap_send_config_uevent(&aq->ap_dev, aq->config);
1620			goto put_dev_and_continue;
1621		}
1622		/* handle other error states */
1623		if (!decfg && aq->dev_state == AP_DEV_STATE_ERROR) {
1624			spin_unlock_bh(&aq->lock);
1625			/* 'receive' pending messages with -EAGAIN */
1626			ap_flush_queue(aq);
1627			/* re-init (with reset) the queue device */
1628			ap_queue_init_state(aq);
1629			AP_DBF_INFO("%s(%d,%d) queue device reinit enforced\n",
1630				    __func__, ac->id, dom);
1631			goto put_dev_and_continue;
1632		}
1633		spin_unlock_bh(&aq->lock);
1634put_dev_and_continue:
1635		put_device(dev);
1636	}
 
1637}
1638
1639/*
1640 * Helper function for ap_scan_bus().
1641 * Does the scan bus job for the given adapter id.
 
 
 
 
1642 */
1643static inline void ap_scan_adapter(int ap)
1644{
1645	bool decfg;
1646	ap_qid_t qid;
1647	unsigned int func;
1648	struct device *dev;
1649	struct ap_card *ac;
1650	int rc, dom, depth, type, comp_type, ml;
1651
1652	/* Is there currently a card device for this adapter ? */
1653	dev = bus_find_device(&ap_bus_type, NULL,
1654			      (void *)(long) ap,
1655			      __match_card_device_with_id);
1656	ac = dev ? to_ap_card(dev) : NULL;
1657
1658	/* Adapter not in configuration ? */
1659	if (!ap_test_config_card_id(ap)) {
1660		if (ac) {
1661			AP_DBF_INFO("%s(%d) ap not in config any more, rm card and queue devices\n",
1662				    __func__, ap);
1663			ap_scan_rm_card_dev_and_queue_devs(ac);
1664			put_device(dev);
1665		}
1666		return;
1667	}
1668
1669	/*
1670	 * Adapter ap is valid in the current configuration. So do some checks:
1671	 * If no card device exists, build one. If a card device exists, check
1672	 * for type and functions changed. For all this we need to find a valid
1673	 * APQN first.
1674	 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1675
1676	for (dom = 0; dom <= ap_max_domain_id; dom++)
1677		if (ap_test_config_usage_domain(dom)) {
1678			qid = AP_MKQID(ap, dom);
1679			if (ap_queue_info(qid, &type, &func,
1680					  &depth, &ml, &decfg))
1681				break;
1682		}
1683	if (dom > ap_max_domain_id) {
1684		/* Could not find a valid APQN for this adapter */
1685		if (ac) {
1686			AP_DBF_INFO(
1687				"%s(%d) no type info (no APQN found), rm card and queue devices\n",
1688				__func__, ap);
1689			ap_scan_rm_card_dev_and_queue_devs(ac);
1690			put_device(dev);
1691		} else {
1692			AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n",
1693				   __func__, ap);
 
 
 
 
 
 
1694		}
1695		return;
1696	}
1697	if (!type) {
1698		/* No apdater type info available, an unusable adapter */
1699		if (ac) {
1700			AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devices\n",
1701				    __func__, ap);
1702			ap_scan_rm_card_dev_and_queue_devs(ac);
1703			put_device(dev);
1704		} else {
1705			AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n",
1706				   __func__, ap);
 
 
 
 
1707		}
1708		return;
1709	}
 
 
 
 
1710
1711	if (ac) {
1712		/* Check APQN against existing card device for changes */
1713		if (ac->raw_hwtype != type) {
1714			AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devices\n",
1715				    __func__, ap, type);
1716			ap_scan_rm_card_dev_and_queue_devs(ac);
1717			put_device(dev);
1718			ac = NULL;
1719		} else if (ac->functions != func) {
1720			AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devices\n",
1721				    __func__, ap, type);
1722			ap_scan_rm_card_dev_and_queue_devs(ac);
1723			put_device(dev);
1724			ac = NULL;
1725		} else {
1726			if (decfg && ac->config) {
1727				ac->config = false;
1728				AP_DBF_INFO("%s(%d) card device config off\n",
1729					    __func__, ap);
1730				ap_send_config_uevent(&ac->ap_dev, ac->config);
1731			}
1732			if (!decfg && !ac->config) {
1733				ac->config = true;
1734				AP_DBF_INFO("%s(%d) card device config on\n",
1735					    __func__, ap);
1736				ap_send_config_uevent(&ac->ap_dev, ac->config);
1737			}
1738		}
1739	}
 
 
 
 
 
1740
1741	if (!ac) {
1742		/* Build a new card device */
1743		comp_type = ap_get_compatible_type(qid, type, func);
1744		if (!comp_type) {
1745			AP_DBF_WARN("%s(%d) type %d, can't get compatibility type\n",
1746				    __func__, ap, type);
1747			return;
1748		}
1749		ac = ap_card_create(ap, depth, type, comp_type, func, ml);
1750		if (!ac) {
1751			AP_DBF_WARN("%s(%d) ap_card_create() failed\n",
1752				    __func__, ap);
1753			return;
1754		}
1755		ac->config = !decfg;
1756		dev = &ac->ap_dev.device;
1757		dev->bus = &ap_bus_type;
1758		dev->parent = ap_root_device;
1759		dev_set_name(dev, "card%02x", ap);
1760		/* maybe enlarge ap_max_msg_size to support this card */
1761		if (ac->maxmsgsize > atomic_read(&ap_max_msg_size)) {
1762			atomic_set(&ap_max_msg_size, ac->maxmsgsize);
1763			AP_DBF_INFO("%s(%d) ap_max_msg_size update to %d byte\n",
1764				    __func__, ap, atomic_read(&ap_max_msg_size));
1765		}
1766		/* Register the new card device with AP bus */
1767		rc = device_register(dev);
1768		if (rc) {
1769			AP_DBF_WARN("%s(%d) device_register() failed\n",
1770				    __func__, ap);
1771			put_device(dev);
1772			return;
1773		}
1774		/* get it and thus adjust reference counter */
1775		get_device(dev);
1776		if (decfg)
1777			AP_DBF_INFO("%s(%d) new (decfg) card device type=%d func=0x%08x created\n",
1778				    __func__, ap, type, func);
1779		else
1780			AP_DBF_INFO("%s(%d) new card device type=%d func=0x%08x created\n",
1781				    __func__, ap, type, func);
1782	}
1783
1784	/* Verify the domains and the queue devices for this card */
1785	ap_scan_domains(ac);
1786
1787	/* release the card device */
1788	put_device(&ac->ap_dev.device);
1789}
1790
1791/**
1792 * ap_scan_bus(): Scan the AP bus for new devices
1793 * Runs periodically, workqueue timer (ap_config_time)
 
 
1794 */
1795static void ap_scan_bus(struct work_struct *unused)
1796{
1797	int ap;
1798
1799	ap_fetch_qci_info(ap_qci_info);
1800	ap_select_domain();
1801
1802	AP_DBF_DBG("%s running\n", __func__);
1803
1804	/* loop over all possible adapters */
1805	for (ap = 0; ap <= ap_max_adapter_id; ap++)
1806		ap_scan_adapter(ap);
1807
1808	/* check if there is at least one queue available with default domain */
1809	if (ap_domain_index >= 0) {
1810		struct device *dev =
1811			bus_find_device(&ap_bus_type, NULL,
1812					(void *)(long) ap_domain_index,
1813					__match_queue_device_with_queue_id);
1814		if (dev)
1815			put_device(dev);
1816		else
1817			AP_DBF_INFO("no queue device with default domain %d available\n",
1818				    ap_domain_index);
1819	}
1820
1821	if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
1822		AP_DBF(DBF_DEBUG, "%s init scan complete\n", __func__);
1823		ap_send_init_scan_done_uevent();
1824		ap_check_bindings_complete();
1825	}
1826
1827	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
1828}
1829
/* Rescan timer callback: defer the bus scan to the system long workqueue. */
static void ap_config_timeout(struct timer_list *unused)
{
	queue_work(system_long_wq, &ap_scan_work);
}
1834
/* Register the "ap" s390 debug feature and set the initial level to ERR. */
static int __init ap_debug_init(void)
{
	ap_dbf_info = debug_register("ap", 1, 1,
				     DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(ap_dbf_info, &debug_sprintf_view);
	debug_set_level(ap_dbf_info, DBF_ERR);

	return 0;
}
1844
/*
 * Initialize the AP permission masks (ioctl, adapter and domain).
 * Default is "everything allowed"; the apm=/aqm= kernel parameter
 * strings, if given, restrict the respective mask.
 */
static void __init ap_perms_init(void)
{
	/* all resources useable if no kernel parameter string given */
	memset(&ap_perms.ioctlm, 0xFF, sizeof(ap_perms.ioctlm));
	memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm));
	memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm));

	/* apm kernel parameter string */
	if (apm_str) {
		memset(&ap_perms.apm, 0, sizeof(ap_perms.apm));
		ap_parse_mask_str(apm_str, ap_perms.apm, AP_DEVICES,
				  &ap_perms_mutex);
	}

	/* aqm kernel parameter string */
	if (aqm_str) {
		memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm));
		ap_parse_mask_str(aqm_str, ap_perms.aqm, AP_DOMAINS,
				  &ap_perms_mutex);
	}
}
1866
1867/**
1868 * ap_module_init(): The module initialization code.
1869 *
1870 * Initializes the module.
1871 */
1872static int __init ap_module_init(void)
1873{
1874	int rc;
1875
1876	rc = ap_debug_init();
1877	if (rc)
1878		return rc;
 
 
 
 
 
 
 
1879
1880	if (!ap_instructions_available()) {
1881		pr_warn("The hardware system does not support AP instructions\n");
 
1882		return -ENODEV;
1883	}
1884
1885	/* init ap_queue hashtable */
1886	hash_init(ap_queues);
1887
1888	/* set up the AP permissions (ioctls, ap and aq masks) */
1889	ap_perms_init();
1890
1891	/* Get AP configuration data if available */
1892	ap_init_qci_info();
1893
1894	/* check default domain setting */
1895	if (ap_domain_index < -1 || ap_domain_index > ap_max_domain_id ||
1896	    (ap_domain_index >= 0 &&
1897	     !test_bit_inv(ap_domain_index, ap_perms.aqm))) {
1898		pr_warn("%d is not a valid cryptographic domain\n",
1899			ap_domain_index);
1900		ap_domain_index = -1;
1901	}
1902
1903	/* enable interrupts if available */
1904	if (ap_interrupts_available()) {
1905		rc = register_adapter_interrupt(&ap_airq);
1906		ap_irq_flag = (rc == 0);
1907	}
1908
1909	/* Create /sys/bus/ap. */
1910	rc = bus_register(&ap_bus_type);
1911	if (rc)
1912		goto out;
 
 
 
 
 
1913
1914	/* Create /sys/devices/ap. */
1915	ap_root_device = root_device_register("ap");
1916	rc = PTR_ERR_OR_ZERO(ap_root_device);
1917	if (rc)
1918		goto out_bus;
1919	ap_root_device->bus = &ap_bus_type;
 
 
 
 
 
 
 
 
1920
1921	/* Setup the AP bus rescan timer. */
1922	timer_setup(&ap_config_timer, ap_config_timeout, 0);
 
 
 
 
1923
1924	/*
1925	 * Setup the high resultion poll timer.
1926	 * If we are running under z/VM adjust polling to z/VM polling rate.
1927	 */
1928	if (MACHINE_IS_VM)
1929		poll_timeout = 1500000;
 
1930	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1931	ap_poll_timer.function = ap_poll_timeout;
1932
1933	/* Start the low priority AP bus poll thread. */
1934	if (ap_thread_flag) {
1935		rc = ap_poll_thread_start();
1936		if (rc)
1937			goto out_work;
1938	}
1939
1940	queue_work(system_long_wq, &ap_scan_work);
1941
1942	return 0;
1943
1944out_work:
 
1945	hrtimer_cancel(&ap_poll_timer);
 
 
1946	root_device_unregister(ap_root_device);
1947out_bus:
 
 
1948	bus_unregister(&ap_bus_type);
1949out:
1950	if (ap_irq_flag)
1951		unregister_adapter_interrupt(&ap_airq);
1952	kfree(ap_qci_info);
 
 
1953	return rc;
1954}
1955device_initcall(ap_module_init);