Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright IBM Corp. 2016, 2023
   4 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
   5 *
   6 * Adjunct processor bus, queue related code.
   7 */
   8
   9#define KMSG_COMPONENT "ap"
  10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  11
  12#include <linux/init.h>
  13#include <linux/slab.h>
  14#include <asm/facility.h>
  15
  16#include "ap_bus.h"
  17#include "ap_debug.h"
  18
  19static void __ap_flush_queue(struct ap_queue *aq);
  20
  21/*
  22 * some AP queue helper functions
  23 */
  24
  25static inline bool ap_q_supports_bind(struct ap_queue *aq)
  26{
  27	return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
  28}
  29
  30static inline bool ap_q_supports_assoc(struct ap_queue *aq)
  31{
  32	return aq->card->hwinfo.ep11;
  33}
  34
  35static inline bool ap_q_needs_bind(struct ap_queue *aq)
  36{
  37	return ap_q_supports_bind(aq) && ap_sb_available();
  38}
  39
/**
 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interruption on the AP queue via ap_aqic() with ir=1 and
 * the AP_ISC interruption subclass.
 *
 * Returns: 0 on success, -EPERM when the firmware flags an
 * asynchronous error, -EOPNOTSUPP when the queue is not available
 * (and thus interrupts cannot be registered for it), -EBUSY when the
 * queue is temporarily busy and the caller may retry.
 */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	union ap_qirq_ctrl qirqctrl = { .value = 0 };
	struct ap_queue_status status;

	/* request interruption (ir=1) on our ISC for this queue */
	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
	/* asynchronous error reported by the firmware */
	if (status.async)
		return -EPERM;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		return -EBUSY;
	}
}
  77
  78/**
  79 * __ap_send(): Send message to adjunct processor queue.
  80 * @qid: The AP queue number
  81 * @psmid: The program supplied message identifier
  82 * @msg: The message text
  83 * @msglen: The message length
  84 * @special: Special Bit
  85 *
  86 * Returns AP queue status structure.
  87 * Condition code 1 on NQAP can't happen because the L bit is 1.
  88 * Condition code 2 on NQAP also means the send is incomplete,
  89 * because a segment boundary was reached. The NQAP is repeated.
  90 */
  91static inline struct ap_queue_status
  92__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
  93	  int special)
  94{
  95	if (special)
  96		qid |= 0x400000UL;
  97	return ap_nqap(qid, psmid, msg, msglen);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  98}
 
  99
 100/* State machine definitions and helpers */
 101
/* No-op state machine action: nothing to do, no further polling needed. */
static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}
 106
/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns: the AP queue status of the last DQAP invocation. The caller
 * derives a possible state change from this status.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;
	size_t reslen;
	unsigned long resgr0 = 0;
	int parts = 0;

	/*
	 * DQAP loop until response code and resgr0 indicate that
	 * the msg is totally received. As we use the very same buffer
	 * the msg is overwritten with each invocation. That's intended
	 * and the receiver of the msg is informed with a msg rc code
	 * of EMSGSIZE in such a case.
	 */
	do {
		status = ap_dqap(aq->qid, &aq->reply->psmid,
				 aq->reply->msg, aq->reply->bufsize,
				 &aq->reply->len, &reslen, &resgr0);
		parts++;
	} while (status.response_code == 0xFF && resgr0 != 0);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* one reply was taken off the hardware queue */
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		/* find the pending request matching this reply's psmid */
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			if (parts > 1) {
				/* reply was larger than the buffer and got
				 * overwritten across DQAP parts - tell the
				 * receiver via -EMSGSIZE */
				ap_msg->rc = -EMSGSIZE;
				ap_msg->receive(aq, ap_msg, NULL);
			} else {
				ap_msg->receive(aq, ap_msg, aq->reply);
			}
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}
 179
/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT
 * or AP_SM_WAIT_HIGH_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	/* without a reply buffer there is nothing to receive into */
	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	/* asynchronous error reported - leave the state unchanged */
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			/* more replies expected - poll again right away */
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			return status.irq_enabled ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	/* Check and maybe enable irq support (again) on this queue */
	if (!status.irq_enabled && status.queue_empty) {
		void *lsi_ptr = ap_airq_ptr();

		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) {
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
			return AP_SM_WAIT_AGAIN;
		}
	}
	return AP_SM_WAIT_NONE;
}
 228
/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT,
 * AP_SM_WAIT_HIGH_TIMEOUT or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	/* nothing queued for sending */
	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;

	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	/* asynchronous error reported - leave the state unchanged */
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		/* message is now pending, waiting for its reply */
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->hwinfo.qd) {
			/* hardware queue depth not yet reached, send more */
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return status.irq_enabled ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		/* message is unsendable - complete it with -EINVAL */
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
 290
 291/**
 292 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
 293 * @aq: pointer to the AP queue
 294 *
 295 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 296 */
 297static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
 298{
 299	return min(ap_sm_read(aq), ap_sm_write(aq));
 300}
 301
/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: The AP queue
 *
 * Submit the Reset command (RAPQ) to an AP queue.
 * Returns AP_SM_WAIT_NONE or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid, aq->rapq_fbit);
	/* asynchronous error reported - leave the state unchanged */
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		/* the F bit is consumed by this one RAPQ invocation */
		aq->rapq_fbit = 0;
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
 330
/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	void *lsi_ptr;

	/* Get the status with TAPQ */
	status = ap_test_queue(aq->qid, 1, &hwinfo);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* reset finished - remember the SE bind state from TAPQ */
		aq->se_bstate = hwinfo.bs;
		/* try to (re-)enable interrupts, else resume polling */
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
 371
/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
 411
/**
 * ap_sm_assoc_wait(): Test queue for completion of a pending
 *		       association request.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	/* handle asynchronous error on this queue */
	if (status.async && status.response_code) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	/* any response code worse than busy is a hard error */
	if (status.response_code > AP_RESPONSE_BUSY) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}

	/* update queue's SE bind state */
	aq->se_bstate = hwinfo.bs;

	/* check bs bits */
	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		/* association is through */
		aq->sm_state = AP_SM_STATE_IDLE;
		AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
			   __func__, AP_QID_CARD(aq->qid),
			   AP_QID_QUEUE(aq->qid), aq->assoc_idx);
		return AP_SM_WAIT_NONE;
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		/* association still pending */
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		/* reset from 'outside' happened or no idea at all */
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
 467
/*
 * AP state machine jump table
 *
 * First index is the queue's state machine state, second index the
 * triggering event; each entry is the action function to execute.
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_ASSOC_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};
 501
 502enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
 503{
 504	if (aq->config && !aq->chkstop &&
 505	    aq->dev_state > AP_DEV_STATE_UNINITIATED)
 506		return ap_jumptable[aq->sm_state][event](aq);
 507	else
 508		return AP_SM_WAIT_NONE;
 509}
 510
 511enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
 512{
 513	enum ap_sm_wait wait;
 514
 515	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
 516		;
 517	return wait;
 518}
 519
 520/*
 521 * AP queue related attributes.
 522 */
 523static ssize_t request_count_show(struct device *dev,
 524				  struct device_attribute *attr,
 525				  char *buf)
 526{
 527	struct ap_queue *aq = to_ap_queue(dev);
 528	bool valid = false;
 529	u64 req_cnt;
 530
 531	spin_lock_bh(&aq->lock);
 532	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
 533		req_cnt = aq->total_request_count;
 534		valid = true;
 535	}
 536	spin_unlock_bh(&aq->lock);
 537
 538	if (valid)
 539		return sysfs_emit(buf, "%llu\n", req_cnt);
 540	else
 541		return sysfs_emit(buf, "-\n");
 542}
 543
/* Any write to this attribute resets the total request counter to zero;
 * the written value itself is ignored.
 */
static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}
 556
 557static DEVICE_ATTR_RW(request_count);
 558
 559static ssize_t requestq_count_show(struct device *dev,
 560				   struct device_attribute *attr, char *buf)
 561{
 562	struct ap_queue *aq = to_ap_queue(dev);
 563	unsigned int reqq_cnt = 0;
 564
 565	spin_lock_bh(&aq->lock);
 566	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
 567		reqq_cnt = aq->requestq_count;
 568	spin_unlock_bh(&aq->lock);
 569	return sysfs_emit(buf, "%d\n", reqq_cnt);
 570}
 571
 572static DEVICE_ATTR_RO(requestq_count);
 573
 574static ssize_t pendingq_count_show(struct device *dev,
 575				   struct device_attribute *attr, char *buf)
 576{
 577	struct ap_queue *aq = to_ap_queue(dev);
 578	unsigned int penq_cnt = 0;
 579
 580	spin_lock_bh(&aq->lock);
 581	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
 582		penq_cnt = aq->pendingq_count;
 583	spin_unlock_bh(&aq->lock);
 584	return sysfs_emit(buf, "%d\n", penq_cnt);
 585}
 586
 587static DEVICE_ATTR_RO(pendingq_count);
 588
 589static ssize_t reset_show(struct device *dev,
 590			  struct device_attribute *attr, char *buf)
 591{
 592	struct ap_queue *aq = to_ap_queue(dev);
 593	int rc = 0;
 594
 595	spin_lock_bh(&aq->lock);
 596	switch (aq->sm_state) {
 597	case AP_SM_STATE_RESET_START:
 598	case AP_SM_STATE_RESET_WAIT:
 599		rc = sysfs_emit(buf, "Reset in progress.\n");
 600		break;
 601	case AP_SM_STATE_WORKING:
 602	case AP_SM_STATE_QUEUE_FULL:
 603		rc = sysfs_emit(buf, "Reset Timer armed.\n");
 604		break;
 605	default:
 606		rc = sysfs_emit(buf, "No Reset Timer set.\n");
 607	}
 608	spin_unlock_bh(&aq->lock);
 609	return rc;
 610}
 611
/* Any write to this attribute flushes the queue and triggers a reset;
 * the written value itself is ignored.
 */
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	/* drop all queued and pending messages, then kick off the reset */
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
		    __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}
 629
 630static DEVICE_ATTR_RW(reset);
 631
 632static ssize_t interrupt_show(struct device *dev,
 633			      struct device_attribute *attr, char *buf)
 634{
 635	struct ap_queue *aq = to_ap_queue(dev);
 636	struct ap_queue_status status;
 637	int rc = 0;
 638
 639	spin_lock_bh(&aq->lock);
 640	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) {
 641		rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
 642	} else {
 643		status = ap_tapq(aq->qid, NULL);
 644		if (status.irq_enabled)
 645			rc = sysfs_emit(buf, "Interrupts enabled.\n");
 646		else
 647			rc = sysfs_emit(buf, "Interrupts disabled.\n");
 648	}
 649	spin_unlock_bh(&aq->lock);
 650
 651	return rc;
 652}
 653
 654static DEVICE_ATTR_RO(interrupt);
 655
 656static ssize_t config_show(struct device *dev,
 657			   struct device_attribute *attr, char *buf)
 658{
 659	struct ap_queue *aq = to_ap_queue(dev);
 660	int rc;
 661
 662	spin_lock_bh(&aq->lock);
 663	rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
 664	spin_unlock_bh(&aq->lock);
 665	return rc;
 666}
 667
 668static DEVICE_ATTR_RO(config);
 669
 670static ssize_t chkstop_show(struct device *dev,
 671			    struct device_attribute *attr, char *buf)
 672{
 673	struct ap_queue *aq = to_ap_queue(dev);
 674	int rc;
 675
 676	spin_lock_bh(&aq->lock);
 677	rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
 678	spin_unlock_bh(&aq->lock);
 679	return rc;
 680}
 681
 682static DEVICE_ATTR_RO(chkstop);
 683
/* Show the facility bits of this queue as reported by TAPQ,
 * or -EIO when the queue does not answer with a usable status.
 */
static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	/* response codes beyond busy mean no valid hwinfo was returned */
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return sysfs_emit(buf, "0x%08X\n", hwinfo.fac);
}
 701
 702static DEVICE_ATTR_RO(ap_functions);
 703
 704#ifdef CONFIG_ZCRYPT_DEBUG
/* Debug attribute: show the device state and, if initialized, the
 * state machine state of this queue in one line.
 */
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = sysfs_emit(buf, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = sysfs_emit(buf, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = sysfs_emit(buf, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = sysfs_emit(buf, "ERROR");
		break;
	default:
		rc = sysfs_emit(buf, "UNKNOWN");
	}
	/* state machine state - appended for any non-uninitiated state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += sysfs_emit_at(buf, rc, " [FULL]\n");
			break;
		case AP_SM_STATE_ASSOC_WAIT:
			rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
			break;
		default:
			rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
 761static DEVICE_ATTR_RO(states);
 762
 763static ssize_t last_err_rc_show(struct device *dev,
 764				struct device_attribute *attr, char *buf)
 765{
 766	struct ap_queue *aq = to_ap_queue(dev);
 767	int rc;
 768
 769	spin_lock_bh(&aq->lock);
 770	rc = aq->last_err_rc;
 771	spin_unlock_bh(&aq->lock);
 772
 773	switch (rc) {
 774	case AP_RESPONSE_NORMAL:
 775		return sysfs_emit(buf, "NORMAL\n");
 776	case AP_RESPONSE_Q_NOT_AVAIL:
 777		return sysfs_emit(buf, "Q_NOT_AVAIL\n");
 778	case AP_RESPONSE_RESET_IN_PROGRESS:
 779		return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
 780	case AP_RESPONSE_DECONFIGURED:
 781		return sysfs_emit(buf, "DECONFIGURED\n");
 782	case AP_RESPONSE_CHECKSTOPPED:
 783		return sysfs_emit(buf, "CHECKSTOPPED\n");
 784	case AP_RESPONSE_BUSY:
 785		return sysfs_emit(buf, "BUSY\n");
 786	case AP_RESPONSE_INVALID_ADDRESS:
 787		return sysfs_emit(buf, "INVALID_ADDRESS\n");
 788	case AP_RESPONSE_OTHERWISE_CHANGED:
 789		return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
 790	case AP_RESPONSE_Q_FULL:
 791		return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
 792	case AP_RESPONSE_INDEX_TOO_BIG:
 793		return sysfs_emit(buf, "INDEX_TOO_BIG\n");
 794	case AP_RESPONSE_NO_FIRST_PART:
 795		return sysfs_emit(buf, "NO_FIRST_PART\n");
 796	case AP_RESPONSE_MESSAGE_TOO_BIG:
 797		return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
 798	case AP_RESPONSE_REQ_FAC_NOT_INST:
 799		return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
 800	default:
 801		return sysfs_emit(buf, "response code %d\n", rc);
 802	}
 803}
 804static DEVICE_ATTR_RO(last_err_rc);
 805#endif
 806
/* Sysfs attributes present on every AP queue device. */
static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
	&dev_attr_chkstop.attr,
	&dev_attr_ap_functions.attr,
#ifdef CONFIG_ZCRYPT_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

/* Device type for AP queue devices; attaches the attribute groups above. */
static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};
 836
/* Show the SE bind state of this queue: "bound", "unbound", or "-"
 * when the queue does not support bind at all.
 */
static ssize_t se_bind_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_bind(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	/* response codes beyond busy mean no valid hwinfo was returned */
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		return sysfs_emit(buf, "bound\n");
	default:
		return sysfs_emit(buf, "unbound\n");
	}
}
 868
/* Bind (write "1") or unbind (write "0") this queue within a secure
 * execution environment. Unbind flushes the queue and re-initializes
 * its state with the RAPQ F bit set; bind verifies the queue's bind
 * state via TAPQ, issues BAPQ and re-checks the resulting state.
 */
static ssize_t se_bind_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	bool value;
	int rc;

	if (!ap_q_supports_bind(aq))
		return -EINVAL;

	/* only 0 (unbind) and 1 (bind) allowed */
	rc = kstrtobool(buf, &value);
	if (rc)
		return rc;

	if (!value) {
		/* Unbind. Set F bit arg and trigger RAPQ */
		spin_lock_bh(&aq->lock);
		__ap_flush_queue(aq);
		aq->rapq_fbit = 1;
		_ap_queue_init_state(aq);
		rc = count;
		goto out;
	}

	/* Bind. Check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* Update BS state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	/* only a queue available for binding can be bound */
	if (hwinfo.bs != AP_BS_Q_AVAIL_FOR_BINDING) {
		AP_DBF_WARN("%s bind attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* Check SM state */
	if (aq->sm_state < AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* invoke BAPQ */
	status = ap_bapq(aq->qid);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->assoc_idx = ASSOC_IDX_INVALID;

	/* verify SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->se_bstate = hwinfo.bs;
	if (!(hwinfo.bs == AP_BS_Q_USABLE ||
	      hwinfo.bs == AP_BS_Q_USABLE_NO_SECURE_KEY)) {
		AP_DBF_WARN("%s BAPQ success, but bs shows %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	/* SE bind was successful */
	AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
		    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}
 964
/* Show the SE association state of this queue: "associated <idx>",
 * "association pending", "unassociated", or "-" when the queue does
 * not support associate at all.
 */
static ssize_t se_associate_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_assoc(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	/* response codes beyond busy mean no valid hwinfo was returned */
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		/* usable without a stored assoc_idx is inconsistent */
		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
			return -EIO;
		}
		return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		if (aq->assoc_idx != ASSOC_IDX_INVALID)
			return sysfs_emit(buf, "association pending\n");
		fallthrough;
	default:
		return sysfs_emit(buf, "unassociated\n");
	}
}
1003
/* Trigger an SE association of this queue with the association index
 * written to the attribute. The queue must be bound (without secure
 * key) and idle; the actual association completes asynchronously and
 * is tracked by the AP_SM_STATE_ASSOC_WAIT state machine state.
 */
static ssize_t se_associate_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	unsigned int value;
	int rc;

	if (!ap_q_supports_assoc(aq))
		return -EINVAL;

	/* association index needs to be >= 0 */
	rc = kstrtouint(buf, 0, &value);
	if (rc)
		return rc;
	if (value >= ASSOC_IDX_INVALID)
		return -EINVAL;

	/* check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	/* association requires the bound-without-secure-key state */
	if (hwinfo.bs != AP_BS_Q_USABLE_NO_SECURE_KEY) {
		AP_DBF_WARN("%s association attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* check SM state */
	if (aq->sm_state != AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* trigger the asynchronous association request */
	status = ap_aapq(aq->qid, value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		/* completion is polled via the ASSOC_WAIT state */
		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
		aq->assoc_idx = value;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		break;
	default:
		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}
1071
static DEVICE_ATTR_RW(se_associate);

/*
 * Optional SE ("secure binding") sysfs attributes. This group is only
 * attached to the queue device when running as a protected virtualization
 * guest with the SB facility available (see ap_queue_create()).
 */
static struct attribute *ap_queue_dev_sb_attrs[] = {
	&dev_attr_se_bind.attr,
	&dev_attr_se_associate.attr,
	NULL
};

static struct attribute_group ap_queue_dev_sb_attr_group = {
	.attrs = ap_queue_dev_sb_attrs
};

static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
	&ap_queue_dev_sb_attr_group,
	NULL
};
1088
1089static void ap_queue_device_release(struct device *dev)
1090{
1091	struct ap_queue *aq = to_ap_queue(dev);
1092
1093	spin_lock_bh(&ap_queues_lock);
1094	hash_del(&aq->hnode);
1095	spin_unlock_bh(&ap_queues_lock);
1096
1097	kfree(aq);
1098}
1099
/**
 * ap_queue_create(): Allocate and pre-initialize an AP queue object.
 * @qid: The qid (card/queue id) for the new AP queue
 * @device_type: The device type for the new AP queue
 *
 * Returns the new struct ap_queue on success, NULL on allocation failure.
 * The caller is responsible for registering the embedded device; the
 * release callback then frees the object.
 */
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = device_type;
	// add optional SE secure binding attributes group
	if (ap_sb_available() && is_prot_virt_guest())
		aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
	aq->qid = qid;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}
1121
/**
 * ap_queue_init_reply(): Attach a reply buffer to an AP queue and poll.
 * @aq: The AP queue
 * @reply: The ap_message used as reply buffer for this queue
 *
 * With a reply buffer in place the state machine can actually receive
 * replies, so kick it once with a POLL event.
 */
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);
1131
1132/**
1133 * ap_queue_message(): Queue a request to an AP device.
1134 * @aq: The AP device to queue the message to
1135 * @ap_msg: The message that is to be added
1136 */
1137int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
1138{
1139	int rc = 0;
1140
1141	/* msg needs to have a valid receive-callback */
1142	BUG_ON(!ap_msg->receive);
1143
1144	spin_lock_bh(&aq->lock);
1145
1146	/* only allow to queue new messages if device state is ok */
1147	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
1148		list_add_tail(&ap_msg->list, &aq->requestq);
1149		aq->requestq_count++;
1150		aq->total_request_count++;
1151		atomic64_inc(&aq->card->total_request_count);
1152	} else {
1153		rc = -ENODEV;
1154	}
1155
1156	/* Send/receive as many request from the queue as possible. */
1157	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
1158
1159	spin_unlock_bh(&aq->lock);
1160
1161	return rc;
1162}
1163EXPORT_SYMBOL(ap_queue_message);
1164
1165/**
1166 * ap_queue_usable(): Check if queue is usable just now.
1167 * @aq: The AP queue device to test for usability.
1168 * This function is intended for the scheduler to query if it makes
1169 * sense to enqueue a message into this AP queue device by calling
1170 * ap_queue_message(). The perspective is very short-term as the
1171 * state machine and device state(s) may change at any time.
1172 */
1173bool ap_queue_usable(struct ap_queue *aq)
1174{
1175	bool rc = true;
1176
1177	spin_lock_bh(&aq->lock);
1178
1179	/* check for not configured or checkstopped */
1180	if (!aq->config || aq->chkstop) {
1181		rc = false;
1182		goto unlock_and_out;
1183	}
1184
1185	/* device state needs to be ok */
1186	if (aq->dev_state != AP_DEV_STATE_OPERATING) {
1187		rc = false;
1188		goto unlock_and_out;
1189	}
1190
1191	/* SE guest's queues additionally need to be bound */
1192	if (ap_q_needs_bind(aq) &&
1193	    !(aq->se_bstate == AP_BS_Q_USABLE ||
1194	      aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY))
1195		rc = false;
1196
1197unlock_and_out:
1198	spin_unlock_bh(&aq->lock);
1199	return rc;
1200}
1201EXPORT_SYMBOL(ap_queue_usable);
1202
1203/**
1204 * ap_cancel_message(): Cancel a crypto request.
1205 * @aq: The AP device that has the message queued
1206 * @ap_msg: The message that is to be removed
1207 *
1208 * Cancel a crypto request. This is done by removing the request
1209 * from the device pending or request queue. Note that the
1210 * request stays on the AP queue. When it finishes the message
1211 * reply will be discarded because the psmid can't be found.
1212 */
1213void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
1214{
1215	struct ap_message *tmp;
1216
1217	spin_lock_bh(&aq->lock);
1218	if (!list_empty(&ap_msg->list)) {
1219		list_for_each_entry(tmp, &aq->pendingq, list)
1220			if (tmp->psmid == ap_msg->psmid) {
1221				aq->pendingq_count--;
1222				goto found;
1223			}
1224		aq->requestq_count--;
1225found:
1226		list_del_init(&ap_msg->list);
1227	}
1228	spin_unlock_bh(&aq->lock);
1229}
1230EXPORT_SYMBOL(ap_cancel_message);
1231
1232/**
1233 * __ap_flush_queue(): Flush requests.
1234 * @aq: Pointer to the AP queue
1235 *
1236 * Flush all requests from the request/pending queue of an AP device.
1237 */
1238static void __ap_flush_queue(struct ap_queue *aq)
1239{
1240	struct ap_message *ap_msg, *next;
1241
1242	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
1243		list_del_init(&ap_msg->list);
1244		aq->pendingq_count--;
1245		ap_msg->rc = -EAGAIN;
1246		ap_msg->receive(aq, ap_msg, NULL);
1247	}
1248	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
1249		list_del_init(&ap_msg->list);
1250		aq->requestq_count--;
1251		ap_msg->rc = -EAGAIN;
1252		ap_msg->receive(aq, ap_msg, NULL);
1253	}
1254	aq->queue_count = 0;
1255}
1256
1257void ap_flush_queue(struct ap_queue *aq)
1258{
1259	spin_lock_bh(&aq->lock);
1260	__ap_flush_queue(aq);
1261	spin_unlock_bh(&aq->lock);
1262}
1263EXPORT_SYMBOL(ap_flush_queue);
1264
/**
 * ap_queue_prepare_remove(): First stage of queue removal.
 * @aq: The AP queue
 *
 * Flushes all queued messages and moves the device state to SHUTDOWN
 * so no new messages are accepted, then makes sure the timeout timer
 * is not running any more.
 */
void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	del_timer_sync(&aq->timeout);
}

/**
 * ap_queue_remove(): Second stage of queue removal.
 * @aq: The AP queue
 */
void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * all messages have been flushed and the device state
	 * is SHUTDOWN. Now reset with zero which also clears
	 * the irq registration and move the device state
	 * to the initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid, 0);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}
1289
/*
 * _ap_queue_init_state(): (Re-)initialize device and state machine state.
 * Caller holds aq->lock (see ap_queue_init_state() below).
 * Starts the reset sequence by kicking the state machine from
 * AP_SM_STATE_RESET_START with a POLL event.
 */
void _ap_queue_init_state(struct ap_queue *aq)
{
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	aq->last_err_rc = 0;
	aq->assoc_idx = ASSOC_IDX_INVALID;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
}

/* Locked wrapper around _ap_queue_init_state(). */
void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	_ap_queue_init_state(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);
/* ===== Older revision (v5.9) of this file follows for comparison ===== */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright IBM Corp. 2016
  4 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  5 *
  6 * Adjunct processor bus, queue related code.
  7 */
  8
  9#define KMSG_COMPONENT "ap"
 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 11
 12#include <linux/init.h>
 13#include <linux/slab.h>
 14#include <asm/facility.h>
 15
 16#include "ap_bus.h"
 17#include "ap_debug.h"
 18
 19static void __ap_flush_queue(struct ap_queue *aq);
 20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 21/**
 22 * ap_queue_enable_interruption(): Enable interruption on an AP queue.
 23 * @qid: The AP queue number
 24 * @ind: the notification indicator byte
 25 *
 26 * Enables interruption on AP queue via ap_aqic(). Based on the return
 27 * value it waits a while and tests the AP queue if interrupts
 28 * have been switched on using ap_test_queue().
 29 */
 30static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
 31{
 
 32	struct ap_queue_status status;
 33	struct ap_qirq_ctrl qirqctrl = { 0 };
 34
 35	qirqctrl.ir = 1;
 36	qirqctrl.isc = AP_ISC;
 37	status = ap_aqic(aq->qid, qirqctrl, ind);
 
 
 38	switch (status.response_code) {
 39	case AP_RESPONSE_NORMAL:
 40	case AP_RESPONSE_OTHERWISE_CHANGED:
 41		return 0;
 42	case AP_RESPONSE_Q_NOT_AVAIL:
 43	case AP_RESPONSE_DECONFIGURED:
 44	case AP_RESPONSE_CHECKSTOPPED:
 45	case AP_RESPONSE_INVALID_ADDRESS:
 46		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
 47		       AP_QID_CARD(aq->qid),
 48		       AP_QID_QUEUE(aq->qid));
 49		return -EOPNOTSUPP;
 50	case AP_RESPONSE_RESET_IN_PROGRESS:
 51	case AP_RESPONSE_BUSY:
 52	default:
 53		return -EBUSY;
 54	}
 55}
 56
 57/**
 58 * __ap_send(): Send message to adjunct processor queue.
 59 * @qid: The AP queue number
 60 * @psmid: The program supplied message identifier
 61 * @msg: The message text
 62 * @length: The message length
 63 * @special: Special Bit
 64 *
 65 * Returns AP queue status structure.
 66 * Condition code 1 on NQAP can't happen because the L bit is 1.
 67 * Condition code 2 on NQAP also means the send is incomplete,
 68 * because a segment boundary was reached. The NQAP is repeated.
 69 */
 70static inline struct ap_queue_status
 71__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
 72	  int special)
 73{
 74	if (special)
 75		qid |= 0x400000UL;
 76	return ap_nqap(qid, psmid, msg, length);
 77}
 78
 79int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
 80{
 81	struct ap_queue_status status;
 82
 83	status = __ap_send(qid, psmid, msg, length, 0);
 84	switch (status.response_code) {
 85	case AP_RESPONSE_NORMAL:
 86		return 0;
 87	case AP_RESPONSE_Q_FULL:
 88	case AP_RESPONSE_RESET_IN_PROGRESS:
 89		return -EBUSY;
 90	case AP_RESPONSE_REQ_FAC_NOT_INST:
 91		return -EINVAL;
 92	default:	/* Device is gone. */
 93		return -ENODEV;
 94	}
 95}
 96EXPORT_SYMBOL(ap_send);
 97
 98int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
 99{
100	struct ap_queue_status status;
101
102	if (msg == NULL)
103		return -EINVAL;
104	status = ap_dqap(qid, psmid, msg, length);
105	switch (status.response_code) {
106	case AP_RESPONSE_NORMAL:
107		return 0;
108	case AP_RESPONSE_NO_PENDING_REPLY:
109		if (status.queue_empty)
110			return -ENOENT;
111		return -EBUSY;
112	case AP_RESPONSE_RESET_IN_PROGRESS:
113		return -EBUSY;
114	default:
115		return -ENODEV;
116	}
117}
118EXPORT_SYMBOL(ap_recv);
119
/* State machine definitions and helpers */

/* No-op state machine action: do nothing, request no wait. */
static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	status = ap_dqap(aq->qid, &aq->reply->psmid,
			 aq->reply->msg, aq->reply->len);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count--;
		/* re-arm the timeout while requests remain on the device */
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		/* match the reply to its pending request by psmid */
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			ap_msg->receive(aq, ap_msg, aq->reply);
			break;
		}
		/* deliberate fallthrough: also handle an empty queue below */
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		/* resubmit everything that was pending on the device */
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}
170
171/**
172 * ap_sm_read(): Receive pending reply messages from an AP queue.
173 * @aq: pointer to the AP queue
174 *
175 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
176 */
177static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
178{
179	struct ap_queue_status status;
180
181	if (!aq->reply)
182		return AP_SM_WAIT_NONE;
183	status = ap_sm_recv(aq);
 
 
184	switch (status.response_code) {
185	case AP_RESPONSE_NORMAL:
186		if (aq->queue_count > 0) {
187			aq->sm_state = AP_SM_STATE_WORKING;
188			return AP_SM_WAIT_AGAIN;
189		}
190		aq->sm_state = AP_SM_STATE_IDLE;
191		return AP_SM_WAIT_NONE;
192	case AP_RESPONSE_NO_PENDING_REPLY:
193		if (aq->queue_count > 0)
194			return AP_SM_WAIT_INTERRUPT;
 
195		aq->sm_state = AP_SM_STATE_IDLE;
196		return AP_SM_WAIT_NONE;
197	default:
198		aq->sm_state = AP_SM_STATE_BORKED;
 
 
 
 
199		return AP_SM_WAIT_NONE;
200	}
 
 
 
 
 
 
 
 
 
 
201}
202
203/**
204 * ap_sm_write(): Send messages from the request queue to an AP queue.
205 * @aq: pointer to the AP queue
206 *
207 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
208 */
209static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
210{
211	struct ap_queue_status status;
212	struct ap_message *ap_msg;
 
213
214	if (aq->requestq_count <= 0)
215		return AP_SM_WAIT_NONE;
 
216	/* Start the next request on the queue. */
217	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
218	status = __ap_send(aq->qid, ap_msg->psmid,
219			   ap_msg->msg, ap_msg->len,
220			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
 
 
221	switch (status.response_code) {
222	case AP_RESPONSE_NORMAL:
223		aq->queue_count++;
224		if (aq->queue_count == 1)
225			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
226		list_move_tail(&ap_msg->list, &aq->pendingq);
227		aq->requestq_count--;
228		aq->pendingq_count++;
229		if (aq->queue_count < aq->card->queue_depth) {
230			aq->sm_state = AP_SM_STATE_WORKING;
231			return AP_SM_WAIT_AGAIN;
232		}
233		fallthrough;
234	case AP_RESPONSE_Q_FULL:
235		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
236		return AP_SM_WAIT_INTERRUPT;
 
237	case AP_RESPONSE_RESET_IN_PROGRESS:
238		aq->sm_state = AP_SM_STATE_RESET_WAIT;
239		return AP_SM_WAIT_TIMEOUT;
 
 
 
240	case AP_RESPONSE_MESSAGE_TOO_BIG:
241	case AP_RESPONSE_REQ_FAC_NOT_INST:
242		list_del_init(&ap_msg->list);
243		aq->requestq_count--;
244		ap_msg->rc = -EINVAL;
245		ap_msg->receive(aq, ap_msg, NULL);
246		return AP_SM_WAIT_AGAIN;
247	default:
248		aq->sm_state = AP_SM_STATE_BORKED;
 
 
 
 
249		return AP_SM_WAIT_NONE;
250	}
251}
252
253/**
254 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
255 * @aq: pointer to the AP queue
256 *
257 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
258 */
259static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
260{
261	return min(ap_sm_read(aq), ap_sm_write(aq));
262}
263
264/**
265 * ap_sm_reset(): Reset an AP queue.
266 * @qid: The AP queue number
267 *
268 * Submit the Reset command to an AP queue.
269 */
270static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
271{
272	struct ap_queue_status status;
273
274	status = ap_rapq(aq->qid);
 
 
275	switch (status.response_code) {
276	case AP_RESPONSE_NORMAL:
277	case AP_RESPONSE_RESET_IN_PROGRESS:
278		aq->sm_state = AP_SM_STATE_RESET_WAIT;
279		aq->interrupt = AP_INTR_DISABLED;
280		return AP_SM_WAIT_TIMEOUT;
281	case AP_RESPONSE_BUSY:
282		return AP_SM_WAIT_TIMEOUT;
283	case AP_RESPONSE_Q_NOT_AVAIL:
284	case AP_RESPONSE_DECONFIGURED:
285	case AP_RESPONSE_CHECKSTOPPED:
286	default:
287		aq->sm_state = AP_SM_STATE_BORKED;
 
 
 
 
288		return AP_SM_WAIT_NONE;
289	}
290}
291
292/**
293 * ap_sm_reset_wait(): Test queue for completion of the reset operation
294 * @aq: pointer to the AP queue
295 *
296 * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
297 */
298static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
299{
300	struct ap_queue_status status;
 
301	void *lsi_ptr;
302
303	if (aq->queue_count > 0 && aq->reply)
304		/* Try to read a completed message and get the status */
305		status = ap_sm_recv(aq);
306	else
307		/* Get the status with TAPQ */
308		status = ap_tapq(aq->qid, NULL);
309
310	switch (status.response_code) {
311	case AP_RESPONSE_NORMAL:
 
312		lsi_ptr = ap_airq_ptr();
313		if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
314			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
315		else
316			aq->sm_state = (aq->queue_count > 0) ?
317				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
318		return AP_SM_WAIT_AGAIN;
319	case AP_RESPONSE_BUSY:
320	case AP_RESPONSE_RESET_IN_PROGRESS:
321		return AP_SM_WAIT_TIMEOUT;
322	case AP_RESPONSE_Q_NOT_AVAIL:
323	case AP_RESPONSE_DECONFIGURED:
324	case AP_RESPONSE_CHECKSTOPPED:
325	default:
326		aq->sm_state = AP_SM_STATE_BORKED;
 
 
 
 
327		return AP_SM_WAIT_NONE;
328	}
329}
330
331/**
332 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
333 * @aq: pointer to the AP queue
334 *
335 * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
336 */
337static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
338{
339	struct ap_queue_status status;
340
341	if (aq->queue_count > 0 && aq->reply)
342		/* Try to read a completed message and get the status */
343		status = ap_sm_recv(aq);
344	else
345		/* Get the status with TAPQ */
346		status = ap_tapq(aq->qid, NULL);
347
348	if (status.irq_enabled == 1) {
349		/* Irqs are now enabled */
350		aq->interrupt = AP_INTR_ENABLED;
351		aq->sm_state = (aq->queue_count > 0) ?
352			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
353	}
354
355	switch (status.response_code) {
356	case AP_RESPONSE_NORMAL:
357		if (aq->queue_count > 0)
358			return AP_SM_WAIT_AGAIN;
359		fallthrough;
360	case AP_RESPONSE_NO_PENDING_REPLY:
361		return AP_SM_WAIT_TIMEOUT;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
362	default:
363		aq->sm_state = AP_SM_STATE_BORKED;
 
 
 
 
 
 
364		return AP_SM_WAIT_NONE;
365	}
366}
367
368/*
369 * AP state machine jump table
370 */
371static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
372	[AP_SM_STATE_RESET_START] = {
373		[AP_SM_EVENT_POLL] = ap_sm_reset,
374		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
375	},
376	[AP_SM_STATE_RESET_WAIT] = {
377		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
378		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
379	},
380	[AP_SM_STATE_SETIRQ_WAIT] = {
381		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
382		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
383	},
384	[AP_SM_STATE_IDLE] = {
385		[AP_SM_EVENT_POLL] = ap_sm_write,
386		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
387	},
388	[AP_SM_STATE_WORKING] = {
389		[AP_SM_EVENT_POLL] = ap_sm_read_write,
390		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
391	},
392	[AP_SM_STATE_QUEUE_FULL] = {
393		[AP_SM_EVENT_POLL] = ap_sm_read,
394		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
395	},
396	[AP_SM_STATE_REMOVE] = {
397		[AP_SM_EVENT_POLL] = ap_sm_nop,
398		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
399	},
400	[AP_SM_STATE_UNBOUND] = {
401		[AP_SM_EVENT_POLL] = ap_sm_nop,
402		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
403	},
404	[AP_SM_STATE_BORKED] = {
405		[AP_SM_EVENT_POLL] = ap_sm_nop,
406		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
407	},
408};
409
410enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
411{
412	return ap_jumptable[aq->sm_state][event](aq);
 
 
 
 
413}
414
415enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
416{
417	enum ap_sm_wait wait;
418
419	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
420		;
421	return wait;
422}
423
424/*
425 * AP queue related attributes.
426 */
427static ssize_t request_count_show(struct device *dev,
428				  struct device_attribute *attr,
429				  char *buf)
430{
431	struct ap_queue *aq = to_ap_queue(dev);
 
432	u64 req_cnt;
433
434	spin_lock_bh(&aq->lock);
435	req_cnt = aq->total_request_count;
 
 
 
436	spin_unlock_bh(&aq->lock);
437	return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
 
 
 
 
438}
439
440static ssize_t request_count_store(struct device *dev,
441				   struct device_attribute *attr,
442				   const char *buf, size_t count)
443{
444	struct ap_queue *aq = to_ap_queue(dev);
445
446	spin_lock_bh(&aq->lock);
447	aq->total_request_count = 0;
448	spin_unlock_bh(&aq->lock);
449
450	return count;
451}
452
453static DEVICE_ATTR_RW(request_count);
454
455static ssize_t requestq_count_show(struct device *dev,
456				   struct device_attribute *attr, char *buf)
457{
458	struct ap_queue *aq = to_ap_queue(dev);
459	unsigned int reqq_cnt = 0;
460
461	spin_lock_bh(&aq->lock);
462	reqq_cnt = aq->requestq_count;
 
463	spin_unlock_bh(&aq->lock);
464	return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
465}
466
467static DEVICE_ATTR_RO(requestq_count);
468
469static ssize_t pendingq_count_show(struct device *dev,
470				   struct device_attribute *attr, char *buf)
471{
472	struct ap_queue *aq = to_ap_queue(dev);
473	unsigned int penq_cnt = 0;
474
475	spin_lock_bh(&aq->lock);
476	penq_cnt = aq->pendingq_count;
 
477	spin_unlock_bh(&aq->lock);
478	return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
479}
480
481static DEVICE_ATTR_RO(pendingq_count);
482
/* Show a human readable description of the reset/timer state. */
static ssize_t reset_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	switch (aq->sm_state) {
	case AP_SM_STATE_RESET_START:
	case AP_SM_STATE_RESET_WAIT:
		rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
		break;
	case AP_SM_STATE_WORKING:
	case AP_SM_STATE_QUEUE_FULL:
		rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
		break;
	default:
		rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
	}
	spin_unlock_bh(&aq->lock);
	return rc;
}

/*
 * Any write triggers a user-requested reset: flush all queued
 * messages and restart the state machine from RESET_START.
 */
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF(DBF_INFO, "reset queue=%02x.%04x triggered by user\n",
	       AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);
525
/* Show a human readable description of the interrupt state. */
static ssize_t interrupt_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
		rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
	else if (aq->interrupt == AP_INTR_ENABLED)
		rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
	else
		rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(interrupt);
544
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Default sysfs attributes attached to every AP queue device. */
static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

/* Device type for AP queue devices; carries the attribute groups. */
static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};
567
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Device release callback: unhook the queue from the bus-wide hash
 * table (under ap_queues_lock) and free the object.
 */
static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}

/**
 * ap_queue_create(): Allocate and pre-initialize an AP queue object.
 * @qid: The qid (card/queue id) for the new AP queue
 * @device_type: The device type for the new AP queue
 *
 * Returns the new struct ap_queue on success, NULL on allocation
 * failure. The queue starts in state UNBOUND with interrupts disabled.
 */
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = device_type;
	aq->qid = qid;
	aq->sm_state = AP_SM_STATE_UNBOUND;
	aq->interrupt = AP_INTR_DISABLED;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}
599
/**
 * ap_queue_init_reply(): Attach a reply buffer to an AP queue and poll.
 * @aq: The AP queue
 * @reply: The ap_message used as reply buffer for this queue
 */
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);

/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 */
void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	/* For asynchronous message handling a valid receive-callback
	 * is required.
	 */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);
	/* Queue the message. */
	list_add_tail(&ap_msg->list, &aq->requestq);
	aq->requestq_count++;
	aq->total_request_count++;
	atomic64_inc(&aq->card->total_request_count);
	/* Send/receive as many request from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_message);
633
634/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
635 * ap_cancel_message(): Cancel a crypto request.
636 * @aq: The AP device that has the message queued
637 * @ap_msg: The message that is to be removed
638 *
639 * Cancel a crypto request. This is done by removing the request
640 * from the device pending or request queue. Note that the
641 * request stays on the AP queue. When it finishes the message
642 * reply will be discarded because the psmid can't be found.
643 */
644void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
645{
646	struct ap_message *tmp;
647
648	spin_lock_bh(&aq->lock);
649	if (!list_empty(&ap_msg->list)) {
650		list_for_each_entry(tmp, &aq->pendingq, list)
651			if (tmp->psmid == ap_msg->psmid) {
652				aq->pendingq_count--;
653				goto found;
654			}
655		aq->requestq_count--;
656found:
657		list_del_init(&ap_msg->list);
658	}
659	spin_unlock_bh(&aq->lock);
660}
661EXPORT_SYMBOL(ap_cancel_message);
662
663/**
664 * __ap_flush_queue(): Flush requests.
665 * @aq: Pointer to the AP queue
666 *
667 * Flush all requests from the request/pending queue of an AP device.
668 */
669static void __ap_flush_queue(struct ap_queue *aq)
670{
671	struct ap_message *ap_msg, *next;
672
673	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
674		list_del_init(&ap_msg->list);
675		aq->pendingq_count--;
676		ap_msg->rc = -EAGAIN;
677		ap_msg->receive(aq, ap_msg, NULL);
678	}
679	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
680		list_del_init(&ap_msg->list);
681		aq->requestq_count--;
682		ap_msg->rc = -EAGAIN;
683		ap_msg->receive(aq, ap_msg, NULL);
684	}
685	aq->queue_count = 0;
686}
687
688void ap_flush_queue(struct ap_queue *aq)
689{
690	spin_lock_bh(&aq->lock);
691	__ap_flush_queue(aq);
692	spin_unlock_bh(&aq->lock);
693}
694EXPORT_SYMBOL(ap_flush_queue);
695
/*
 * ap_queue_prepare_remove(): First stage of queue removal - flush all
 * messages, move the state machine to REMOVE so nothing new is queued,
 * and stop the timeout timer.
 */
void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* set REMOVE state to prevent new messages are queued in */
	aq->sm_state = AP_SM_STATE_REMOVE;
	spin_unlock_bh(&aq->lock);
	del_timer_sync(&aq->timeout);
}

void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * all messages have been flushed and the state is
	 * AP_SM_STATE_REMOVE. Now reset with zero which also
	 * clears the irq registration and move the state
	 * to AP_SM_STATE_UNBOUND to signal that this queue
	 * is not used by any driver currently.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid);
	aq->sm_state = AP_SM_STATE_UNBOUND;
	spin_unlock_bh(&aq->lock);
}

/*
 * ap_queue_init_state(): Start the reset sequence for a queue by
 * moving the state machine to RESET_START and kicking it with POLL.
 */
void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);