// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016, 2023
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>

#include "ap_bus.h"
#include "ap_debug.h"

static void __ap_flush_queue(struct ap_queue *aq);

/*
 * some AP queue helper functions
 */

static inline bool ap_q_supports_bind(struct ap_queue *aq)
{
	return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
}

static inline bool ap_q_supports_assoc(struct ap_queue *aq)
{
	return aq->card->hwinfo.ep11;
}

static inline bool ap_q_needs_bind(struct ap_queue *aq)
{
	return ap_q_supports_bind(aq) && ap_sb_available();
}

/**
 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interruption on the AP queue via ap_aqic(). Based on the
 * return value the caller waits a while and then checks with
 * ap_test_queue() whether interrupts have been switched on.
 */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	union ap_qirq_ctrl qirqctrl = { .value = 0 };
	struct ap_queue_status status;

	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
	if (status.async)
		return -EPERM;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		return -EBUSY;
	}
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @msglen: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
	  int special)
{
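	/* flag the request as a special command by raising the special bit */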
	if (special)
		qid |= 0x400000UL;
	return ap_nqap(qid, psmid, msg, msglen);
}

/* State machine definitions and helpers */

static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns the AP queue status structure from the last DQAP invocation.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;
	size_t reslen;
	unsigned long resgr0 = 0;
	int parts = 0;

	/*
	 * DQAP loop until response code and resgr0 indicate that
	 * the msg is totally received. As we use the very same buffer
	 * the msg is overwritten with each invocation. That's intended
	 * and the receiver of the msg is informed with a msg rc code
	 * of EMSGSIZE in such a case.
	 */
	do {
		status = ap_dqap(aq->qid, &aq->reply->psmid,
				 aq->reply->msg, aq->reply->bufsize,
				 &aq->reply->len, &reslen, &resgr0);
		parts++;
	} while (status.response_code == 0xFF && resgr0 != 0);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
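		/* hardware still holds entries although our counter hit zero: re-sync */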
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			if (parts > 1) {
				ap_msg->rc = -EMSGSIZE;
				ap_msg->receive(aq, ap_msg, NULL);
			} else {
				ap_msg->receive(aq, ap_msg, aq->reply);
			}
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}

/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT or
 * AP_SM_WAIT_HIGH_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			return status.irq_enabled ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		break;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	/* Check and maybe enable irq support (again) on this queue */
	if (!status.irq_enabled && status.queue_empty) {
		void *lsi_ptr = ap_airq_ptr();

		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) {
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
			return AP_SM_WAIT_AGAIN;
		}
	}
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT,
 * AP_SM_WAIT_HIGH_TIMEOUT or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;

	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
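		/* NQAP accepted the message: at least one request is now enqueued */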
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->hwinfo.qd) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return status.irq_enabled ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns the more urgent of the two wait hints delivered by
 * ap_sm_read() and ap_sm_write().
 */
static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
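	/*
	 * The ap_sm_wait enum is ordered from the shortest to the longest
	 * wait, so min() picks the more urgent of the two wait hints.
	 */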
	return min(ap_sm_read(aq), ap_sm_write(aq));
}

/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: The AP queue
 *
 * Submit the Reset command to an AP queue.
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid, aq->rapq_fbit);
	if (status.async)
		return AP_SM_WAIT_NONE;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
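		/* the flush (F) bit is a one-shot request, clear it again */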
		aq->rapq_fbit = 0;
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	void *lsi_ptr;

	/* Get the status with TAPQ */
	status = ap_test_queue(aq->qid, 1, &hwinfo);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->se_bstate = hwinfo.bs;
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return AP_SM_WAIT_LOW_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_LOW_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_assoc_wait(): Test queue for completion of a pending
 *	association request.
 * @aq: pointer to the AP queue
 */
static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	/* handle asynchronous error on this queue */
	if (status.async && status.response_code) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
	if (status.response_code > AP_RESPONSE_BUSY) {
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}

	/* update queue's SE bind state */
	aq->se_bstate = hwinfo.bs;

	/* check bs bits */
	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		/* association is complete */
		aq->sm_state = AP_SM_STATE_IDLE;
		AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
			   __func__, AP_QID_CARD(aq->qid),
			   AP_QID_QUEUE(aq->qid), aq->assoc_idx);
		return AP_SM_WAIT_NONE;
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		/* association still pending */
		return AP_SM_WAIT_LOW_TIMEOUT;
	default:
		/* reset from 'outside' happened or no idea at all */
		aq->assoc_idx = ASSOC_IDX_INVALID;
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/*
 * AP state machine jump table
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_ASSOC_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};

enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
	if (aq->config && !aq->chkstop &&
	    aq->dev_state > AP_DEV_STATE_UNINITIATED)
		return ap_jumptable[aq->sm_state][event](aq);
	else
		return AP_SM_WAIT_NONE;
}

enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
	enum ap_sm_wait wait;

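	/* run the state machine until it stops asking for an immediate re-poll */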
	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
		;
	return wait;
}

/*
 * AP queue related attributes.
 */
static ssize_t request_count_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	bool valid = false;
	u64 req_cnt;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
		req_cnt = aq->total_request_count;
		valid = true;
	}
	spin_unlock_bh(&aq->lock);

	if (valid)
		return sysfs_emit(buf, "%llu\n", req_cnt);
	else
		return sysfs_emit(buf, "-\n");
}

static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);

static ssize_t requestq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int reqq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		reqq_cnt = aq->requestq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", reqq_cnt);
}

static DEVICE_ATTR_RO(requestq_count);

static ssize_t pendingq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int penq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		penq_cnt = aq->pendingq_count;
	spin_unlock_bh(&aq->lock);
	return sysfs_emit(buf, "%d\n", penq_cnt);
}

static DEVICE_ATTR_RO(pendingq_count);

static ssize_t reset_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	switch (aq->sm_state) {
	case AP_SM_STATE_RESET_START:
	case AP_SM_STATE_RESET_WAIT:
		rc = sysfs_emit(buf, "Reset in progress.\n");
		break;
	case AP_SM_STATE_WORKING:
	case AP_SM_STATE_QUEUE_FULL:
		rc = sysfs_emit(buf, "Reset Timer armed.\n");
		break;
	default:
		rc = sysfs_emit(buf, "No Reset Timer set.\n");
	}
	spin_unlock_bh(&aq->lock);
	return rc;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
		    __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);

static ssize_t interrupt_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	int rc = 0;

	spin_lock_bh(&aq->lock);
	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) {
		rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
	} else {
		status = ap_tapq(aq->qid, NULL);
		if (status.irq_enabled)
			rc = sysfs_emit(buf, "Interrupts enabled.\n");
		else
			rc = sysfs_emit(buf, "Interrupts disabled.\n");
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}

static DEVICE_ATTR_RO(interrupt);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(config);

static ssize_t chkstop_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(chkstop);

static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return sysfs_emit(buf, "0x%08X\n", hwinfo.fac);
}

static DEVICE_ATTR_RO(ap_functions);

#ifdef CONFIG_ZCRYPT_DEBUG
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = sysfs_emit(buf, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = sysfs_emit(buf, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = sysfs_emit(buf, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = sysfs_emit(buf, "ERROR");
		break;
	default:
		rc = sysfs_emit(buf, "UNKNOWN");
	}
	/* state machine state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += sysfs_emit_at(buf, rc, " [FULL]\n");
			break;
		case AP_SM_STATE_ASSOC_WAIT:
			rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
			break;
		default:
			rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);

static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return sysfs_emit(buf, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return sysfs_emit(buf, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return sysfs_emit(buf, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return sysfs_emit(buf, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return sysfs_emit(buf, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return sysfs_emit(buf, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return sysfs_emit(buf, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return sysfs_emit(buf, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
	default:
		return sysfs_emit(buf, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif

static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
	&dev_attr_chkstop.attr,
	&dev_attr_ap_functions.attr,
#ifdef CONFIG_ZCRYPT_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};

static ssize_t se_bind_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_bind(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		return sysfs_emit(buf, "bound\n");
	default:
		return sysfs_emit(buf, "unbound\n");
	}
}

static ssize_t se_bind_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	bool value;
	int rc;

	if (!ap_q_supports_bind(aq))
		return -EINVAL;

	/* only 0 (unbind) and 1 (bind) allowed */
	rc = kstrtobool(buf, &value);
	if (rc)
		return rc;

	if (!value) {
		/* Unbind. Set F bit arg and trigger RAPQ */
		spin_lock_bh(&aq->lock);
		__ap_flush_queue(aq);
		aq->rapq_fbit = 1;
		_ap_queue_init_state(aq);
		rc = count;
		goto out;
	}

	/* Bind. Check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* Update BS state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	if (hwinfo.bs != AP_BS_Q_AVAIL_FOR_BINDING) {
		AP_DBF_WARN("%s bind attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* Check SM state */
	if (aq->sm_state < AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* invoke BAPQ */
	status = ap_bapq(aq->qid);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->assoc_idx = ASSOC_IDX_INVALID;

	/* verify SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->se_bstate = hwinfo.bs;
	if (!(hwinfo.bs == AP_BS_Q_USABLE ||
	      hwinfo.bs == AP_BS_Q_USABLE_NO_SECURE_KEY)) {
		AP_DBF_WARN("%s BAPQ success, but bs shows %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	/* SE bind was successful */
	AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
		    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RW(se_bind);

static ssize_t se_associate_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_assoc(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			   __func__, status.response_code,
			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
			return -EIO;
		}
		return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		if (aq->assoc_idx != ASSOC_IDX_INVALID)
			return sysfs_emit(buf, "association pending\n");
		fallthrough;
	default:
		return sysfs_emit(buf, "unassociated\n");
	}
}

static ssize_t se_associate_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	unsigned int value;
	int rc;

	if (!ap_q_supports_assoc(aq))
		return -EINVAL;

	/* association index needs to be >= 0 and < ASSOC_IDX_INVALID */
	rc = kstrtouint(buf, 0, &value);
	if (rc)
		return rc;
	if (value >= ASSOC_IDX_INVALID)
		return -EINVAL;

	/* check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	if (hwinfo.bs != AP_BS_Q_USABLE_NO_SECURE_KEY) {
		AP_DBF_WARN("%s association attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* check SM state */
	if (aq->sm_state != AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* trigger the asynchronous association request */
	status = ap_aapq(aq->qid, value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
		aq->assoc_idx = value;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		break;
	default:
		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RW(se_associate);

static struct attribute *ap_queue_dev_sb_attrs[] = {
	&dev_attr_se_bind.attr,
	&dev_attr_se_associate.attr,
	NULL
};

static struct attribute_group ap_queue_dev_sb_attr_group = {
	.attrs = ap_queue_dev_sb_attrs
};

static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
	&ap_queue_dev_sb_attr_group,
	NULL
};

static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}

struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = device_type;
	// add optional SE secure binding attributes group
	if (ap_sb_available() && is_prot_virt_guest())
		aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
	aq->qid = qid;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}

void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);

/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = 0;

	/* msg needs to have a valid receive-callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow to queue new messages if device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else {
		rc = -ENODEV;
	}

	/* Send/receive as many requests from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);

/**
 * ap_queue_usable(): Check if queue is usable just now.
 * @aq: The AP queue device to test for usability.
 * This function is intended for the scheduler to query if it makes
 * sense to enqueue a message into this AP queue device by calling
 * ap_queue_message(). The perspective is very short-term as the
 * state machine and device state(s) may change at any time.
 */
bool ap_queue_usable(struct ap_queue *aq)
{
	bool rc = true;

	spin_lock_bh(&aq->lock);

	/* check for not configured or checkstopped */
	if (!aq->config || aq->chkstop) {
		rc = false;
		goto unlock_and_out;
	}

	/* device state needs to be ok */
	if (aq->dev_state != AP_DEV_STATE_OPERATING) {
		rc = false;
		goto unlock_and_out;
	}

	/* SE guest's queues additionally need to be bound */
	if (ap_q_needs_bind(aq) &&
	    !(aq->se_bstate == AP_BS_Q_USABLE ||
	      aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY))
		rc = false;

unlock_and_out:
	spin_unlock_bh(&aq->lock);
	return rc;
}
EXPORT_SYMBOL(ap_queue_usable);

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
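		/* not found on the pendingq, so the message still sits on the requestq */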
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * __ap_flush_queue(): Flush requests.
 * @aq: Pointer to the AP queue
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_queue *aq)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
		list_del_init(&ap_msg->list);
		aq->pendingq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	aq->queue_count = 0;
}

void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	del_timer_sync(&aq->timeout);
}

void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * all messages have been flushed and the device state
	 * is SHUTDOWN. Now reset with zero which also clears
	 * the irq registration and move the device state
	 * to the initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid, 0);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}

void _ap_queue_init_state(struct ap_queue *aq)
{
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	aq->last_err_rc = 0;
	aq->assoc_idx = ASSOC_IDX_INVALID;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
}

void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	_ap_queue_init_state(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright IBM Corp. 2016, 2023
4 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
5 *
6 * Adjunct processor bus, queue related code.
7 */
8
9#define KMSG_COMPONENT "ap"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
12#include <linux/init.h>
13#include <linux/slab.h>
14#include <asm/facility.h>
15
16#include "ap_bus.h"
17#include "ap_debug.h"
18
19static void __ap_flush_queue(struct ap_queue *aq);
20
21/*
22 * some AP queue helper functions
23 */
24
25static inline bool ap_q_supports_bind(struct ap_queue *aq)
26{
27 return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
28}
29
30static inline bool ap_q_supports_assoc(struct ap_queue *aq)
31{
32 return aq->card->hwinfo.ep11;
33}
34
35static inline bool ap_q_needs_bind(struct ap_queue *aq)
36{
37 return ap_q_supports_bind(aq) && ap_sb_available();
38}
39
40/**
41 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
42 * @aq: The AP queue
43 * @ind: the notification indicator byte
44 *
45 * Enables interruption on AP queue via ap_aqic(). Based on the return
46 * value it waits a while and tests the AP queue if interrupts
47 * have been switched on using ap_test_queue().
48 */
49static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
50{
51 union ap_qirq_ctrl qirqctrl = { .value = 0 };
52 struct ap_queue_status status;
53
54 qirqctrl.ir = 1;
55 qirqctrl.isc = AP_ISC;
56 status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
57 if (status.async)
58 return -EPERM;
59 switch (status.response_code) {
60 case AP_RESPONSE_NORMAL:
61 case AP_RESPONSE_OTHERWISE_CHANGED:
62 return 0;
63 case AP_RESPONSE_Q_NOT_AVAIL:
64 case AP_RESPONSE_DECONFIGURED:
65 case AP_RESPONSE_CHECKSTOPPED:
66 case AP_RESPONSE_INVALID_ADDRESS:
67 pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
68 AP_QID_CARD(aq->qid),
69 AP_QID_QUEUE(aq->qid));
70 return -EOPNOTSUPP;
71 case AP_RESPONSE_RESET_IN_PROGRESS:
72 case AP_RESPONSE_BUSY:
73 default:
74 return -EBUSY;
75 }
76}
77
78/**
79 * __ap_send(): Send message to adjunct processor queue.
80 * @qid: The AP queue number
81 * @psmid: The program supplied message identifier
82 * @msg: The message text
83 * @msglen: The message length
84 * @special: Special Bit
85 *
86 * Returns AP queue status structure.
87 * Condition code 1 on NQAP can't happen because the L bit is 1.
88 * Condition code 2 on NQAP also means the send is incomplete,
89 * because a segment boundary was reached. The NQAP is repeated.
90 */
91static inline struct ap_queue_status
92__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
93 int special)
94{
95 if (special)
96 qid |= 0x400000UL;
97 return ap_nqap(qid, psmid, msg, msglen);
98}
99
100/* State machine definitions and helpers */
101
102static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
103{
104 return AP_SM_WAIT_NONE;
105}
106
107/**
108 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
109 * not change the state of the device.
110 * @aq: pointer to the AP queue
111 *
112 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
113 */
114static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
115{
116 struct ap_queue_status status;
117 struct ap_message *ap_msg;
118 bool found = false;
119 size_t reslen;
120 unsigned long resgr0 = 0;
121 int parts = 0;
122
123 /*
124 * DQAP loop until response code and resgr0 indicate that
125 * the msg is totally received. As we use the very same buffer
126 * the msg is overwritten with each invocation. That's intended
127 * and the receiver of the msg is informed with a msg rc code
128 * of EMSGSIZE in such a case.
129 */
130 do {
131 status = ap_dqap(aq->qid, &aq->reply->psmid,
132 aq->reply->msg, aq->reply->bufsize,
133 &aq->reply->len, &reslen, &resgr0);
134 parts++;
135 } while (status.response_code == 0xFF && resgr0 != 0);
136
137 switch (status.response_code) {
138 case AP_RESPONSE_NORMAL:
139 print_hex_dump_debug("aprpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
140 aq->reply->msg, aq->reply->len, false);
141 aq->queue_count = max_t(int, 0, aq->queue_count - 1);
142 if (!status.queue_empty && !aq->queue_count)
143 aq->queue_count++;
144 if (aq->queue_count > 0)
145 mod_timer(&aq->timeout,
146 jiffies + aq->request_timeout);
147 list_for_each_entry(ap_msg, &aq->pendingq, list) {
148 if (ap_msg->psmid != aq->reply->psmid)
149 continue;
150 list_del_init(&ap_msg->list);
151 aq->pendingq_count--;
152 if (parts > 1) {
153 ap_msg->rc = -EMSGSIZE;
154 ap_msg->receive(aq, ap_msg, NULL);
155 } else {
156 ap_msg->receive(aq, ap_msg, aq->reply);
157 }
158 found = true;
159 break;
160 }
161 if (!found) {
162 AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
163 __func__, aq->reply->psmid,
164 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
165 }
166 fallthrough;
167 case AP_RESPONSE_NO_PENDING_REPLY:
168 if (!status.queue_empty || aq->queue_count <= 0)
169 break;
170 /* The card shouldn't forget requests but who knows. */
171 aq->queue_count = 0;
172 list_splice_init(&aq->pendingq, &aq->requestq);
173 aq->requestq_count += aq->pendingq_count;
174 pr_debug("%s queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
175 __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
176 aq->pendingq_count, aq->requestq_count);
177 aq->pendingq_count = 0;
178 break;
179 default:
180 break;
181 }
182 return status;
183}
184
185/**
186 * ap_sm_read(): Receive pending reply messages from an AP queue.
187 * @aq: pointer to the AP queue
188 *
189 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
190 */
191static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
192{
193 struct ap_queue_status status;
194
195 if (!aq->reply)
196 return AP_SM_WAIT_NONE;
197 status = ap_sm_recv(aq);
198 if (status.async)
199 return AP_SM_WAIT_NONE;
200 switch (status.response_code) {
201 case AP_RESPONSE_NORMAL:
202 if (aq->queue_count > 0) {
203 aq->sm_state = AP_SM_STATE_WORKING;
204 return AP_SM_WAIT_AGAIN;
205 }
206 aq->sm_state = AP_SM_STATE_IDLE;
207 break;
208 case AP_RESPONSE_NO_PENDING_REPLY:
209 if (aq->queue_count > 0)
210 return status.irq_enabled ?
211 AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
212 aq->sm_state = AP_SM_STATE_IDLE;
213 break;
214 default:
215 aq->dev_state = AP_DEV_STATE_ERROR;
216 aq->last_err_rc = status.response_code;
217 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
218 __func__, status.response_code,
219 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
220 return AP_SM_WAIT_NONE;
221 }
222 /* Check and maybe enable irq support (again) on this queue */
223 if (!status.irq_enabled && status.queue_empty) {
224 void *lsi_ptr = ap_airq_ptr();
225
226 if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) {
227 aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
228 return AP_SM_WAIT_AGAIN;
229 }
230 }
231 return AP_SM_WAIT_NONE;
232}
233
234/**
235 * ap_sm_write(): Send messages from the request queue to an AP queue.
236 * @aq: pointer to the AP queue
237 *
238 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
239 */
240static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
241{
242 struct ap_queue_status status;
243 struct ap_message *ap_msg;
244 ap_qid_t qid = aq->qid;
245
246 if (aq->requestq_count <= 0)
247 return AP_SM_WAIT_NONE;
248
249 /* Start the next request on the queue. */
250 ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
251 print_hex_dump_debug("apreq: ", DUMP_PREFIX_ADDRESS, 16, 1,
252 ap_msg->msg, ap_msg->len, false);
253 status = __ap_send(qid, ap_msg->psmid,
254 ap_msg->msg, ap_msg->len,
255 ap_msg->flags & AP_MSG_FLAG_SPECIAL);
256 if (status.async)
257 return AP_SM_WAIT_NONE;
258 switch (status.response_code) {
259 case AP_RESPONSE_NORMAL:
260 aq->queue_count = max_t(int, 1, aq->queue_count + 1);
261 if (aq->queue_count == 1)
262 mod_timer(&aq->timeout, jiffies + aq->request_timeout);
263 list_move_tail(&ap_msg->list, &aq->pendingq);
264 aq->requestq_count--;
265 aq->pendingq_count++;
266 if (aq->queue_count < aq->card->hwinfo.qd) {
267 aq->sm_state = AP_SM_STATE_WORKING;
268 return AP_SM_WAIT_AGAIN;
269 }
270 fallthrough;
271 case AP_RESPONSE_Q_FULL:
272 aq->sm_state = AP_SM_STATE_QUEUE_FULL;
273 return status.irq_enabled ?
274 AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
275 case AP_RESPONSE_RESET_IN_PROGRESS:
276 aq->sm_state = AP_SM_STATE_RESET_WAIT;
277 return AP_SM_WAIT_LOW_TIMEOUT;
278 case AP_RESPONSE_INVALID_DOMAIN:
279 AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
280 fallthrough;
281 case AP_RESPONSE_MESSAGE_TOO_BIG:
282 case AP_RESPONSE_REQ_FAC_NOT_INST:
283 list_del_init(&ap_msg->list);
284 aq->requestq_count--;
285 ap_msg->rc = -EINVAL;
286 ap_msg->receive(aq, ap_msg, NULL);
287 return AP_SM_WAIT_AGAIN;
288 default:
289 aq->dev_state = AP_DEV_STATE_ERROR;
290 aq->last_err_rc = status.response_code;
291 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
292 __func__, status.response_code,
293 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
294 return AP_SM_WAIT_NONE;
295 }
296}
297
298/**
299 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
300 * @aq: pointer to the AP queue
301 *
302 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
303 */
304static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
305{
306 return min(ap_sm_read(aq), ap_sm_write(aq));
307}
308
309/**
310 * ap_sm_reset(): Reset an AP queue.
311 * @aq: The AP queue
312 *
313 * Submit the Reset command to an AP queue.
314 */
315static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
316{
317 struct ap_queue_status status;
318
319 status = ap_rapq(aq->qid, aq->rapq_fbit);
320 if (status.async)
321 return AP_SM_WAIT_NONE;
322 switch (status.response_code) {
323 case AP_RESPONSE_NORMAL:
324 case AP_RESPONSE_RESET_IN_PROGRESS:
325 aq->sm_state = AP_SM_STATE_RESET_WAIT;
326 aq->rapq_fbit = 0;
327 return AP_SM_WAIT_LOW_TIMEOUT;
328 default:
329 aq->dev_state = AP_DEV_STATE_ERROR;
330 aq->last_err_rc = status.response_code;
331 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
332 __func__, status.response_code,
333 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
334 return AP_SM_WAIT_NONE;
335 }
336}
337
338/**
339 * ap_sm_reset_wait(): Test queue for completion of the reset operation
340 * @aq: pointer to the AP queue
341 *
342 * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
343 */
344static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
345{
346 struct ap_queue_status status;
347 struct ap_tapq_hwinfo hwinfo;
348 void *lsi_ptr;
349
350 /* Get the status with TAPQ */
351 status = ap_test_queue(aq->qid, 1, &hwinfo);
352
353 switch (status.response_code) {
354 case AP_RESPONSE_NORMAL:
355 aq->se_bstate = hwinfo.bs;
356 lsi_ptr = ap_airq_ptr();
357 if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
358 aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
359 else
360 aq->sm_state = (aq->queue_count > 0) ?
361 AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
362 return AP_SM_WAIT_AGAIN;
363 case AP_RESPONSE_BUSY:
364 case AP_RESPONSE_RESET_IN_PROGRESS:
365 return AP_SM_WAIT_LOW_TIMEOUT;
366 case AP_RESPONSE_Q_NOT_AVAIL:
367 case AP_RESPONSE_DECONFIGURED:
368 case AP_RESPONSE_CHECKSTOPPED:
369 default:
370 aq->dev_state = AP_DEV_STATE_ERROR;
371 aq->last_err_rc = status.response_code;
372 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
373 __func__, status.response_code,
374 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
375 return AP_SM_WAIT_NONE;
376 }
377}
378
379/**
380 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
381 * @aq: pointer to the AP queue
382 *
383 * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
384 */
385static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
386{
387 struct ap_queue_status status;
388
389 if (aq->queue_count > 0 && aq->reply)
390 /* Try to read a completed message and get the status */
391 status = ap_sm_recv(aq);
392 else
393 /* Get the status with TAPQ */
394 status = ap_tapq(aq->qid, NULL);
395
396 if (status.irq_enabled == 1) {
397 /* Irqs are now enabled */
398 aq->sm_state = (aq->queue_count > 0) ?
399 AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
400 }
401
402 switch (status.response_code) {
403 case AP_RESPONSE_NORMAL:
404 if (aq->queue_count > 0)
405 return AP_SM_WAIT_AGAIN;
406 fallthrough;
407 case AP_RESPONSE_NO_PENDING_REPLY:
408 return AP_SM_WAIT_LOW_TIMEOUT;
409 default:
410 aq->dev_state = AP_DEV_STATE_ERROR;
411 aq->last_err_rc = status.response_code;
412 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
413 __func__, status.response_code,
414 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
415 return AP_SM_WAIT_NONE;
416 }
417}
418
419/**
420 * ap_sm_assoc_wait(): Test queue for completion of a pending
421 * association request.
422 * @aq: pointer to the AP queue
423 */
424static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
425{
426 struct ap_queue_status status;
427 struct ap_tapq_hwinfo hwinfo;
428
429 status = ap_test_queue(aq->qid, 1, &hwinfo);
430 /* handle asynchronous error on this queue */
431 if (status.async && status.response_code) {
432 aq->dev_state = AP_DEV_STATE_ERROR;
433 aq->last_err_rc = status.response_code;
434 AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
435 __func__, status.response_code,
436 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
437 return AP_SM_WAIT_NONE;
438 }
439 if (status.response_code > AP_RESPONSE_BUSY) {
440 aq->dev_state = AP_DEV_STATE_ERROR;
441 aq->last_err_rc = status.response_code;
442 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
443 __func__, status.response_code,
444 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
445 return AP_SM_WAIT_NONE;
446 }
447
448 /* update queue's SE bind state */
449 aq->se_bstate = hwinfo.bs;
450
451 /* check bs bits */
452 switch (hwinfo.bs) {
453 case AP_BS_Q_USABLE:
454 /* association is through */
455 aq->sm_state = AP_SM_STATE_IDLE;
456 pr_debug("%s queue 0x%02x.%04x associated with %u\n",
457 __func__, AP_QID_CARD(aq->qid),
458 AP_QID_QUEUE(aq->qid), aq->assoc_idx);
459 return AP_SM_WAIT_NONE;
460 case AP_BS_Q_USABLE_NO_SECURE_KEY:
461 /* association still pending */
462 return AP_SM_WAIT_LOW_TIMEOUT;
463 default:
464 /* reset from 'outside' happened or no idea at all */
465 aq->assoc_idx = ASSOC_IDX_INVALID;
466 aq->dev_state = AP_DEV_STATE_ERROR;
467 aq->last_err_rc = status.response_code;
468 AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
469 __func__, hwinfo.bs,
470 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
471 return AP_SM_WAIT_NONE;
472 }
473}
474
475/*
476 * AP state machine jump table
477 */
478static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
479 [AP_SM_STATE_RESET_START] = {
480 [AP_SM_EVENT_POLL] = ap_sm_reset,
481 [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
482 },
483 [AP_SM_STATE_RESET_WAIT] = {
484 [AP_SM_EVENT_POLL] = ap_sm_reset_wait,
485 [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
486 },
487 [AP_SM_STATE_SETIRQ_WAIT] = {
488 [AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
489 [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
490 },
491 [AP_SM_STATE_IDLE] = {
492 [AP_SM_EVENT_POLL] = ap_sm_write,
493 [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
494 },
495 [AP_SM_STATE_WORKING] = {
496 [AP_SM_EVENT_POLL] = ap_sm_read_write,
497 [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
498 },
499 [AP_SM_STATE_QUEUE_FULL] = {
500 [AP_SM_EVENT_POLL] = ap_sm_read,
501 [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
502 },
503 [AP_SM_STATE_ASSOC_WAIT] = {
504 [AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
505 [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
506 },
507};
508
509enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
510{
511 if (aq->config && !aq->chkstop &&
512 aq->dev_state > AP_DEV_STATE_UNINITIATED)
513 return ap_jumptable[aq->sm_state][event](aq);
514 else
515 return AP_SM_WAIT_NONE;
516}
517
518enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
519{
520 enum ap_sm_wait wait;
521
522 while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
523 ;
524 return wait;
525}
526
527/*
528 * AP queue related attributes.
529 */
530static ssize_t request_count_show(struct device *dev,
531 struct device_attribute *attr,
532 char *buf)
533{
534 struct ap_queue *aq = to_ap_queue(dev);
535 bool valid = false;
536 u64 req_cnt;
537
538 spin_lock_bh(&aq->lock);
539 if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
540 req_cnt = aq->total_request_count;
541 valid = true;
542 }
543 spin_unlock_bh(&aq->lock);
544
545 if (valid)
546 return sysfs_emit(buf, "%llu\n", req_cnt);
547 else
548 return sysfs_emit(buf, "-\n");
549}
550
551static ssize_t request_count_store(struct device *dev,
552 struct device_attribute *attr,
553 const char *buf, size_t count)
554{
555 struct ap_queue *aq = to_ap_queue(dev);
556
557 spin_lock_bh(&aq->lock);
558 aq->total_request_count = 0;
559 spin_unlock_bh(&aq->lock);
560
561 return count;
562}
563
564static DEVICE_ATTR_RW(request_count);
565
566static ssize_t requestq_count_show(struct device *dev,
567 struct device_attribute *attr, char *buf)
568{
569 struct ap_queue *aq = to_ap_queue(dev);
570 unsigned int reqq_cnt = 0;
571
572 spin_lock_bh(&aq->lock);
573 if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
574 reqq_cnt = aq->requestq_count;
575 spin_unlock_bh(&aq->lock);
576 return sysfs_emit(buf, "%d\n", reqq_cnt);
577}
578
579static DEVICE_ATTR_RO(requestq_count);
580
581static ssize_t pendingq_count_show(struct device *dev,
582 struct device_attribute *attr, char *buf)
583{
584 struct ap_queue *aq = to_ap_queue(dev);
585 unsigned int penq_cnt = 0;
586
587 spin_lock_bh(&aq->lock);
588 if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
589 penq_cnt = aq->pendingq_count;
590 spin_unlock_bh(&aq->lock);
591 return sysfs_emit(buf, "%d\n", penq_cnt);
592}
593
594static DEVICE_ATTR_RO(pendingq_count);
595
596static ssize_t reset_show(struct device *dev,
597 struct device_attribute *attr, char *buf)
598{
599 struct ap_queue *aq = to_ap_queue(dev);
600 int rc = 0;
601
602 spin_lock_bh(&aq->lock);
603 switch (aq->sm_state) {
604 case AP_SM_STATE_RESET_START:
605 case AP_SM_STATE_RESET_WAIT:
606 rc = sysfs_emit(buf, "Reset in progress.\n");
607 break;
608 case AP_SM_STATE_WORKING:
609 case AP_SM_STATE_QUEUE_FULL:
610 rc = sysfs_emit(buf, "Reset Timer armed.\n");
611 break;
612 default:
613 rc = sysfs_emit(buf, "No Reset Timer set.\n");
614 }
615 spin_unlock_bh(&aq->lock);
616 return rc;
617}
618
619static ssize_t reset_store(struct device *dev,
620 struct device_attribute *attr,
621 const char *buf, size_t count)
622{
623 struct ap_queue *aq = to_ap_queue(dev);
624
625 spin_lock_bh(&aq->lock);
626 __ap_flush_queue(aq);
627 aq->sm_state = AP_SM_STATE_RESET_START;
628 ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
629 spin_unlock_bh(&aq->lock);
630
631 AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
632 __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
633
634 return count;
635}
636
637static DEVICE_ATTR_RW(reset);

static ssize_t interrupt_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	int rc = 0;

	spin_lock_bh(&aq->lock);
	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) {
		rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
	} else {
		status = ap_tapq(aq->qid, NULL);
		if (status.irq_enabled)
			rc = sysfs_emit(buf, "Interrupts enabled.\n");
		else
			rc = sysfs_emit(buf, "Interrupts disabled.\n");
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}

static DEVICE_ATTR_RO(interrupt);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(config);

static ssize_t chkstop_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(chkstop);

static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			 __func__, status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	return sysfs_emit(buf, "0x%08X\n", hwinfo.fac);
}

static DEVICE_ATTR_RO(ap_functions);
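
/*
 * Reading the attribute yields the raw TAPQ facility bits as one hex
 * word, e.g. (queue and value shown are illustrative only):
 *
 *	cat /sys/devices/ap/card04/04.0005/ap_functions
 *	0x92000000
 */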

#ifdef CONFIG_ZCRYPT_DEBUG
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = sysfs_emit(buf, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = sysfs_emit(buf, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = sysfs_emit(buf, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = sysfs_emit(buf, "ERROR");
		break;
	default:
		rc = sysfs_emit(buf, "UNKNOWN");
	}
	/* state machine state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += sysfs_emit_at(buf, rc, " [FULL]\n");
			break;
		case AP_SM_STATE_ASSOC_WAIT:
			rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
			break;
		default:
			rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);

static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return sysfs_emit(buf, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return sysfs_emit(buf, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return sysfs_emit(buf, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return sysfs_emit(buf, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return sysfs_emit(buf, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return sysfs_emit(buf, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return sysfs_emit(buf, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return sysfs_emit(buf, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
	default:
		return sysfs_emit(buf, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif

static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
	&dev_attr_chkstop.attr,
	&dev_attr_ap_functions.attr,
#ifdef CONFIG_ZCRYPT_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};

static ssize_t se_bind_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_bind(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			 __func__, status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		return sysfs_emit(buf, "bound\n");
	default:
		return sysfs_emit(buf, "unbound\n");
	}
}

static ssize_t se_bind_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	bool value;
	int rc;

	if (!ap_q_supports_bind(aq))
		return -EINVAL;

	/* only 0 (unbind) and 1 (bind) allowed */
	rc = kstrtobool(buf, &value);
	if (rc)
		return rc;

	if (!value) {
		/* Unbind. Set F bit arg and trigger RAPQ */
		spin_lock_bh(&aq->lock);
		__ap_flush_queue(aq);
		aq->rapq_fbit = 1;
		_ap_queue_init_state(aq);
		rc = count;
		goto out;
	}

	/* Bind. Check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* Update BS state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	if (hwinfo.bs != AP_BS_Q_AVAIL_FOR_BINDING) {
		AP_DBF_WARN("%s bind attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* Check SM state */
	if (aq->sm_state < AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* invoke BAPQ */
	status = ap_bapq(aq->qid);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->assoc_idx = ASSOC_IDX_INVALID;

	/* verify SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}
	aq->se_bstate = hwinfo.bs;
	if (!(hwinfo.bs == AP_BS_Q_USABLE ||
	      hwinfo.bs == AP_BS_Q_USABLE_NO_SECURE_KEY)) {
		AP_DBF_WARN("%s BAPQ success, but bs shows %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	/* SE bind was successful */
	AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
		    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RW(se_bind);
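
/*
 * From user space the bind state is driven with boolean writes, e.g.
 * for an assumed queue 04.0005 inside an SE guest:
 *
 *	echo 1 > /sys/devices/ap/card04/04.0005/se_bind    # bind (BAPQ)
 *	echo 0 > /sys/devices/ap/card04/04.0005/se_bind    # unbind (RAPQ, F bit)
 *	cat /sys/devices/ap/card04/04.0005/se_bind         # "bound" or "unbound"
 */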

static ssize_t se_associate_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;

	if (!ap_q_supports_assoc(aq))
		return sysfs_emit(buf, "-\n");

	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code > AP_RESPONSE_BUSY) {
		pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			 __func__, status.response_code,
			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}

	/* update queue's SE bind state */
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	spin_unlock_bh(&aq->lock);

	switch (hwinfo.bs) {
	case AP_BS_Q_USABLE:
		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
			return -EIO;
		}
		return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
	case AP_BS_Q_USABLE_NO_SECURE_KEY:
		if (aq->assoc_idx != ASSOC_IDX_INVALID)
			return sysfs_emit(buf, "association pending\n");
		fallthrough;
	default:
		return sysfs_emit(buf, "unassociated\n");
	}
}

static ssize_t se_associate_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);
	struct ap_queue_status status;
	struct ap_tapq_hwinfo hwinfo;
	unsigned int value;
	int rc;

	if (!ap_q_supports_assoc(aq))
		return -EINVAL;

	/* association index needs to be a number below ASSOC_IDX_INVALID */
	rc = kstrtouint(buf, 0, &value);
	if (rc)
		return rc;
	if (value >= ASSOC_IDX_INVALID)
		return -EINVAL;

	/* check current SE bind state */
	status = ap_test_queue(aq->qid, 1, &hwinfo);
	if (status.response_code) {
		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return -EIO;
	}
	spin_lock_bh(&aq->lock);
	aq->se_bstate = hwinfo.bs;
	if (hwinfo.bs != AP_BS_Q_USABLE_NO_SECURE_KEY) {
		AP_DBF_WARN("%s association attempt with bs %d on queue 0x%02x.%04x\n",
			    __func__, hwinfo.bs,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EINVAL;
		goto out;
	}

	/* check SM state */
	if (aq->sm_state != AP_SM_STATE_IDLE) {
		rc = -EBUSY;
		goto out;
	}

	/* trigger the asynchronous association request */
	status = ap_aapq(aq->qid, value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
		aq->assoc_idx = value;
		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
		break;
	default:
		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		rc = -EIO;
		goto out;
	}

	rc = count;

out:
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RW(se_associate);
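
/*
 * Association takes a numeric index below ASSOC_IDX_INVALID, e.g. for
 * an assumed queue 04.0005:
 *
 *	echo 5 > /sys/devices/ap/card04/04.0005/se_associate
 *	cat /sys/devices/ap/card04/04.0005/se_associate
 *
 * The read side reports "associated <idx>", "association pending" or
 * "unassociated", depending on the bind state gathered via TAPQ.
 */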

static struct attribute *ap_queue_dev_sb_attrs[] = {
	&dev_attr_se_bind.attr,
	&dev_attr_se_associate.attr,
	NULL
};

static struct attribute_group ap_queue_dev_sb_attr_group = {
	.attrs = ap_queue_dev_sb_attrs
};

static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
	&ap_queue_dev_sb_attr_group,
	NULL
};

static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}

struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = device_type;
	/* add optional SE secure binding attributes group */
	if (ap_sb_available() && is_prot_virt_guest())
		aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
	aq->qid = qid;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}
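
/*
 * A minimal caller sketch for ap_queue_create() (the AP bus scan is
 * the real caller; ac, qid and the device type used here are assumed
 * context, not defined in this file):
 *
 *	struct ap_queue *aq;
 *
 *	aq = ap_queue_create(qid, AP_DEVICE_TYPE_CEX8);
 *	if (aq) {
 *		aq->card = ac;
 *		dev_set_name(&aq->ap_dev.device, "%02x.%04x",
 *			     AP_QID_CARD(qid), AP_QID_QUEUE(qid));
 *		rc = device_register(&aq->ap_dev.device);
 *	}
 *
 * Once registered, freeing is owned by the driver core: the final
 * put_device() ends up in ap_queue_device_release() above.
 */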

void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);

/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 *
 * Return: 0 on success, -ENODEV if the queue device is not in
 * state AP_DEV_STATE_OPERATING.
 */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = 0;

	/* msg needs to have a valid receive-callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow queueing new messages if device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else {
		rc = -ENODEV;
	}

	/* Send/receive as many requests from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);
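
/*
 * A minimal enqueue sketch, assuming a module-side request buffer and
 * receive callback (my_msg, my_receive, req_buf, req_len and my_psmid
 * are illustrative names, not part of this API):
 *
 *	struct ap_message my_msg;
 *
 *	ap_init_message(&my_msg);
 *	my_msg.msg = req_buf;
 *	my_msg.len = req_len;
 *	my_msg.psmid = my_psmid;
 *	my_msg.receive = my_receive;
 *	rc = ap_queue_message(aq, &my_msg);
 *
 * The receive callback is later invoked with the matching reply, or
 * with ap_msg->rc set to -EAGAIN if the queue is flushed first.
 */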

/**
 * ap_queue_usable(): Check if queue is usable just now.
 * @aq: The AP queue device to test for usability.
 * This function is intended for the scheduler to query if it makes
 * sense to enqueue a message into this AP queue device by calling
 * ap_queue_message(). The perspective is very short-term as the
 * state machine and device state(s) may change at any time.
 *
 * Return: true if the queue is usable at this moment, false otherwise.
 */
bool ap_queue_usable(struct ap_queue *aq)
{
	bool rc = true;

	spin_lock_bh(&aq->lock);

	/* check for not configured or checkstopped */
	if (!aq->config || aq->chkstop) {
		rc = false;
		goto unlock_and_out;
	}

	/* device state needs to be ok */
	if (aq->dev_state != AP_DEV_STATE_OPERATING) {
		rc = false;
		goto unlock_and_out;
	}

	/* SE guest's queues additionally need to be bound */
	if (ap_q_needs_bind(aq) &&
	    !(aq->se_bstate == AP_BS_Q_USABLE ||
	      aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY))
		rc = false;

unlock_and_out:
	spin_unlock_bh(&aq->lock);
	return rc;
}
EXPORT_SYMBOL(ap_queue_usable);
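
/*
 * A scheduler-side sketch (the zcrypt layer is the real user; the
 * fallback helper named below is assumed, not a real function):
 *
 *	if (ap_queue_usable(aq))
 *		rc = ap_queue_message(aq, ap_msg);
 *	else
 *		rc = dispatch_to_other_queue(ap_msg);
 *
 * Since the answer is only a snapshot, ap_queue_message() may still
 * return -ENODEV if the device state changed in between.
 */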

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * __ap_flush_queue(): Flush requests.
 * @aq: Pointer to the AP queue
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_queue *aq)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
		list_del_init(&ap_msg->list);
		aq->pendingq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	aq->queue_count = 0;
}

void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN (removal in progress) */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	del_timer_sync(&aq->timeout);
}

void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * All messages have been flushed and the device state is
	 * SHUTDOWN. Now reset with zero, which also clears the irq
	 * registration, and move the device state back to the initial
	 * value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid, 0);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}

void _ap_queue_init_state(struct ap_queue *aq)
{
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	aq->last_err_rc = 0;
	aq->assoc_idx = ASSOC_IDX_INVALID;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
}

void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	_ap_queue_init_state(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);
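
/*
 * Queue device life cycle as implemented in this file, in call order:
 *
 *	ap_queue_create()          allocate and prepare the device
 *	ap_queue_init_state()      enter OPERATING, start the reset SM
 *	ap_queue_init_reply()      attach the reply buffer, kick polling
 *	ap_queue_message()         normal request/reply traffic
 *	ap_queue_prepare_remove()  flush requests, move to SHUTDOWN
 *	ap_queue_remove()          ZAPQ and fall back to UNINITIATED
 */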