// SPDX-License-Identifier: GPL-2.0+
/*
 * Adjunct processor matrix VFIO device driver callbacks.
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
 *	      Halil Pasic <pasic@linux.ibm.com>
 *	      Pierre Morel <pmorel@linux.ibm.com>
 */
#include <linux/string.h>
#include <linux/vfio.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/uuid.h>
#include <asm/kvm.h>
#include <asm/zcrypt.h>

#include "vfio_ap_private.h"
#include "vfio_ap_debug.h"

#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"

#define AP_QUEUE_ASSIGNED "assigned"
#define AP_QUEUE_UNASSIGNED "unassigned"
#define AP_QUEUE_IN_USE "in use"

#define AP_RESET_INTERVAL	20	/* Reset sleep interval (20ms) */

static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
static int vfio_ap_mdev_reset_qlist(struct list_head *qlist);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);

/**
 * get_update_locks_for_kvm: Acquire the locks required to dynamically update a
 *			     KVM guest's APCB in the proper order.
 *
 * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. kvm->lock:	       required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @kvm is NULL, the KVM lock will not be taken.
 */
static inline void get_update_locks_for_kvm(struct kvm *kvm)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (kvm)
		mutex_lock(&kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * release_update_locks_for_kvm: Release the locks used to dynamically update a
 *				 KVM guest's APCB in the proper order.
 *
 * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
 *
 * The proper unlocking order is:
 * 1. matrix_dev->mdevs_lock
 * 2. kvm->lock
 * 3. matrix_dev->guests_lock
 *
 * Note: If @kvm is NULL, the KVM lock will not be released.
 */
static inline void release_update_locks_for_kvm(struct kvm *kvm)
{
	mutex_unlock(&matrix_dev->mdevs_lock);
	if (kvm)
		mutex_unlock(&kvm->lock);
	mutex_unlock(&matrix_dev->guests_lock);
}

/**
 * get_update_locks_for_mdev: Acquire the locks required to dynamically update a
 *			      KVM guest's APCB in the proper order.
 *
 * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
 *		 configuration data to use to update a KVM guest's APCB.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock:	required to use the KVM pointer to update a KVM
 *				guest's APCB.
 * 2. matrix_mdev->kvm->lock:	required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:	required to access data stored in a matrix_mdev
 *
 * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
 *	 lock will not be taken.
 */
static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (matrix_mdev && matrix_mdev->kvm)
		mutex_lock(&matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * release_update_locks_for_mdev: Release the locks used to dynamically update a
 *				  KVM guest's APCB in the proper order.
 *
 * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
 *		 configuration data to use to update a KVM guest's APCB.
 *
 * The proper unlocking order is:
 * 1. matrix_dev->mdevs_lock
 * 2. matrix_mdev->kvm->lock
 * 3. matrix_dev->guests_lock
 *
 * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
 *	 lock will not be released.
 */
static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
	mutex_unlock(&matrix_dev->mdevs_lock);
	if (matrix_mdev && matrix_mdev->kvm)
		mutex_unlock(&matrix_mdev->kvm->lock);
	mutex_unlock(&matrix_dev->guests_lock);
}

/**
 * get_update_locks_by_apqn: Find the mdev to which an APQN is assigned and
 *			     acquire the locks required to update the APCB of
 *			     the KVM guest to which the mdev is attached.
 *
 * @apqn: the APQN of a queue device.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock:	required to use the KVM pointer to update a KVM
 *				guest's APCB.
 * 2. matrix_mdev->kvm->lock:	required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:	required to access data stored in a matrix_mdev
 *
 * Note: If @apqn is not assigned to a matrix_mdev, the matrix_mdev->kvm->lock
 *	 will not be taken.
 *
 * Return: the ap_matrix_mdev object to which @apqn is assigned or NULL if @apqn
 *	   is not assigned to an ap_matrix_mdev.
 */
static struct ap_matrix_mdev *get_update_locks_by_apqn(int apqn)
{
	struct ap_matrix_mdev *matrix_mdev;

	mutex_lock(&matrix_dev->guests_lock);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) &&
		    test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) {
			if (matrix_mdev->kvm)
				mutex_lock(&matrix_mdev->kvm->lock);

			mutex_lock(&matrix_dev->mdevs_lock);

			return matrix_mdev;
		}
	}

	mutex_lock(&matrix_dev->mdevs_lock);

	return NULL;
}

/**
 * get_update_locks_for_queue: get the locks required to update the APCB of the
 *			       KVM guest to which the matrix mdev linked to a
 *			       vfio_ap_queue object is attached.
 *
 * @q: a pointer to a vfio_ap_queue object.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock:	 required to use the KVM pointer to update a
 *				 KVM guest's APCB.
 * 2. q->matrix_mdev->kvm->lock: required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:	 required to access data stored in a matrix_mdev
 *
 * Note: if @q is not linked to an ap_matrix_mdev object, the KVM lock will not
 *	 be taken.
 */
static inline void get_update_locks_for_queue(struct vfio_ap_queue *q)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (q->matrix_mdev && q->matrix_mdev->kvm)
		mutex_lock(&q->matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a
 *			    hash table of queues assigned to a matrix mdev
 * @matrix_mdev: the matrix mdev
 * @apqn: The APQN of a queue device
 *
 * Return: the pointer to the vfio_ap_queue struct representing the queue or
 *	   NULL if the queue is not assigned to @matrix_mdev
 */
static struct vfio_ap_queue *vfio_ap_mdev_get_queue(
					struct ap_matrix_mdev *matrix_mdev,
					int apqn)
{
	struct vfio_ap_queue *q;

	hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode,
			       apqn) {
		if (q && q->apqn == apqn)
			return q;
	}

	return NULL;
}

/**
 * vfio_ap_wait_for_irqclear - wait for the IR bit to clear or give up after
 *			       5 tries
 * @apqn: The AP Queue number
 *
 * Checks the IRQ bit for the status of this APQN using ap_tapq.
 * Returns if the ap_tapq function succeeds and the IRQ bit is clear.
 * Returns if the ap_tapq function fails with an invalid, deconfigured or
 * checkstopped AP.
 * Otherwise, retries up to 5 times, waiting 20ms between tries.
 */
static void vfio_ap_wait_for_irqclear(int apqn)
{
	struct ap_queue_status status;
	int retry = 5;

	do {
		status = ap_tapq(apqn, NULL);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			if (!status.irq_enabled)
				return;
			fallthrough;
		case AP_RESPONSE_BUSY:
			msleep(20);
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		default:
			WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
				  status.response_code, apqn);
			return;
		}
	} while (--retry);

	WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
		  __func__, status.response_code, apqn);
}

/**
 * vfio_ap_free_aqic_resources - free vfio_ap_queue resources
 * @q: The vfio_ap_queue
 *
 * Unregisters the ISC in the GIB when the saved ISC is not invalid.
 * Unpins the guest's page holding the NIB when it exists.
 * Resets the saved_iova and saved_isc to invalid values.
 */
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
	if (!q)
		return;
	if (q->saved_isc != VFIO_AP_ISC_INVALID &&
	    !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
		kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
		q->saved_isc = VFIO_AP_ISC_INVALID;
	}
	if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) {
		vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1);
		q->saved_iova = 0;
	}
}

/**
 * vfio_ap_irq_disable - disables and clears an ap_queue interrupt
 * @q: The vfio_ap_queue
 *
 * Uses ap_aqic to disable the interruption; if the instruction succeeds, a
 * reset is in progress or the IRQ was already disabled, calls
 * vfio_ap_wait_for_irqclear() to wait for the IRQ bit to clear and
 * vfio_ap_free_aqic_resources() to free the resources associated with the
 * AP interrupt handling.
 *
 * In the case the AP is busy, or a reset is in progress,
 * retries after 20ms, up to 5 times.
 *
 * Returns if ap_aqic fails with an invalid, deconfigured or checkstopped AP.
 *
 * Return: &struct ap_queue_status
 */
static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
	union ap_qirq_ctrl aqic_gisa = { .value = 0 };
	struct ap_queue_status status;
	int retries = 5;

	do {
		status = ap_aqic(q->apqn, aqic_gisa, 0);
		switch (status.response_code) {
		case AP_RESPONSE_OTHERWISE_CHANGED:
		case AP_RESPONSE_NORMAL:
			vfio_ap_wait_for_irqclear(q->apqn);
			goto end_free;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			msleep(20);
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
		default:
			/* All other cases mean the AP is not operational */
			WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
				  status.response_code);
			goto end_free;
		}
	} while (retries--);

	WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
		  status.response_code);
end_free:
	vfio_ap_free_aqic_resources(q);
	return status;
}

/**
 * vfio_ap_validate_nib - validate a notification indicator byte (nib) address.
 *
 * @vcpu: the object representing the vcpu executing the PQAP(AQIC) instruction.
 * @nib: the location for storing the nib address.
 *
 * When the PQAP(AQIC) instruction is executed, general register 2 contains the
 * address of the notification indicator byte (nib) used for IRQ notification.
 * This function parses and validates the nib from gr2.
 *
 * Return: returns zero if the nib address is valid; otherwise, returns
 *	   -EINVAL.
 */
static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
{
	*nib = vcpu->run->s.regs.gprs[2];

	if (!*nib)
		return -EINVAL;
	if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
		return -EINVAL;

	return 0;
}

static int ensure_nib_shared(unsigned long addr, struct gmap *gmap)
{
	int ret;

	/*
	 * The nib has to be located in shared storage since guest and
	 * host access it. vfio_pin_pages() will do a pin shared and
	 * if that fails (possibly because it's not a shared page) it
	 * calls export. We try to do a second pin shared here so that
	 * the UV gives us an error code if we try to pin a non-shared
	 * page.
	 *
	 * If the page is already pinned shared the UV will return success.
	 */
	ret = uv_pin_shared(addr);
	if (ret) {
		/* vfio_pin_pages() likely exported the page so let's re-import */
		gmap_convert_to_secure(gmap, addr);
	}
	return ret;
}

/**
 * vfio_ap_irq_enable - Enable Interruption for an APQN
 *
 * @q:	  the vfio_ap_queue holding AQIC parameters
 * @isc:  the guest ISC to register with the GIB interface
 * @vcpu: the vcpu object containing the registers specifying the parameters
 *	  passed to the PQAP(AQIC) instruction.
 *
 * Pin the NIB saved in *q
 * Register the guest ISC to GIB interface and retrieve the
 * host ISC to issue the host side PQAP/AQIC
 *
 * status.response_code may be set to AP_RESPONSE_INVALID_ADDRESS in case the
 * vfio_pin_pages() or kvm_s390_gisc_register() call fails.
 *
 * Otherwise return the ap_queue_status returned by the ap_aqic(),
 * all retry handling will be done by the guest.
 *
 * Return: &struct ap_queue_status
 */
static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
						 int isc,
						 struct kvm_vcpu *vcpu)
{
	union ap_qirq_ctrl aqic_gisa = { .value = 0 };
	struct ap_queue_status status = {};
	struct kvm_s390_gisa *gisa;
	struct page *h_page;
	int nisc;
	struct kvm *kvm;
	phys_addr_t h_nib;
	dma_addr_t nib;
	int ret;

	/* Verify that the notification indicator byte address is valid */
	if (vfio_ap_validate_nib(vcpu, &nib)) {
		VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%pad, apqn=%#04x\n",
				 __func__, &nib, q->apqn);

		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
			     IOMMU_READ | IOMMU_WRITE, &h_page);
	switch (ret) {
	case 1:
		break;
	default:
		VFIO_AP_DBF_WARN("%s: vfio_pin_pages failed: rc=%d,"
				 "nib=%pad, apqn=%#04x\n",
				 __func__, ret, &nib, q->apqn);

		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	kvm = q->matrix_mdev->kvm;
	gisa = kvm->arch.gisa_int.origin;

	h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
	aqic_gisa.gisc = isc;

	/* NIB in non-shared storage is a rc 6 for PV guests */
	if (kvm_s390_pv_cpu_is_protected(vcpu) &&
	    ensure_nib_shared(h_nib & PAGE_MASK, kvm->arch.gmap)) {
		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	nisc = kvm_s390_gisc_register(kvm, isc);
	if (nisc < 0) {
		VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
				 __func__, nisc, isc, q->apqn);

		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	aqic_gisa.isc = nisc;
	aqic_gisa.ir = 1;
	aqic_gisa.gisa = virt_to_phys(gisa) >> 4;

	status = ap_aqic(q->apqn, aqic_gisa, h_nib);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* See if we did clear older IRQ configuration */
		vfio_ap_free_aqic_resources(q);
		q->saved_iova = nib;
		q->saved_isc = isc;
		break;
	case AP_RESPONSE_OTHERWISE_CHANGED:
		/* We could not modify IRQ settings: clear new configuration */
		ret = kvm_s390_gisc_unregister(kvm, isc);
		if (ret)
			VFIO_AP_DBF_WARN("%s: kvm_s390_gisc_unregister: rc=%d isc=%d, apqn=%#04x\n",
					 __func__, ret, isc, q->apqn);
		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		break;
	default:
		pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
			status.response_code);
		vfio_ap_irq_disable(q);
		break;
	}

	if (status.response_code != AP_RESPONSE_NORMAL) {
		VFIO_AP_DBF_WARN("%s: PQAP(AQIC) failed with status=%#02x: "
				 "zone=%#x, ir=%#x, gisc=%#x, f=%#x,"
				 "gisa=%#x, isc=%#x, apqn=%#04x\n",
				 __func__, status.response_code,
				 aqic_gisa.zone, aqic_gisa.ir, aqic_gisa.gisc,
				 aqic_gisa.gf, aqic_gisa.gisa, aqic_gisa.isc,
				 q->apqn);
	}

	return status;
}

/**
 * vfio_ap_le_guid_to_be_uuid - convert a little endian guid array into an array
 *				of big endian elements that can be passed by
 *				value to an s390dbf sprintf event function to
 *				format a UUID string.
 *
 * @guid: the object containing the little endian guid
 * @uuid: a six-element array of long values that can be passed by value as
 *	  arguments for a formatting string specifying a UUID.
 *
 * The S390 Debug Feature (s390dbf) allows the use of "%s" in the sprintf
 * event functions if the memory for the passed string is available as long as
 * the debug feature exists. Since a mediated device can be removed at any
 * time, its name cannot be used because %s passes the reference to the string
 * in memory and the reference will go stale once the device is removed.
 *
 * The s390dbf string formatting function allows a maximum of 9 arguments for a
 * message to be displayed in the 'sprintf' view. In order to use the bytes
 * comprising the mediated device's UUID to display the mediated device name,
 * they will have to be converted into an array whose elements can be passed by
 * value to sprintf. For example:
 *
 * guid array: { 83, 78, 17, 62, bb, f1, f0, 47, 91, 4d, 32, a2, 2e, 3a, 88, 04 }
 * mdev name: 62177883-f1bb-47f0-914d-32a22e3a8804
 * array returned: { 62177883, f1bb, 47f0, 914d, 32a2, 2e3a8804 }
 * formatting string: "%08lx-%04lx-%04lx-%04lx-%04lx%08lx"
 */
static void vfio_ap_le_guid_to_be_uuid(guid_t *guid, unsigned long *uuid)
{
	/*
	 * The input guid is ordered in little endian, so it needs to be
	 * reordered for displaying a UUID as a string. This specifies the
	 * guid indices in proper order.
	 */
	uuid[0] = le32_to_cpup((__le32 *)guid);
	uuid[1] = le16_to_cpup((__le16 *)&guid->b[4]);
	uuid[2] = le16_to_cpup((__le16 *)&guid->b[6]);
	uuid[3] = *((__u16 *)&guid->b[8]);
	uuid[4] = *((__u16 *)&guid->b[10]);
	uuid[5] = *((__u32 *)&guid->b[12]);
}

/**
 * handle_pqap - PQAP instruction callback
 *
 * @vcpu: The vcpu on which we received the PQAP instruction
 *
 * Get the general register contents to initialize internal variables.
 * REG[0]: APQN
 * REG[1]: IR and ISC
 * REG[2]: NIB
 *
 * Response.status may be set to one of the following response codes:
 * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
 * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
 * - AP_RESPONSE_NORMAL (0) : in case of success
 *   Check vfio_ap_irq_enable() and vfio_ap_irq_disable() for other possible RC.
 * We take the matrix_dev lock to ensure serialization on queues and
 * mediated device access.
 *
 * Return: 0 if we could handle the request inside KVM.
 * Otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
 */
static int handle_pqap(struct kvm_vcpu *vcpu)
{
	uint64_t status;
	uint16_t apqn;
	unsigned long uuid[6];
	struct vfio_ap_queue *q;
	struct ap_queue_status qstatus = {
			       .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
	struct ap_matrix_mdev *matrix_mdev;

	apqn = vcpu->run->s.regs.gprs[0] & 0xffff;

	/* If we do not use the AIV facility just go to userland */
	if (!(vcpu->arch.sie_block->eca & ECA_AIV)) {
		VFIO_AP_DBF_WARN("%s: AIV facility not installed: apqn=0x%04x, eca=0x%04x\n",
				 __func__, apqn, vcpu->arch.sie_block->eca);

		return -EOPNOTSUPP;
	}

	mutex_lock(&matrix_dev->mdevs_lock);

	if (!vcpu->kvm->arch.crypto.pqap_hook) {
		VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n",
				 __func__, apqn);

		goto out_unlock;
	}

	matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
				   struct ap_matrix_mdev, pqap_hook);

	/* If there is no guest using the mdev, there is nothing to do */
	if (!matrix_mdev->kvm) {
		vfio_ap_le_guid_to_be_uuid(&matrix_mdev->mdev->uuid, uuid);
		VFIO_AP_DBF_WARN("%s: mdev %08lx-%04lx-%04lx-%04lx-%04lx%08lx not in use: apqn=0x%04x\n",
				 __func__, uuid[0], uuid[1], uuid[2],
				 uuid[3], uuid[4], uuid[5], apqn);
		goto out_unlock;
	}

	q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
	if (!q) {
		VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n",
				 __func__, AP_QID_CARD(apqn),
				 AP_QID_QUEUE(apqn));
		goto out_unlock;
	}

	status = vcpu->run->s.regs.gprs[1];

	/* If IR bit (16) is set, we enable the interrupt */
	if ((status >> (63 - 16)) & 0x01)
		qstatus = vfio_ap_irq_enable(q, status & 0x07, vcpu);
	else
		qstatus = vfio_ap_irq_disable(q);

out_unlock:
	memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
	vcpu->run->s.regs.gprs[1] >>= 32;
	mutex_unlock(&matrix_dev->mdevs_lock);
	return 0;
}

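/**
 * vfio_ap_matrix_init - initialize the mask limits of an ap_matrix
 * @info: the AP configuration information for the host's AP configuration
 * @matrix: the matrix whose apm_max, aqm_max and adm_max are to be set
 *
 * If the APXA facility is installed (info->apxa), the maximum adapter,
 * domain and control domain numbers are taken from @info; otherwise, the
 * architected defaults (63 adapters, 15 domains) are used.
 */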
static void vfio_ap_matrix_init(struct ap_config_info *info,
				struct ap_matrix *matrix)
{
	matrix->apm_max = info->apxa ? info->na : 63;
	matrix->aqm_max = info->apxa ? info->nd : 15;
	matrix->adm_max = info->apxa ? info->nd : 15;
}

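/*
 * vfio_ap_mdev_update_guest_apcb - if @matrix_mdev is attached to a KVM guest,
 * plug the adapters, domains and control domains of its shadow APCB into the
 * guest's CRYCB.
 */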
static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
{
	if (matrix_mdev->kvm)
		kvm_arch_crypto_set_masks(matrix_mdev->kvm,
					  matrix_mdev->shadow_apcb.apm,
					  matrix_mdev->shadow_apcb.aqm,
					  matrix_mdev->shadow_apcb.adm);
}

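/*
 * vfio_ap_mdev_filter_cdoms - restrict the control domains in the shadow APCB
 * of @matrix_mdev to those assigned to both the mdev and the host's AP
 * configuration.
 *
 * Return: true if filtering changed the shadow APCB's ADM; otherwise false.
 */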
static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
{
	DECLARE_BITMAP(prev_shadow_adm, AP_DOMAINS);

	bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS);
	bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm,
		   (unsigned long *)matrix_dev->info.adm, AP_DOMAINS);

	return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm,
			     AP_DOMAINS);
}

/*
 * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
 *				to ensure no queue devices are passed through to
 *				the guest that are not bound to the vfio_ap
 *				device driver.
 *
 * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
 * @apm_filtered: a 256-bit bitmap for storing the APIDs filtered from the
 *		  guest's AP configuration that are still in the host's AP
 *		  configuration.
 *
 * Note: If an APQN references a queue device that is not bound to the vfio_ap
 *	 driver, its APID will be filtered from the guest's APCB. The matrix
 *	 structure precludes filtering an individual APQN, so its APID will be
 *	 filtered. Consequently, all queues associated with the adapter that
 *	 are in the host's AP configuration must be reset. If queues are
 *	 subsequently made available again to the guest, they should re-appear
 *	 in a reset state.
 *
 * Return: a boolean value indicating whether the KVM guest's APCB was changed
 *	   by the filtering or not.
 */
static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
				       unsigned long *apm_filtered)
{
	unsigned long apid, apqi, apqn;
	DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
	DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);
	struct vfio_ap_queue *q;

	bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
	bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
	bitmap_clear(apm_filtered, 0, AP_DEVICES);

	/*
	 * Copy the adapters, domains and control domains to the shadow_apcb
	 * from the matrix mdev, but only those that are assigned to the host's
	 * AP configuration.
	 */
	bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm,
		   (unsigned long *)matrix_dev->info.apm, AP_DEVICES);
	bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
		   (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);

	for_each_set_bit_inv(apid, matrix_mdev->shadow_apcb.apm, AP_DEVICES) {
		for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm,
				     AP_DOMAINS) {
			/*
			 * If the APQN is not bound to the vfio_ap device
			 * driver, then we can't assign it to the guest's
			 * AP configuration. The AP architecture won't
			 * allow filtering of a single APQN, so let's filter
			 * the APID since an adapter represents a physical
			 * hardware device.
			 */
			apqn = AP_MKQID(apid, apqi);
			q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
			if (!q || q->reset_status.response_code) {
				clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);

				/*
				 * If the adapter was previously plugged into
				 * the guest, let's let the caller know that
				 * the APID was filtered.
				 */
				if (test_bit_inv(apid, prev_shadow_apm))
					set_bit_inv(apid, apm_filtered);

				break;
			}
		}
	}

	return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm,
			     AP_DEVICES) ||
	       !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm,
			     AP_DOMAINS);
}

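/*
 * vfio_ap_mdev_init_dev - initialize the state of a newly allocated matrix
 * mdev: set up its matrix and shadow APCB masks, register the PQAP(AQIC)
 * instruction hook and initialize the hash table of assigned queues.
 */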
static int vfio_ap_mdev_init_dev(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	matrix_mdev->mdev = to_mdev_device(vdev->dev);
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
	matrix_mdev->pqap_hook = handle_pqap;
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
	hash_init(matrix_mdev->qtable.queues);

	return 0;
}

static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev;
	int ret;

	matrix_mdev = vfio_alloc_device(ap_matrix_mdev, vdev, &mdev->dev,
					&vfio_ap_matrix_dev_ops);
	if (IS_ERR(matrix_mdev))
		return PTR_ERR(matrix_mdev);

	ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
	if (ret)
		goto err_put_vdev;
	matrix_mdev->req_trigger = NULL;
	dev_set_drvdata(&mdev->dev, matrix_mdev);
	mutex_lock(&matrix_dev->mdevs_lock);
	list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
	mutex_unlock(&matrix_dev->mdevs_lock);
	return 0;

err_put_vdev:
	vfio_put_device(&matrix_mdev->vdev);
	return ret;
}

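/*
 * vfio_ap_mdev_link_queue - link @q to @matrix_mdev by storing a back-pointer
 * to the mdev in the queue and adding the queue to the mdev's hash table of
 * queues, keyed by APQN.
 */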
static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev,
				    struct vfio_ap_queue *q)
{
	if (q) {
		q->matrix_mdev = matrix_mdev;
		hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
	}
}

static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn)
{
	struct vfio_ap_queue *q;

	q = vfio_ap_find_queue(apqn);
	vfio_ap_mdev_link_queue(matrix_mdev, q);
}

static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q)
{
	hash_del(&q->mdev_qnode);
}

static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q)
{
	q->matrix_mdev = NULL;
}

static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
{
	struct vfio_ap_queue *q;
	unsigned long apid, apqi;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
		for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
				     AP_DOMAINS) {
			q = vfio_ap_mdev_get_queue(matrix_mdev,
						   AP_MKQID(apid, apqi));
			if (q)
				q->matrix_mdev = NULL;
		}
	}
}

static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&matrix_mdev->vdev);

	mutex_lock(&matrix_dev->guests_lock);
	mutex_lock(&matrix_dev->mdevs_lock);
	vfio_ap_mdev_reset_queues(matrix_mdev);
	vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
	list_del(&matrix_mdev->node);
	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_dev->guests_lock);
	vfio_put_device(&matrix_mdev->vdev);
}

#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
			 "already assigned to %s"

static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev,
					 unsigned long *apm,
					 unsigned long *aqm)
{
	unsigned long apid, apqi;
	const struct device *dev = mdev_dev(matrix_mdev->mdev);
	const char *mdev_name = dev_name(dev);

	for_each_set_bit_inv(apid, apm, AP_DEVICES)
		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
			dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name);
}

/**
 * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
 *
 * @mdev_apm: mask indicating the APIDs of the APQNs to be verified
 * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
 *
 * Verifies that each APQN derived from the Cartesian product of a bitmap of
 * AP adapter IDs and AP queue indexes is not configured for any matrix
 * mediated device. AP queue sharing is not allowed.
 *
 * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
 */
static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
					  unsigned long *mdev_aqm)
{
	struct ap_matrix_mdev *matrix_mdev;
	DECLARE_BITMAP(apm, AP_DEVICES);
	DECLARE_BITMAP(aqm, AP_DOMAINS);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		/*
		 * If the input apm and aqm are fields of the matrix_mdev
		 * object, then move on to the next matrix_mdev.
		 */
		if (mdev_apm == matrix_mdev->matrix.apm &&
		    mdev_aqm == matrix_mdev->matrix.aqm)
			continue;

		memset(apm, 0, sizeof(apm));
		memset(aqm, 0, sizeof(aqm));

		/*
		 * We work on full longs, as we can only exclude the leftover
		 * bits in non-inverse order. The leftover is all zeros.
		 */
		if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm,
				AP_DEVICES))
			continue;

		if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm,
				AP_DOMAINS))
			continue;

		vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm);

		return -EADDRINUSE;
	}

	return 0;
}

/**
 * vfio_ap_mdev_validate_masks - verify that the APQNs assigned to the mdev are
 *				 not reserved for the default zcrypt driver and
 *				 are not assigned to another mdev.
 *
 * @matrix_mdev: the mdev to which the APQNs being validated are assigned.
 *
 * Return: One of the following values:
 * o the error returned from the ap_apqn_in_matrix_owned_by_def_drv() function,
 *   most likely -EBUSY indicating the ap_perms_mutex lock is already held.
 * o -EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the
 *   zcrypt default driver.
 * o -EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another
 *   mdev.
 * o Zero if validation succeeded.
 */
static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
{
	if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm,
					       matrix_mdev->matrix.aqm))
		return -EADDRNOTAVAIL;

	return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm,
					      matrix_mdev->matrix.aqm);
}

static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
				      unsigned long apid)
{
	unsigned long apqi;

	for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS)
		vfio_ap_mdev_link_apqn(matrix_mdev,
				       AP_MKQID(apid, apqi));
}

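/*
 * collect_queues_to_reset - add to @qlist each queue of adapter @apid that is
 * bound to @matrix_mdev and whose domain is plugged into the guest's APCB
 * (i.e., set in the shadow AQM).
 */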
static void collect_queues_to_reset(struct ap_matrix_mdev *matrix_mdev,
				    unsigned long apid,
				    struct list_head *qlist)
{
	struct vfio_ap_queue *q;
	unsigned long apqi;

	for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS) {
		q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
		if (q)
			list_add_tail(&q->reset_qnode, qlist);
	}
}

static void reset_queues_for_apid(struct ap_matrix_mdev *matrix_mdev,
				  unsigned long apid)
{
	struct list_head qlist;

	INIT_LIST_HEAD(&qlist);
	collect_queues_to_reset(matrix_mdev, apid, &qlist);
	vfio_ap_mdev_reset_qlist(&qlist);
}

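/*
 * reset_queues_for_apids - collect and reset the queues of every adapter set
 * in @apm_reset that are plugged into the guest's APCB.
 *
 * Return: 0 if @apm_reset is empty or all queues are successfully reset;
 * otherwise -EIO.
 */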
static int reset_queues_for_apids(struct ap_matrix_mdev *matrix_mdev,
				  unsigned long *apm_reset)
{
	struct list_head qlist;
	unsigned long apid;

	if (bitmap_empty(apm_reset, AP_DEVICES))
		return 0;

	INIT_LIST_HEAD(&qlist);

	for_each_set_bit_inv(apid, apm_reset, AP_DEVICES)
		collect_queues_to_reset(matrix_mdev, apid, &qlist);

	return vfio_ap_mdev_reset_qlist(&qlist);
}

/**
 * assign_adapter_store - parses the APID from @buf and sets the
 * corresponding bit in the mediated matrix device's APM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's assign_adapter attribute
 * @buf:	a buffer containing the AP adapter number (APID) to
 *		be assigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APID is valid; otherwise,
 * returns one of the following errors:
 *
 *	1. -EINVAL
 *	   The APID is not a valid number
 *
 *	2. -ENODEV
 *	   The APID exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APQIs have yet been assigned, the APID is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is being used by another mediated
 *	   matrix device
 *
 *	5. -EAGAIN
 *	   A lock required to validate the mdev's AP configuration could not
 *	   be obtained.
 */
static ssize_t assign_adapter_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	int ret;
	unsigned long apid;
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apid);
	if (ret)
		goto done;

	if (apid > matrix_mdev->matrix.apm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(apid, matrix_mdev->matrix.apm)) {
		ret = count;
		goto done;
	}

	set_bit_inv(apid, matrix_mdev->matrix.apm);

	ret = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (ret) {
		clear_bit_inv(apid, matrix_mdev->matrix.apm);
		goto done;
	}

	vfio_ap_mdev_link_adapter(matrix_mdev, apid);

	if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
		reset_queues_for_apids(matrix_mdev, apm_filtered);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);

	return ret;
}
static DEVICE_ATTR_WO(assign_adapter);

static struct vfio_ap_queue
*vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev,
			     unsigned long apid, unsigned long apqi)
{
	struct vfio_ap_queue *q = NULL;

	q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
	/* If the queue is assigned to the matrix mdev, unlink it. */
	if (q)
		vfio_ap_unlink_queue_fr_mdev(q);

	return q;
}

/**
 * vfio_ap_mdev_unlink_adapter - unlink all queues associated with unassigned
 *				 adapter from the matrix mdev to which the
 *				 adapter was assigned.
 * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
 * @apid: the APID of the unassigned adapter.
 * @qlist: list for storing queues associated with unassigned adapter that
 *	   need to be reset.
 */
static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
					unsigned long apid,
					struct list_head *qlist)
{
	unsigned long apqi;
	struct vfio_ap_queue *q;

	for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
		q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);

		if (q && qlist) {
			if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
			    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
				list_add_tail(&q->reset_qnode, qlist);
		}
	}
}

static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
					    unsigned long apid)
{
	struct vfio_ap_queue *q, *tmpq;
	struct list_head qlist;

	INIT_LIST_HEAD(&qlist);
	vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, &qlist);

	if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
		clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	vfio_ap_mdev_reset_qlist(&qlist);

	list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
		vfio_ap_unlink_mdev_fr_queue(q);
		list_del(&q->reset_qnode);
	}
}

/**
 * unassign_adapter_store - parses the APID from @buf and clears the
 * corresponding bit in the mediated matrix device's APM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's unassign_adapter attribute
 * @buf:	a buffer containing the adapter number (APID) to be unassigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the APID is not a number
 *	-ENODEV if the APID exceeds the maximum value configured for the
 *		system
 */
static ssize_t unassign_adapter_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	int ret;
	unsigned long apid;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apid);
	if (ret)
		goto done;

	if (apid > matrix_mdev->matrix.apm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
	vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_adapter);

static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev,
				     unsigned long apqi)
{
	unsigned long apid;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES)
		vfio_ap_mdev_link_apqn(matrix_mdev,
				       AP_MKQID(apid, apqi));
}

/**
 * assign_domain_store - parses the APQI from @buf and sets the
 * corresponding bit in the mediated matrix device's AQM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's assign_domain attribute
 * @buf:	a buffer containing the AP queue index (APQI) of the domain to
 *		be assigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APQI is valid; otherwise returns
 * one of the following errors:
 *
 *	1. -EINVAL
 *	   The APQI is not a valid number
 *
 *	2. -ENODEV
 *	   The APQI exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APIDs have yet been assigned, the APQI is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is being used by another mediated
 *	   matrix device
 *
 *	5. -EAGAIN
 *	   The lock required to validate the mdev's AP configuration could not
 *	   be obtained.
 */
static ssize_t assign_domain_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	int ret;
	unsigned long apqi;
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apqi);
	if (ret)
		goto done;

	if (apqi > matrix_mdev->matrix.aqm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
		ret = count;
		goto done;
	}

	set_bit_inv(apqi, matrix_mdev->matrix.aqm);

	ret = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (ret) {
		clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
		goto done;
	}

	vfio_ap_mdev_link_domain(matrix_mdev, apqi);

	if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
		reset_queues_for_apids(matrix_mdev, apm_filtered);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);

	return ret;
}
static DEVICE_ATTR_WO(assign_domain);

static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
				       unsigned long apqi,
				       struct list_head *qlist)
{
	unsigned long apid;
	struct vfio_ap_queue *q;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
		q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);

		if (q && qlist) {
			if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
			    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
				list_add_tail(&q->reset_qnode, qlist);
		}
	}
}

static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
					   unsigned long apqi)
{
	struct vfio_ap_queue *q, *tmpq;
	struct list_head qlist;

	INIT_LIST_HEAD(&qlist);
	vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, &qlist);

	if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
		clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	vfio_ap_mdev_reset_qlist(&qlist);

	list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
		vfio_ap_unlink_mdev_fr_queue(q);
		list_del(&q->reset_qnode);
	}
}

/**
 * unassign_domain_store - parses the APQI from @buf and clears the
 * corresponding bit in the mediated matrix device's AQM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's unassign_domain attribute
 * @buf:	a buffer containing the AP queue index (APQI) of the domain to
 *		be unassigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APQI is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the APQI is not a number
 *	-ENODEV if the APQI exceeds the maximum value configured for the system
 */
static ssize_t unassign_domain_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	int ret;
	unsigned long apqi;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apqi);
	if (ret)
		goto done;

	if (apqi > matrix_mdev->matrix.aqm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
	vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
	ret = count;

done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_domain);

/**
 * assign_control_domain_store - parses the domain ID from @buf and sets
 * the corresponding bit in the mediated matrix device's ADM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's assign_control_domain attribute
 * @buf:	a buffer containing the domain ID to be assigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the domain ID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the ID is not a number
 *	-ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t assign_control_domain_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	int ret;
	unsigned long id;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &id);
	if (ret)
		goto done;

	if (id > matrix_mdev->matrix.adm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(id, matrix_mdev->matrix.adm)) {
		ret = count;
		goto done;
	}

	/* Set the bit in the ADM (bitmask) corresponding to the AP control
	 * domain number (id). The bits in the mask, from most significant to
	 * least significant, correspond to IDs 0 up to the one less than the
	 * number of control domains that can be assigned.
	 */
	set_bit_inv(id, matrix_mdev->matrix.adm);
	if (vfio_ap_mdev_filter_cdoms(matrix_mdev))
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(assign_control_domain);

/**
 * unassign_control_domain_store - parses the domain ID from @buf and
 * clears the corresponding bit in the mediated matrix device's ADM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's unassign_control_domain attribute
 * @buf:	a buffer containing the domain ID to be unassigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the domain ID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the ID is not a number
 *	-ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t unassign_control_domain_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	int ret;
	unsigned long domid;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &domid);
	if (ret)
		goto done;

	if (domid > matrix_mdev->matrix.adm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv(domid, matrix_mdev->matrix.adm);

	if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
		clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_control_domain);

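/*
 * control_domains_show - print, one ID per line, the control domains assigned
 * to the matrix mdev; the mdevs_lock is held while the ADM is read.
 */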
static ssize_t control_domains_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	unsigned long id;
	int nchars = 0;
	int n;
	char *bufpos = buf;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
	unsigned long max_domid = matrix_mdev->matrix.adm_max;

	mutex_lock(&matrix_dev->mdevs_lock);
	for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
		n = sprintf(bufpos, "%04lx\n", id);
		bufpos += n;
		nchars += n;
	}
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(control_domains);

static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
{
	char *bufpos = buf;
	unsigned long apid;
	unsigned long apqi;
	unsigned long apid1;
	unsigned long apqi1;
	unsigned long napm_bits = matrix->apm_max + 1;
	unsigned long naqm_bits = matrix->aqm_max + 1;
	int nchars = 0;
	int n;

	apid1 = find_first_bit_inv(matrix->apm, napm_bits);
	apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);

	if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
		for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
			for_each_set_bit_inv(apqi, matrix->aqm,
					     naqm_bits) {
				n = sprintf(bufpos, "%02lx.%04lx\n", apid,
					    apqi);
				bufpos += n;
				nchars += n;
			}
		}
	} else if (apid1 < napm_bits) {
		for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
			n = sprintf(bufpos, "%02lx.\n", apid);
			bufpos += n;
			nchars += n;
		}
	} else if (apqi1 < naqm_bits) {
		for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) {
			n = sprintf(bufpos, ".%04lx\n", apqi);
			bufpos += n;
			nchars += n;
		}
	}

	return nchars;
}

static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	ssize_t nchars;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf);
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(matrix);

static ssize_t guest_matrix_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t nchars;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf);
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(guest_matrix);

static struct attribute *vfio_ap_mdev_attrs[] = {
	&dev_attr_assign_adapter.attr,
	&dev_attr_unassign_adapter.attr,
	&dev_attr_assign_domain.attr,
	&dev_attr_unassign_domain.attr,
	&dev_attr_assign_control_domain.attr,
	&dev_attr_unassign_control_domain.attr,
	&dev_attr_control_domains.attr,
	&dev_attr_matrix.attr,
	&dev_attr_guest_matrix.attr,
	NULL,
};

static struct attribute_group vfio_ap_mdev_attr_group = {
	.attrs = vfio_ap_mdev_attrs
};

static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
	&vfio_ap_mdev_attr_group,
	NULL
};

/**
 * vfio_ap_mdev_set_kvm - sets all data for @matrix_mdev that are needed
 * to manage AP resources for the guest whose state is represented by @kvm
 *
 * @matrix_mdev: a mediated matrix device
 * @kvm: reference to KVM instance
 *
 * Return: 0 if no other mediated matrix device has a reference to @kvm;
 * otherwise, returns -EPERM.
 */
static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
				struct kvm *kvm)
{
	struct ap_matrix_mdev *m;

	if (kvm->arch.crypto.crycbd) {
		down_write(&kvm->arch.crypto.pqap_hook_rwsem);
		kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
		up_write(&kvm->arch.crypto.pqap_hook_rwsem);

		get_update_locks_for_kvm(kvm);

		list_for_each_entry(m, &matrix_dev->mdev_list, node) {
			if (m != matrix_mdev && m->kvm == kvm) {
				release_update_locks_for_kvm(kvm);
				return -EPERM;
			}
		}

		kvm_get_kvm(kvm);
		matrix_mdev->kvm = kvm;
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

		release_update_locks_for_kvm(kvm);
	}

	return 0;
}

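/*
 * unmap_iova - disable interrupts for each queue of @matrix_mdev whose pinned
 * NIB falls within the IOVA range being unmapped; disabling the IRQ unpins
 * the affected page.
 */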
static void unmap_iova(struct ap_matrix_mdev *matrix_mdev, u64 iova, u64 length)
{
	struct ap_queue_table *qtable = &matrix_mdev->qtable;
	struct vfio_ap_queue *q;
	int loop_cursor;

	hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
		if (q->saved_iova >= iova && q->saved_iova < iova + length)
			vfio_ap_irq_disable(q);
	}
}

static void vfio_ap_mdev_dma_unmap(struct vfio_device *vdev, u64 iova,
				   u64 length)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	mutex_lock(&matrix_dev->mdevs_lock);

	unmap_iova(matrix_mdev, iova, length);

	mutex_unlock(&matrix_dev->mdevs_lock);
}

/**
 * vfio_ap_mdev_unset_kvm - performs clean-up of resources no longer needed
 * by @matrix_mdev.
 *
 * @matrix_mdev: a matrix mediated device
 */
static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
{
	struct kvm *kvm = matrix_mdev->kvm;

	if (kvm && kvm->arch.crypto.crycbd) {
		down_write(&kvm->arch.crypto.pqap_hook_rwsem);
		kvm->arch.crypto.pqap_hook = NULL;
		up_write(&kvm->arch.crypto.pqap_hook_rwsem);

		get_update_locks_for_kvm(kvm);

		kvm_arch_crypto_clear_masks(kvm);
		vfio_ap_mdev_reset_queues(matrix_mdev);
		kvm_put_kvm(kvm);
		matrix_mdev->kvm = NULL;

		release_update_locks_for_kvm(kvm);
	}
}

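/*
 * vfio_ap_find_queue - find the vfio_ap_queue struct for the queue device with
 * the specified APQN, but only if the device is bound to the vfio_ap device
 * driver.
 *
 * Return: the driver data of the queue device, or NULL.
 */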
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
	struct ap_queue *queue;
	struct vfio_ap_queue *q = NULL;

	queue = ap_get_qdev(apqn);
	if (!queue)
		return NULL;

	if (queue->ap_dev.device.driver == &matrix_dev->vfio_ap_drv->driver)
		q = dev_get_drvdata(&queue->ap_dev.device);

	put_device(&queue->ap_dev.device);

	return q;
}

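/*
 * apq_status_check - map the TAPQ response code for @apqn to a
 * reset-verification result: 0 if the reset completed, -EBUSY if it is still
 * in progress, -EAGAIN if the ZAPQ must be re-issued, or -EIO for an
 * unexpected response code.
 */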
static int apq_status_check(int apqn, struct ap_queue_status *status)
{
	switch (status->response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_DECONFIGURED:
		return 0;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
		return -EBUSY;
	case AP_RESPONSE_ASSOC_SECRET_NOT_UNIQUE:
	case AP_RESPONSE_ASSOC_FAILED:
		/*
		 * These asynchronous response codes indicate a PQAP(AAPQ)
		 * instruction to associate a secret with the guest failed. All
		 * subsequent AP instructions will end with the asynchronous
		 * response code until the AP queue is reset; so, let's return
		 * a value indicating a reset needs to be performed again.
		 */
		return -EAGAIN;
	default:
		WARN(true,
		     "failed to verify reset of queue %02x.%04x: TAPQ rc=%u\n",
		     AP_QID_CARD(apqn), AP_QID_QUEUE(apqn),
		     status->response_code);
		return -EIO;
	}
}

#define WAIT_MSG "Waited %dms for reset of queue %02x.%04x (%u, %u, %u)"

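/*
 * apq_reset_check - work function that polls a queue with TAPQ every
 * AP_RESET_INTERVAL ms until the reset initiated by vfio_ap_mdev_reset_queue()
 * completes, re-issuing the ZAPQ when indicated by apq_status_check(), then
 * frees the queue's IRQ resources if any are still held.
 */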
static void apq_reset_check(struct work_struct *reset_work)
{
	int ret = -EBUSY, elapsed = 0;
	struct ap_queue_status status;
	struct vfio_ap_queue *q;

	q = container_of(reset_work, struct vfio_ap_queue, reset_work);
	memcpy(&status, &q->reset_status, sizeof(status));
	while (true) {
		msleep(AP_RESET_INTERVAL);
		elapsed += AP_RESET_INTERVAL;
		status = ap_tapq(q->apqn, NULL);
		ret = apq_status_check(q->apqn, &status);
		if (ret == -EIO)
			return;
		if (ret == -EBUSY) {
			pr_notice_ratelimited(WAIT_MSG, elapsed,
					      AP_QID_CARD(q->apqn),
					      AP_QID_QUEUE(q->apqn),
					      status.response_code,
					      status.queue_empty,
					      status.irq_enabled);
		} else {
			if (q->reset_status.response_code == AP_RESPONSE_RESET_IN_PROGRESS ||
			    q->reset_status.response_code == AP_RESPONSE_BUSY ||
			    q->reset_status.response_code == AP_RESPONSE_STATE_CHANGE_IN_PROGRESS ||
			    ret == -EAGAIN) {
				status = ap_zapq(q->apqn, 0);
				memcpy(&q->reset_status, &status, sizeof(status));
				continue;
			}
			/*
			 * When an AP adapter is deconfigured, the
			 * associated queues are reset, so let's set the
			 * status response code to 0 so the queue may be
			 * passed through (i.e., not filtered)
			 */
			if (status.response_code == AP_RESPONSE_DECONFIGURED)
				q->reset_status.response_code = 0;
			if (q->saved_isc != VFIO_AP_ISC_INVALID)
				vfio_ap_free_aqic_resources(q);
			break;
		}
	}
}

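/*
 * vfio_ap_mdev_reset_queue - initiate an asynchronous reset of @q via ZAPQ;
 * the response code is saved in q->reset_status and, unless the ZAPQ fails
 * outright, apq_reset_check() is scheduled on a work queue to verify that the
 * reset completes.
 */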
1765static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
1766{
1767 struct ap_queue_status status;
1768
1769 if (!q)
1770 return;
1771 status = ap_zapq(q->apqn, 0);
1772 memcpy(&q->reset_status, &status, sizeof(status));
1773 switch (status.response_code) {
1774 case AP_RESPONSE_NORMAL:
1775 case AP_RESPONSE_RESET_IN_PROGRESS:
1776 case AP_RESPONSE_BUSY:
1777 case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
1778 /*
1779		 * Let's verify on a work queue whether the ZAPQ completed successfully.
1780 */
1781 queue_work(system_long_wq, &q->reset_work);
1782 break;
1783 case AP_RESPONSE_DECONFIGURED:
1784 /*
1785 * When an AP adapter is deconfigured, the associated
1786 * queues are reset, so let's set the status response code to 0
1787 * so the queue may be passed through (i.e., not filtered).
1788 */
1789 q->reset_status.response_code = 0;
1790 vfio_ap_free_aqic_resources(q);
1791 break;
1792 default:
1793 WARN(true,
1794 "PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n",
1795 AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
1796 status.response_code);
1797 }
1798}
1799
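/**
 * vfio_ap_mdev_reset_queues - reset all of the queues assigned to a matrix
 *			       mdev and wait for the resets to complete.
 *
 * @matrix_mdev: the matrix mdev whose queues shall be reset.
 *
 * Return: 0 if all resets completed successfully; otherwise, -EIO.
 */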
1800static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
1801{
1802 int ret = 0, loop_cursor;
1803 struct vfio_ap_queue *q;
1804
1805 hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode)
1806 vfio_ap_mdev_reset_queue(q);
1807
1808 hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) {
1809 flush_work(&q->reset_work);
1810
1811 if (q->reset_status.response_code)
1812 ret = -EIO;
1813 }
1814
1815 return ret;
1816}
1817
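/**
 * vfio_ap_mdev_reset_qlist - reset each queue on a list of queues linked via
 *			      their reset_qnode field and wait for the resets
 *			      to complete.
 *
 * @qlist: the list of queues to reset.
 *
 * Return: 0 if all resets completed successfully; otherwise, -EIO.
 */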
1818static int vfio_ap_mdev_reset_qlist(struct list_head *qlist)
1819{
1820 int ret = 0;
1821 struct vfio_ap_queue *q;
1822
1823 list_for_each_entry(q, qlist, reset_qnode)
1824 vfio_ap_mdev_reset_queue(q);
1825
1826 list_for_each_entry(q, qlist, reset_qnode) {
1827 flush_work(&q->reset_work);
1828
1829 if (q->reset_status.response_code)
1830 ret = -EIO;
1831 }
1832
1833 return ret;
1834}
1835
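/**
 * vfio_ap_mdev_open_device - vfio callback invoked when the mdev's device file
 *			      descriptor is opened by userspace.
 *
 * @vdev: the vfio device backed by the matrix mdev.
 *
 * Return: 0 if the matrix mdev is successfully attached to the KVM guest;
 *	   -EINVAL if no KVM is associated with @vdev; otherwise, the error
 *	   returned by vfio_ap_mdev_set_kvm.
 */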
1836static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
1837{
1838 struct ap_matrix_mdev *matrix_mdev =
1839 container_of(vdev, struct ap_matrix_mdev, vdev);
1840
1841 if (!vdev->kvm)
1842 return -EINVAL;
1843
1844 return vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
1845}
1846
1847static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
1848{
1849 struct ap_matrix_mdev *matrix_mdev =
1850 container_of(vdev, struct ap_matrix_mdev, vdev);
1851
1852 vfio_ap_mdev_unset_kvm(matrix_mdev);
1853}
1854
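/**
 * vfio_ap_mdev_request - relay a request to release the mdev to userspace via
 *			  the eventfd registered with the VFIO_DEVICE_SET_IRQS
 *			  ioctl.
 *
 * @vdev: the vfio device being requested.
 * @count: the number of times the request has been signaled for @vdev.
 *
 * If no eventfd has been registered, log a notice on the first request so the
 * user knows the device will remain blocked until it is released.
 */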
1855static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
1856{
1857 struct device *dev = vdev->dev;
1858 struct ap_matrix_mdev *matrix_mdev;
1859
1860 matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev);
1861
1862 if (matrix_mdev->req_trigger) {
1863 if (!(count % 10))
1864 dev_notice_ratelimited(dev,
1865 "Relaying device request to user (#%u)\n",
1866 count);
1867
1868 eventfd_signal(matrix_mdev->req_trigger);
1869 } else if (count == 0) {
1870 dev_notice(dev,
1871 "No device request registered, blocked until released by user\n");
1872 }
1873}
1874
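/**
 * vfio_ap_mdev_get_device_info - handle the VFIO_DEVICE_GET_INFO ioctl.
 *
 * @arg: the userspace address of a vfio_device_info structure.
 *
 * Return: 0 on success; -EFAULT if the structure can not be copied to or from
 *	   userspace, or -EINVAL if its argsz field is too small.
 */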
1875static int vfio_ap_mdev_get_device_info(unsigned long arg)
1876{
1877 unsigned long minsz;
1878 struct vfio_device_info info;
1879
1880 minsz = offsetofend(struct vfio_device_info, num_irqs);
1881
1882 if (copy_from_user(&info, (void __user *)arg, minsz))
1883 return -EFAULT;
1884
1885 if (info.argsz < minsz)
1886 return -EINVAL;
1887
1888 info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
1889 info.num_regions = 0;
1890 info.num_irqs = VFIO_AP_NUM_IRQS;
1891
1892 return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
1893}
1894
1895static ssize_t vfio_ap_get_irq_info(unsigned long arg)
1896{
1897 unsigned long minsz;
1898 struct vfio_irq_info info;
1899
1900 minsz = offsetofend(struct vfio_irq_info, count);
1901
1902 if (copy_from_user(&info, (void __user *)arg, minsz))
1903 return -EFAULT;
1904
1905 if (info.argsz < minsz || info.index >= VFIO_AP_NUM_IRQS)
1906 return -EINVAL;
1907
1908 switch (info.index) {
1909 case VFIO_AP_REQ_IRQ_INDEX:
1910 info.count = 1;
1911 info.flags = VFIO_IRQ_INFO_EVENTFD;
1912 break;
1913 default:
1914 return -EINVAL;
1915 }
1916
1917 return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
1918}
1919
1920static int vfio_ap_irq_set_init(struct vfio_irq_set *irq_set, unsigned long arg)
1921{
1922 int ret;
1923 size_t data_size;
1924 unsigned long minsz;
1925
1926 minsz = offsetofend(struct vfio_irq_set, count);
1927
1928 if (copy_from_user(irq_set, (void __user *)arg, minsz))
1929 return -EFAULT;
1930
1931 ret = vfio_set_irqs_validate_and_prepare(irq_set, 1, VFIO_AP_NUM_IRQS,
1932 &data_size);
1933 if (ret)
1934 return ret;
1935
1936 if (!(irq_set->flags & VFIO_IRQ_SET_ACTION_TRIGGER))
1937 return -EINVAL;
1938
1939 return 0;
1940}
1941
1942static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev,
1943 unsigned long arg)
1944{
1945 s32 fd;
1946 void __user *data;
1947 unsigned long minsz;
1948 struct eventfd_ctx *req_trigger;
1949
1950 minsz = offsetofend(struct vfio_irq_set, count);
1951 data = (void __user *)(arg + minsz);
1952
1953 if (get_user(fd, (s32 __user *)data))
1954 return -EFAULT;
1955
1956 if (fd == -1) {
1957 if (matrix_mdev->req_trigger)
1958 eventfd_ctx_put(matrix_mdev->req_trigger);
1959 matrix_mdev->req_trigger = NULL;
1960 } else if (fd >= 0) {
1961 req_trigger = eventfd_ctx_fdget(fd);
1962 if (IS_ERR(req_trigger))
1963 return PTR_ERR(req_trigger);
1964
1965 if (matrix_mdev->req_trigger)
1966 eventfd_ctx_put(matrix_mdev->req_trigger);
1967
1968 matrix_mdev->req_trigger = req_trigger;
1969 } else {
1970 return -EINVAL;
1971 }
1972
1973 return 0;
1974}
1975
1976static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev,
1977 unsigned long arg)
1978{
1979 int ret;
1980 struct vfio_irq_set irq_set;
1981
1982 ret = vfio_ap_irq_set_init(&irq_set, arg);
1983 if (ret)
1984 return ret;
1985
1986 switch (irq_set.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
1987 case VFIO_IRQ_SET_DATA_EVENTFD:
1988 switch (irq_set.index) {
1989 case VFIO_AP_REQ_IRQ_INDEX:
1990 return vfio_ap_set_request_irq(matrix_mdev, arg);
1991 default:
1992 return -EINVAL;
1993 }
1994 default:
1995 return -EINVAL;
1996 }
1997}
1998
1999static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
2000 unsigned int cmd, unsigned long arg)
2001{
2002 struct ap_matrix_mdev *matrix_mdev =
2003 container_of(vdev, struct ap_matrix_mdev, vdev);
2004 int ret;
2005
2006 mutex_lock(&matrix_dev->mdevs_lock);
2007 switch (cmd) {
2008 case VFIO_DEVICE_GET_INFO:
2009 ret = vfio_ap_mdev_get_device_info(arg);
2010 break;
2011 case VFIO_DEVICE_RESET:
2012 ret = vfio_ap_mdev_reset_queues(matrix_mdev);
2013 break;
2014 case VFIO_DEVICE_GET_IRQ_INFO:
2015 ret = vfio_ap_get_irq_info(arg);
2016 break;
2017 case VFIO_DEVICE_SET_IRQS:
2018 ret = vfio_ap_set_irqs(matrix_mdev, arg);
2019 break;
2020 default:
2021 ret = -EOPNOTSUPP;
2022 break;
2023 }
2024 mutex_unlock(&matrix_dev->mdevs_lock);
2025
2026 return ret;
2027}
2028
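/**
 * vfio_ap_mdev_for_queue - find the matrix mdev to which a queue's APQN is
 *			    assigned.
 *
 * @q: the queue whose APQN shall be searched for.
 *
 * Return: the matrix mdev whose APM and AQM contain the queue's APID and APQI
 *	   respectively; otherwise, NULL.
 */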
2029static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
2030{
2031 struct ap_matrix_mdev *matrix_mdev;
2032 unsigned long apid = AP_QID_CARD(q->apqn);
2033 unsigned long apqi = AP_QID_QUEUE(q->apqn);
2034
2035 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
2036 if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
2037 test_bit_inv(apqi, matrix_mdev->matrix.aqm))
2038 return matrix_mdev;
2039 }
2040
2041 return NULL;
2042}
2043
2044static ssize_t status_show(struct device *dev,
2045 struct device_attribute *attr,
2046 char *buf)
2047{
2048 ssize_t nchars = 0;
2049 struct vfio_ap_queue *q;
2050 unsigned long apid, apqi;
2051 struct ap_matrix_mdev *matrix_mdev;
2052 struct ap_device *apdev = to_ap_dev(dev);
2053
2054 mutex_lock(&matrix_dev->mdevs_lock);
2055 q = dev_get_drvdata(&apdev->device);
2056 matrix_mdev = vfio_ap_mdev_for_queue(q);
2057
2058 /* If the queue is assigned to the matrix mediated device, then
2059 * determine whether it is passed through to a guest; otherwise,
2060 * indicate that it is unassigned.
2061 */
2062 if (matrix_mdev) {
2063 apid = AP_QID_CARD(q->apqn);
2064 apqi = AP_QID_QUEUE(q->apqn);
2065 /*
2066 * If the queue is passed through to the guest, then indicate
2067 * that it is in use; otherwise, indicate that it is
2068 * merely assigned to a matrix mediated device.
2069 */
2070 if (matrix_mdev->kvm &&
2071 test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
2072 test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
2073 nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
2074 AP_QUEUE_IN_USE);
2075 else
2076 nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
2077 AP_QUEUE_ASSIGNED);
2078 } else {
2079 nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
2080 AP_QUEUE_UNASSIGNED);
2081 }
2082
2083 mutex_unlock(&matrix_dev->mdevs_lock);
2084
2085 return nchars;
2086}
2087
2088static DEVICE_ATTR_RO(status);
2089
2090static struct attribute *vfio_queue_attrs[] = {
2091 &dev_attr_status.attr,
2092 NULL,
2093};
2094
2095static const struct attribute_group vfio_queue_attr_group = {
2096 .attrs = vfio_queue_attrs,
2097};
2098
2099static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
2100 .init = vfio_ap_mdev_init_dev,
2101 .open_device = vfio_ap_mdev_open_device,
2102 .close_device = vfio_ap_mdev_close_device,
2103 .ioctl = vfio_ap_mdev_ioctl,
2104 .dma_unmap = vfio_ap_mdev_dma_unmap,
2105 .bind_iommufd = vfio_iommufd_emulated_bind,
2106 .unbind_iommufd = vfio_iommufd_emulated_unbind,
2107 .attach_ioas = vfio_iommufd_emulated_attach_ioas,
2108 .detach_ioas = vfio_iommufd_emulated_detach_ioas,
2109 .request = vfio_ap_mdev_request
2110};
2111
2112static struct mdev_driver vfio_ap_matrix_driver = {
2113 .device_api = VFIO_DEVICE_API_AP_STRING,
2114 .max_instances = MAX_ZDEV_ENTRIES_EXT,
2115 .driver = {
2116 .name = "vfio_ap_mdev",
2117 .owner = THIS_MODULE,
2118 .mod_name = KBUILD_MODNAME,
2119 .dev_groups = vfio_ap_mdev_attr_groups,
2120 },
2121 .probe = vfio_ap_mdev_probe,
2122 .remove = vfio_ap_mdev_remove,
2123};
2124
2125int vfio_ap_mdev_register(void)
2126{
2127 int ret;
2128
2129 ret = mdev_register_driver(&vfio_ap_matrix_driver);
2130 if (ret)
2131 return ret;
2132
2133 matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT;
2134 matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT;
2135 matrix_dev->mdev_types[0] = &matrix_dev->mdev_type;
2136 ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device,
2137 &vfio_ap_matrix_driver,
2138 matrix_dev->mdev_types, 1);
2139 if (ret)
2140 goto err_driver;
2141 return 0;
2142
2143err_driver:
2144 mdev_unregister_driver(&vfio_ap_matrix_driver);
2145 return ret;
2146}
2147
2148void vfio_ap_mdev_unregister(void)
2149{
2150 mdev_unregister_parent(&matrix_dev->parent);
2151 mdev_unregister_driver(&vfio_ap_matrix_driver);
2152}
2153
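/**
 * vfio_ap_mdev_probe_queue - callback invoked when an AP queue device is bound
 *			      to the vfio_ap device driver.
 *
 * @apdev: the AP queue device being probed.
 *
 * Allocates and initializes a vfio_ap_queue object for the queue and, if its
 * APQN is assigned to a matrix mdev, links the queue to that mdev and updates
 * the guest's AP configuration if appropriate.
 *
 * Return: 0 if the queue device is successfully probed; otherwise, an error.
 */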
2154int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
2155{
2156 int ret;
2157 struct vfio_ap_queue *q;
2158 DECLARE_BITMAP(apm_filtered, AP_DEVICES);
2159 struct ap_matrix_mdev *matrix_mdev;
2160
2161 ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
2162 if (ret)
2163 return ret;
2164
2165 q = kzalloc(sizeof(*q), GFP_KERNEL);
2166 if (!q) {
2167 ret = -ENOMEM;
2168 goto err_remove_group;
2169 }
2170
2171 q->apqn = to_ap_queue(&apdev->device)->qid;
2172 q->saved_isc = VFIO_AP_ISC_INVALID;
2173 memset(&q->reset_status, 0, sizeof(q->reset_status));
2174 INIT_WORK(&q->reset_work, apq_reset_check);
2175 matrix_mdev = get_update_locks_by_apqn(q->apqn);
2176
2177 if (matrix_mdev) {
2178 vfio_ap_mdev_link_queue(matrix_mdev, q);
2179
2180 /*
2181		 * If we're in the process of handling the addition of adapters or
2182 * domains to the host's AP configuration, then let the
2183 * vfio_ap device driver's on_scan_complete callback filter the
2184 * matrix and update the guest's AP configuration after all of
2185 * the new queue devices are probed.
2186 */
2187 if (!bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) ||
2188 !bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS))
2189 goto done;
2190
2191 if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
2192 vfio_ap_mdev_update_guest_apcb(matrix_mdev);
2193 reset_queues_for_apids(matrix_mdev, apm_filtered);
2194 }
2195 }
2196
2197done:
2198 dev_set_drvdata(&apdev->device, q);
2199 release_update_locks_for_mdev(matrix_mdev);
2200
2201 return ret;
2202
2203err_remove_group:
2204 sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
2205 return ret;
2206}
2207
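/**
 * vfio_ap_mdev_remove_queue - callback invoked when an AP queue device is
 *			       unbound from the vfio_ap device driver.
 *
 * @apdev: the AP queue device being removed.
 *
 * If the queue is passed through to a guest, the adapter is hot unplugged from
 * the guest (a single queue can not be unplugged because queues are defined by
 * the matrix of adapters and domains). The queue is then reset if it is in the
 * host's AP configuration, unlinked from its matrix mdev and freed.
 */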
2208void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
2209{
2210 unsigned long apid, apqi;
2211 struct vfio_ap_queue *q;
2212 struct ap_matrix_mdev *matrix_mdev;
2213
2214 sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
2215 q = dev_get_drvdata(&apdev->device);
2216 get_update_locks_for_queue(q);
2217 matrix_mdev = q->matrix_mdev;
2218 apid = AP_QID_CARD(q->apqn);
2219 apqi = AP_QID_QUEUE(q->apqn);
2220
2221 if (matrix_mdev) {
2222 /* If the queue is assigned to the guest's AP configuration */
2223 if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
2224 test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
2225 /*
2226 * Since the queues are defined via a matrix of adapters
2227 * and domains, it is not possible to hot unplug a
2228 * single queue; so, let's unplug the adapter.
2229 */
2230 clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
2231 vfio_ap_mdev_update_guest_apcb(matrix_mdev);
2232 reset_queues_for_apid(matrix_mdev, apid);
2233 goto done;
2234 }
2235 }
2236
2237 /*
2238 * If the queue is not in the host's AP configuration, then resetting
2239	 * it will fail with response code 01 (APQN not valid); so, let's make
2240 * sure it is in the host's config.
2241 */
2242 if (test_bit_inv(apid, (unsigned long *)matrix_dev->info.apm) &&
2243 test_bit_inv(apqi, (unsigned long *)matrix_dev->info.aqm)) {
2244 vfio_ap_mdev_reset_queue(q);
2245 flush_work(&q->reset_work);
2246 }
2247
2248done:
2249 if (matrix_mdev)
2250 vfio_ap_unlink_queue_fr_mdev(q);
2251
2252 dev_set_drvdata(&apdev->device, NULL);
2253 kfree(q);
2254 release_update_locks_for_mdev(matrix_mdev);
2255}
2256
2257/**
2258 * vfio_ap_mdev_resource_in_use: check whether any of a set of APQNs is
2259 * assigned to a mediated device under the control
2260 * of the vfio_ap device driver.
2261 *
2262 * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check.
2263 * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check.
2264 *
2265 * Return:
2266 * * -EADDRINUSE if one or more of the APQNs specified via @apm/@aqm are
2267 * assigned to a mediated device under the control of the vfio_ap
2268 * device driver.
2269 * * Otherwise, return 0.
2270 */
2271int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
2272{
2273 int ret;
2274
2275 mutex_lock(&matrix_dev->guests_lock);
2276 mutex_lock(&matrix_dev->mdevs_lock);
2277 ret = vfio_ap_mdev_verify_no_sharing(apm, aqm);
2278 mutex_unlock(&matrix_dev->mdevs_lock);
2279 mutex_unlock(&matrix_dev->guests_lock);
2280
2281 return ret;
2282}
2283
2284/**
2285 * vfio_ap_mdev_hot_unplug_cfg - hot unplug the adapters, domains and control
2286 * domains that have been removed from the host's
2287 * AP configuration from a guest.
2288 *
2289 * @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest.
2290 * @aprem: the adapters that have been removed from the host's AP configuration
2291 * @aqrem: the domains that have been removed from the host's AP configuration
2292 * @cdrem: the control domains that have been removed from the host's AP
2293 * configuration.
2294 */
2295static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev,
2296 unsigned long *aprem,
2297 unsigned long *aqrem,
2298 unsigned long *cdrem)
2299{
2300 int do_hotplug = 0;
2301
2302 if (!bitmap_empty(aprem, AP_DEVICES)) {
2303 do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm,
2304 matrix_mdev->shadow_apcb.apm,
2305 aprem, AP_DEVICES);
2306 }
2307
2308 if (!bitmap_empty(aqrem, AP_DOMAINS)) {
2309 do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm,
2310 matrix_mdev->shadow_apcb.aqm,
2311					    aqrem, AP_DOMAINS);
2312 }
2313
2314 if (!bitmap_empty(cdrem, AP_DOMAINS))
2315 do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm,
2316 matrix_mdev->shadow_apcb.adm,
2317 cdrem, AP_DOMAINS);
2318
2319 if (do_hotplug)
2320 vfio_ap_mdev_update_guest_apcb(matrix_mdev);
2321}
2322
2323/**
2324 * vfio_ap_mdev_cfg_remove - determines which guests are using the adapters,
2325 * domains and control domains that have been removed
2326 * from the host AP configuration and unplugs them
2327 * from those guests.
2328 *
2329 * @ap_remove: bitmap specifying which adapters have been removed from the host
2330 * config.
2331 * @aq_remove: bitmap specifying which domains have been removed from the host
2332 * config.
2333 * @cd_remove: bitmap specifying which control domains have been removed from
2334 * the host config.
2335 */
2336static void vfio_ap_mdev_cfg_remove(unsigned long *ap_remove,
2337 unsigned long *aq_remove,
2338 unsigned long *cd_remove)
2339{
2340 struct ap_matrix_mdev *matrix_mdev;
2341 DECLARE_BITMAP(aprem, AP_DEVICES);
2342 DECLARE_BITMAP(aqrem, AP_DOMAINS);
2343 DECLARE_BITMAP(cdrem, AP_DOMAINS);
2344 int do_remove = 0;
2345
2346 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
2347 mutex_lock(&matrix_mdev->kvm->lock);
2348 mutex_lock(&matrix_dev->mdevs_lock);
2349
2350 do_remove |= bitmap_and(aprem, ap_remove,
2351 matrix_mdev->matrix.apm,
2352 AP_DEVICES);
2353 do_remove |= bitmap_and(aqrem, aq_remove,
2354 matrix_mdev->matrix.aqm,
2355 AP_DOMAINS);
2356		do_remove |= bitmap_and(cdrem, cd_remove,
2357 matrix_mdev->matrix.adm,
2358 AP_DOMAINS);
2359
2360 if (do_remove)
2361 vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem,
2362 cdrem);
2363
2364 mutex_unlock(&matrix_dev->mdevs_lock);
2365 mutex_unlock(&matrix_mdev->kvm->lock);
2366 }
2367}
2368
2369/**
2370 * vfio_ap_mdev_on_cfg_remove - responds to the removal of adapters, domains and
2371 * control domains from the host AP configuration
2372 * by unplugging them from the guests that are
2373 * using them.
2374 * @cur_config_info: the current host AP configuration information
2375 * @prev_config_info: the previous host AP configuration information
2376 */
2377static void vfio_ap_mdev_on_cfg_remove(struct ap_config_info *cur_config_info,
2378 struct ap_config_info *prev_config_info)
2379{
2380 int do_remove;
2381 DECLARE_BITMAP(aprem, AP_DEVICES);
2382 DECLARE_BITMAP(aqrem, AP_DOMAINS);
2383 DECLARE_BITMAP(cdrem, AP_DOMAINS);
2384
2385 do_remove = bitmap_andnot(aprem,
2386 (unsigned long *)prev_config_info->apm,
2387 (unsigned long *)cur_config_info->apm,
2388 AP_DEVICES);
2389 do_remove |= bitmap_andnot(aqrem,
2390 (unsigned long *)prev_config_info->aqm,
2391 (unsigned long *)cur_config_info->aqm,
2392				   AP_DOMAINS);
2393 do_remove |= bitmap_andnot(cdrem,
2394 (unsigned long *)prev_config_info->adm,
2395 (unsigned long *)cur_config_info->adm,
2396				   AP_DOMAINS);
2397
2398 if (do_remove)
2399 vfio_ap_mdev_cfg_remove(aprem, aqrem, cdrem);
2400}
2401
2402/**
2403 * vfio_ap_filter_apid_by_qtype: filter APIDs from an AP mask for adapters that
2404 * are older than AP type 10 (CEX4).
2405 * @apm: a bitmap of the APIDs to examine
2406 * @aqm: a bitmap of the APQIs of the queues to query for the AP type.
2407 */
2408static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
2409{
2410 bool apid_cleared;
2411 struct ap_queue_status status;
2412 unsigned long apid, apqi;
2413 struct ap_tapq_hwinfo info;
2414
2415 for_each_set_bit_inv(apid, apm, AP_DEVICES) {
2416 apid_cleared = false;
2417
2418 for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
2419 status = ap_test_queue(AP_MKQID(apid, apqi), 1, &info);
2420 switch (status.response_code) {
2421 /*
2422 * According to the architecture in each case
2423 * below, the queue's info should be filled.
2424 */
2425 case AP_RESPONSE_NORMAL:
2426 case AP_RESPONSE_RESET_IN_PROGRESS:
2427 case AP_RESPONSE_DECONFIGURED:
2428 case AP_RESPONSE_CHECKSTOPPED:
2429 case AP_RESPONSE_BUSY:
2430 /*
2431 * The vfio_ap device driver only
2432 * supports CEX4 and newer adapters, so
2433 * remove the APID if the adapter is
2434 * older than a CEX4.
2435 */
2436 if (info.at < AP_DEVICE_TYPE_CEX4) {
2437 clear_bit_inv(apid, apm);
2438 apid_cleared = true;
2439 }
2440
2441 break;
2442
2443 default:
2444 /*
2445 * If we don't know the adapter type,
2446 * clear its APID since it can't be
2447 * determined whether the vfio_ap
2448 * device driver supports it.
2449 */
2450 clear_bit_inv(apid, apm);
2451 apid_cleared = true;
2452 break;
2453 }
2454
2455 /*
2456 * If we've already cleared the APID from the apm, there
2457			 * is no need to continue examining the remaining AP
2458 * queues to determine the type of the adapter.
2459 */
2460 if (apid_cleared)
2461				break;
2462 }
2463 }
2464}
2465
2466/**
2467 * vfio_ap_mdev_cfg_add - store bitmaps specifying the adapters, domains and
2468 * control domains that have been added to the host's
2469 * AP configuration for each matrix mdev to which they
2470 * are assigned.
2471 *
2472 * @apm_add: a bitmap specifying the adapters that have been added to the AP
2473 * configuration.
2474 * @aqm_add: a bitmap specifying the domains that have been added to the AP
2475 * configuration.
2476 * @adm_add: a bitmap specifying the control domains that have been added to the
2477 * AP configuration.
2478 */
2479static void vfio_ap_mdev_cfg_add(unsigned long *apm_add, unsigned long *aqm_add,
2480 unsigned long *adm_add)
2481{
2482 struct ap_matrix_mdev *matrix_mdev;
2483
2484 if (list_empty(&matrix_dev->mdev_list))
2485 return;
2486
2487 vfio_ap_filter_apid_by_qtype(apm_add, aqm_add);
2488
2489 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
2490 bitmap_and(matrix_mdev->apm_add,
2491 matrix_mdev->matrix.apm, apm_add, AP_DEVICES);
2492 bitmap_and(matrix_mdev->aqm_add,
2493 matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS);
2494 bitmap_and(matrix_mdev->adm_add,
2495			   matrix_mdev->matrix.adm, adm_add, AP_DOMAINS);
2496 }
2497}
2498
2499/**
2500 * vfio_ap_mdev_on_cfg_add - responds to the addition of adapters, domains and
2501 * control domains to the host AP configuration
2502 * by updating the bitmaps that specify what adapters,
2503 * domains and control domains have been added so they
2504 * can be hot plugged into the guest when the AP bus
2505 * scan completes (see vfio_ap_on_scan_complete
2506 * function).
2507 * @cur_config_info: the current AP configuration information
2508 * @prev_config_info: the previous AP configuration information
2509 */
2510static void vfio_ap_mdev_on_cfg_add(struct ap_config_info *cur_config_info,
2511 struct ap_config_info *prev_config_info)
2512{
2513 bool do_add;
2514 DECLARE_BITMAP(apm_add, AP_DEVICES);
2515 DECLARE_BITMAP(aqm_add, AP_DOMAINS);
2516 DECLARE_BITMAP(adm_add, AP_DOMAINS);
2517
2518 do_add = bitmap_andnot(apm_add,
2519 (unsigned long *)cur_config_info->apm,
2520 (unsigned long *)prev_config_info->apm,
2521 AP_DEVICES);
2522 do_add |= bitmap_andnot(aqm_add,
2523 (unsigned long *)cur_config_info->aqm,
2524 (unsigned long *)prev_config_info->aqm,
2525 AP_DOMAINS);
2526 do_add |= bitmap_andnot(adm_add,
2527 (unsigned long *)cur_config_info->adm,
2528 (unsigned long *)prev_config_info->adm,
2529 AP_DOMAINS);
2530
2531 if (do_add)
2532 vfio_ap_mdev_cfg_add(apm_add, aqm_add, adm_add);
2533}
2534
2535/**
2536 * vfio_ap_on_cfg_changed - handles notification of changes to the host AP
2537 * configuration.
2538 *
2539 * @cur_cfg_info: the current host AP configuration
2540 * @prev_cfg_info: the previous host AP configuration
2541 */
2542void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info,
2543 struct ap_config_info *prev_cfg_info)
2544{
2545 if (!cur_cfg_info || !prev_cfg_info)
2546 return;
2547
2548 mutex_lock(&matrix_dev->guests_lock);
2549
2550 vfio_ap_mdev_on_cfg_remove(cur_cfg_info, prev_cfg_info);
2551 vfio_ap_mdev_on_cfg_add(cur_cfg_info, prev_cfg_info);
2552 memcpy(&matrix_dev->info, cur_cfg_info, sizeof(*cur_cfg_info));
2553
2554 mutex_unlock(&matrix_dev->guests_lock);
2555}
2556
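/**
 * vfio_ap_mdev_hot_plug_cfg - hot plug the adapters, domains and control
 *			       domains that have been added to the host's AP
 *			       configuration into a guest to which they are
 *			       assigned.
 *
 * @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest.
 */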
2557static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
2558{
2559	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
2560	bool filter_domains, filter_adapters, filter_cdoms, do_hotplug = false;

	/*
	 * apm_filtered is only initialized by vfio_ap_mdev_filter_matrix, so
	 * zero it here in case that function is not called below.
	 */
	bitmap_zero(apm_filtered, AP_DEVICES);
2561
2562 mutex_lock(&matrix_mdev->kvm->lock);
2563 mutex_lock(&matrix_dev->mdevs_lock);
2564
2565 filter_adapters = bitmap_intersects(matrix_mdev->matrix.apm,
2566 matrix_mdev->apm_add, AP_DEVICES);
2567 filter_domains = bitmap_intersects(matrix_mdev->matrix.aqm,
2568 matrix_mdev->aqm_add, AP_DOMAINS);
2569 filter_cdoms = bitmap_intersects(matrix_mdev->matrix.adm,
2570 matrix_mdev->adm_add, AP_DOMAINS);
2571
2572 if (filter_adapters || filter_domains)
2573 do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered);
2574
2575 if (filter_cdoms)
2576 do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);
2577
2578 if (do_hotplug)
2579 vfio_ap_mdev_update_guest_apcb(matrix_mdev);
2580
2581 reset_queues_for_apids(matrix_mdev, apm_filtered);
2582
2583 mutex_unlock(&matrix_dev->mdevs_lock);
2584 mutex_unlock(&matrix_mdev->kvm->lock);
2585}
2586
2587void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info,
2588 struct ap_config_info *old_config_info)
2589{
2590 struct ap_matrix_mdev *matrix_mdev;
2591
2592 mutex_lock(&matrix_dev->guests_lock);
2593
2594 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
2595 if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) &&
2596 bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) &&
2597 bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS))
2598 continue;
2599
2600 vfio_ap_mdev_hot_plug_cfg(matrix_mdev);
2601 bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES);
2602 bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS);
2603 bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS);
2604 }
2605
2606 mutex_unlock(&matrix_dev->guests_lock);
2607}
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Adjunct processor matrix VFIO device driver callbacks.
4 *
5 * Copyright IBM Corp. 2018
6 *
7 * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
8 * Halil Pasic <pasic@linux.ibm.com>
9 * Pierre Morel <pmorel@linux.ibm.com>
10 */
11#include <linux/string.h>
12#include <linux/vfio.h>
13#include <linux/device.h>
14#include <linux/list.h>
15#include <linux/ctype.h>
16#include <linux/bitops.h>
17#include <linux/kvm_host.h>
18#include <linux/module.h>
19#include <asm/kvm.h>
20#include <asm/zcrypt.h>
21
22#include "vfio_ap_private.h"
23
24#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
25#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
26
27static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
28
29static int match_apqn(struct device *dev, const void *data)
30{
31 struct vfio_ap_queue *q = dev_get_drvdata(dev);
32
33 return (q->apqn == *(int *)(data)) ? 1 : 0;
34}
35
36/**
37 * vfio_ap_get_queue: Retrieve a queue with a specific APQN from a list
38 * @matrix_mdev: the associated mediated matrix
39 * @apqn: The queue APQN
40 *
41 * Retrieve a queue with a specific APQN from the list of the
42 * devices of the vfio_ap_drv.
43 * Verify that the APID and the APQI are set in the matrix.
44 *
45 * Returns the pointer to the associated vfio_ap_queue
46 */
47static struct vfio_ap_queue *vfio_ap_get_queue(
48 struct ap_matrix_mdev *matrix_mdev,
49 int apqn)
50{
51 struct vfio_ap_queue *q;
52 struct device *dev;
53
54 if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
55 return NULL;
56 if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
57 return NULL;
58
59 dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
60 &apqn, match_apqn);
61 if (!dev)
62 return NULL;
63 q = dev_get_drvdata(dev);
64 q->matrix_mdev = matrix_mdev;
65 put_device(dev);
66
67 return q;
68}
69
70/**
71 * vfio_ap_wait_for_irqclear
72 * @apqn: The AP Queue number
73 *
74 * Checks the IRQ bit for the status of this APQN using ap_tapq.
75 * Returns if the ap_tapq function succeeded and the bit is clear.
76 * Returns if ap_tapq function failed with invalid, deconfigured or
77 * checkstopped AP.
78 * Otherwise retries up to 5 times after waiting 20ms.
79 *
80 */
81static void vfio_ap_wait_for_irqclear(int apqn)
82{
83 struct ap_queue_status status;
84 int retry = 5;
85
86 do {
87 status = ap_tapq(apqn, NULL);
88 switch (status.response_code) {
89 case AP_RESPONSE_NORMAL:
90 case AP_RESPONSE_RESET_IN_PROGRESS:
91 if (!status.irq_enabled)
92 return;
93 /* Fall through */
94 case AP_RESPONSE_BUSY:
95 msleep(20);
96 break;
97 case AP_RESPONSE_Q_NOT_AVAIL:
98 case AP_RESPONSE_DECONFIGURED:
99 case AP_RESPONSE_CHECKSTOPPED:
100 default:
101 WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
102 status.response_code, apqn);
103 return;
104 }
105 } while (--retry);
106
107 WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
108 __func__, status.response_code, apqn);
109}
110
111/**
112 * vfio_ap_free_aqic_resources
113 * @q: The vfio_ap_queue
114 *
115 * Unregisters the ISC in the GIB when the saved ISC not invalid.
116 * Unpin the guest's page holding the NIB when it exist.
117 * Reset the saved_pfn and saved_isc to invalid values.
118 *
119 */
120static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
121{
122 if (q->saved_isc != VFIO_AP_ISC_INVALID && q->matrix_mdev)
123 kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
124 if (q->saved_pfn && q->matrix_mdev)
125 vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
126 &q->saved_pfn, 1);
127 q->saved_pfn = 0;
128 q->saved_isc = VFIO_AP_ISC_INVALID;
129}
130
131/**
132 * vfio_ap_irq_disable
133 * @q: The vfio_ap_queue
134 *
135 * Uses ap_aqic to disable the interruption and in case of success, reset
136 * in progress or IRQ disable command already proceeded: calls
137 * vfio_ap_wait_for_irqclear() to check for the IRQ bit to be clear
138 * and calls vfio_ap_free_aqic_resources() to free the resources associated
139 * with the AP interrupt handling.
140 *
141 * In the case the AP is busy, or a reset is in progress,
142 * retries after 20ms, up to 5 times.
143 *
144 * Returns if ap_aqic function failed with invalid, deconfigured or
145 * checkstopped AP.
146 */
147struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
148{
149 struct ap_qirq_ctrl aqic_gisa = {};
150 struct ap_queue_status status;
151 int retries = 5;
152
153 do {
154 status = ap_aqic(q->apqn, aqic_gisa, NULL);
155 switch (status.response_code) {
156 case AP_RESPONSE_OTHERWISE_CHANGED:
157 case AP_RESPONSE_NORMAL:
158 vfio_ap_wait_for_irqclear(q->apqn);
159 goto end_free;
160 case AP_RESPONSE_RESET_IN_PROGRESS:
161 case AP_RESPONSE_BUSY:
162 msleep(20);
163 break;
164 case AP_RESPONSE_Q_NOT_AVAIL:
165 case AP_RESPONSE_DECONFIGURED:
166 case AP_RESPONSE_CHECKSTOPPED:
167 case AP_RESPONSE_INVALID_ADDRESS:
168 default:
169 /* All cases in default means AP not operational */
170 WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
171 status.response_code);
172 goto end_free;
173 }
174 } while (retries--);
175
176 WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
177 status.response_code);
178end_free:
179 vfio_ap_free_aqic_resources(q);
180 q->matrix_mdev = NULL;
181 return status;
182}
183
184/**
185 * vfio_ap_setirq: Enable Interruption for a APQN
186 *
187 * @dev: the device associated with the ap_queue
188 * @q: the vfio_ap_queue holding AQIC parameters
189 *
190 * Pin the NIB saved in *q
191 * Register the guest ISC to GIB interface and retrieve the
192 * host ISC to issue the host side PQAP/AQIC
193 *
194 * Response.status may be set to AP_RESPONSE_INVALID_ADDRESS in case the
195 * vfio_pin_pages failed.
196 *
197 * Otherwise return the ap_queue_status returned by the ap_aqic(),
198 * all retry handling will be done by the guest.
199 */
200static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
201 int isc,
202 unsigned long nib)
203{
204 struct ap_qirq_ctrl aqic_gisa = {};
205 struct ap_queue_status status = {};
206 struct kvm_s390_gisa *gisa;
207 struct kvm *kvm;
208 unsigned long h_nib, g_pfn, h_pfn;
209 int ret;
210
211 g_pfn = nib >> PAGE_SHIFT;
212 ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1,
213 IOMMU_READ | IOMMU_WRITE, &h_pfn);
214 switch (ret) {
215 case 1:
216 break;
217 default:
218 status.response_code = AP_RESPONSE_INVALID_ADDRESS;
219 return status;
220 }
221
222 kvm = q->matrix_mdev->kvm;
223 gisa = kvm->arch.gisa_int.origin;
224
225 h_nib = (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK);
226 aqic_gisa.gisc = isc;
227 aqic_gisa.isc = kvm_s390_gisc_register(kvm, isc);
228 aqic_gisa.ir = 1;
229 aqic_gisa.gisa = (uint64_t)gisa >> 4;
230
231 status = ap_aqic(q->apqn, aqic_gisa, (void *)h_nib);
232 switch (status.response_code) {
233 case AP_RESPONSE_NORMAL:
234 /* See if we did clear older IRQ configuration */
235 vfio_ap_free_aqic_resources(q);
236 q->saved_pfn = g_pfn;
237 q->saved_isc = isc;
238 break;
239 case AP_RESPONSE_OTHERWISE_CHANGED:
240 /* We could not modify IRQ setings: clear new configuration */
241 vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1);
242 kvm_s390_gisc_unregister(kvm, isc);
243 break;
244 default:
245 pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
246 status.response_code);
247 vfio_ap_irq_disable(q);
248 break;
249 }
250
251 return status;
252}
253
254/**
255 * handle_pqap: PQAP instruction callback
256 *
257 * @vcpu: The vcpu on which we received the PQAP instruction
258 *
259 * Get the general register contents to initialize internal variables.
260 * REG[0]: APQN
261 * REG[1]: IR and ISC
262 * REG[2]: NIB
263 *
264 * Response.status may be set to following Response Code:
265 * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
266 * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
267 * - AP_RESPONSE_NORMAL (0) : in case of successs
268 * Check vfio_ap_setirq() and vfio_ap_clrirq() for other possible RC.
269 * We take the matrix_dev lock to ensure serialization on queues and
270 * mediated device access.
271 *
272 * Return 0 if we could handle the request inside KVM.
273 * otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
274 */
275static int handle_pqap(struct kvm_vcpu *vcpu)
276{
277 uint64_t status;
278 uint16_t apqn;
279 struct vfio_ap_queue *q;
280 struct ap_queue_status qstatus = {
281 .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
282 struct ap_matrix_mdev *matrix_mdev;
283
284 /* If we do not use the AIV facility just go to userland */
285 if (!(vcpu->arch.sie_block->eca & ECA_AIV))
286 return -EOPNOTSUPP;
287
288 apqn = vcpu->run->s.regs.gprs[0] & 0xffff;
289 mutex_lock(&matrix_dev->lock);
290
291 if (!vcpu->kvm->arch.crypto.pqap_hook)
292 goto out_unlock;
293 matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
294 struct ap_matrix_mdev, pqap_hook);
295
296 q = vfio_ap_get_queue(matrix_mdev, apqn);
297 if (!q)
298 goto out_unlock;
299
300 status = vcpu->run->s.regs.gprs[1];
301
302 /* If IR bit(16) is set we enable the interrupt */
303 if ((status >> (63 - 16)) & 0x01)
304 qstatus = vfio_ap_irq_enable(q, status & 0x07,
305 vcpu->run->s.regs.gprs[2]);
306 else
307 qstatus = vfio_ap_irq_disable(q);
308
309out_unlock:
310 memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
311 vcpu->run->s.regs.gprs[1] >>= 32;
312 mutex_unlock(&matrix_dev->lock);
313 return 0;
314}
315
316static void vfio_ap_matrix_init(struct ap_config_info *info,
317 struct ap_matrix *matrix)
318{
319 matrix->apm_max = info->apxa ? info->Na : 63;
320 matrix->aqm_max = info->apxa ? info->Nd : 15;
321 matrix->adm_max = info->apxa ? info->Nd : 15;
322}
323
324static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
325{
326 struct ap_matrix_mdev *matrix_mdev;
327
328 if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0))
329 return -EPERM;
330
331 matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
332 if (!matrix_mdev) {
333 atomic_inc(&matrix_dev->available_instances);
334 return -ENOMEM;
335 }
336
337 matrix_mdev->mdev = mdev;
338 vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
339 mdev_set_drvdata(mdev, matrix_mdev);
340 matrix_mdev->pqap_hook.hook = handle_pqap;
341 matrix_mdev->pqap_hook.owner = THIS_MODULE;
342 mutex_lock(&matrix_dev->lock);
343 list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
344 mutex_unlock(&matrix_dev->lock);
345
346 return 0;
347}
348
349static int vfio_ap_mdev_remove(struct mdev_device *mdev)
350{
351 struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
352
353 if (matrix_mdev->kvm)
354 return -EBUSY;
355
356 mutex_lock(&matrix_dev->lock);
357 vfio_ap_mdev_reset_queues(mdev);
358 list_del(&matrix_mdev->node);
359 mutex_unlock(&matrix_dev->lock);
360
361 kfree(matrix_mdev);
362 mdev_set_drvdata(mdev, NULL);
363 atomic_inc(&matrix_dev->available_instances);
364
365 return 0;
366}
367
368static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
369{
370 return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT);
371}
372
373static MDEV_TYPE_ATTR_RO(name);
374
375static ssize_t available_instances_show(struct kobject *kobj,
376 struct device *dev, char *buf)
377{
378 return sprintf(buf, "%d\n",
379 atomic_read(&matrix_dev->available_instances));
380}
381
382static MDEV_TYPE_ATTR_RO(available_instances);
383
384static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
385 char *buf)
386{
387 return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING);
388}
389
390static MDEV_TYPE_ATTR_RO(device_api);
391
392static struct attribute *vfio_ap_mdev_type_attrs[] = {
393 &mdev_type_attr_name.attr,
394 &mdev_type_attr_device_api.attr,
395 &mdev_type_attr_available_instances.attr,
396 NULL,
397};
398
399static struct attribute_group vfio_ap_mdev_hwvirt_type_group = {
400 .name = VFIO_AP_MDEV_TYPE_HWVIRT,
401 .attrs = vfio_ap_mdev_type_attrs,
402};
403
404static struct attribute_group *vfio_ap_mdev_type_groups[] = {
405 &vfio_ap_mdev_hwvirt_type_group,
406 NULL,
407};
408
409struct vfio_ap_queue_reserved {
410 unsigned long *apid;
411 unsigned long *apqi;
412 bool reserved;
413};
414
415/**
416 * vfio_ap_has_queue
417 *
418 * @dev: an AP queue device
419 * @data: a struct vfio_ap_queue_reserved reference
420 *
421 * Flags whether the AP queue device (@dev) has a queue ID containing the APQN,
422 * apid or apqi specified in @data:
423 *
424 * - If @data contains both an apid and apqi value, then @data will be flagged
425 * as reserved if the APID and APQI fields for the AP queue device matches
426 *
427 * - If @data contains only an apid value, @data will be flagged as
428 * reserved if the APID field in the AP queue device matches
429 *
430 * - If @data contains only an apqi value, @data will be flagged as
431 * reserved if the APQI field in the AP queue device matches
432 *
433 * Returns 0 to indicate the input to function succeeded. Returns -EINVAL if
434 * @data does not contain either an apid or apqi.
435 */
436static int vfio_ap_has_queue(struct device *dev, void *data)
437{
438 struct vfio_ap_queue_reserved *qres = data;
439 struct ap_queue *ap_queue = to_ap_queue(dev);
440 ap_qid_t qid;
441 unsigned long id;
442
443 if (qres->apid && qres->apqi) {
444 qid = AP_MKQID(*qres->apid, *qres->apqi);
445 if (qid == ap_queue->qid)
446 qres->reserved = true;
447 } else if (qres->apid && !qres->apqi) {
448 id = AP_QID_CARD(ap_queue->qid);
449 if (id == *qres->apid)
450 qres->reserved = true;
451 } else if (!qres->apid && qres->apqi) {
452 id = AP_QID_QUEUE(ap_queue->qid);
453 if (id == *qres->apqi)
454 qres->reserved = true;
455 } else {
456 return -EINVAL;
457 }
458
459 return 0;
460}
461
462/**
463 * vfio_ap_verify_queue_reserved
464 *
465 * @matrix_dev: a mediated matrix device
466 * @apid: an AP adapter ID
467 * @apqi: an AP queue index
468 *
469 * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device
470 * driver according to the following rules:
471 *
472 * - If both @apid and @apqi are not NULL, then there must be an AP queue
473 * device bound to the vfio_ap driver with the APQN identified by @apid and
474 * @apqi
475 *
476 * - If only @apid is not NULL, then there must be an AP queue device bound
477 * to the vfio_ap driver with an APQN containing @apid
478 *
479 * - If only @apqi is not NULL, then there must be an AP queue device bound
480 * to the vfio_ap driver with an APQN containing @apqi
481 *
482 * Returns 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
483 */
484static int vfio_ap_verify_queue_reserved(unsigned long *apid,
485 unsigned long *apqi)
486{
487 int ret;
488 struct vfio_ap_queue_reserved qres;
489
490 qres.apid = apid;
491 qres.apqi = apqi;
492 qres.reserved = false;
493
494 ret = driver_for_each_device(&matrix_dev->vfio_ap_drv->driver, NULL,
495 &qres, vfio_ap_has_queue);
496 if (ret)
497 return ret;
498
499 if (qres.reserved)
500 return 0;
501
502 return -EADDRNOTAVAIL;
503}
504
505static int
506vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
507 unsigned long apid)
508{
509 int ret;
510 unsigned long apqi;
511 unsigned long nbits = matrix_mdev->matrix.aqm_max + 1;
512
513 if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits)
514 return vfio_ap_verify_queue_reserved(&apid, NULL);
515
516 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) {
517 ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
518 if (ret)
519 return ret;
520 }
521
522 return 0;
523}
524
525/**
526 * vfio_ap_mdev_verify_no_sharing
527 *
528 * Verifies that the APQNs derived from the cross product of the AP adapter IDs
529 * and AP queue indexes comprising the AP matrix are not configured for another
530 * mediated device. AP queue sharing is not allowed.
531 *
532 * @matrix_mdev: the mediated matrix device
533 *
534 * Returns 0 if the APQNs are not shared, otherwise; returns -EADDRINUSE.
535 */
536static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
537{
538 struct ap_matrix_mdev *lstdev;
539 DECLARE_BITMAP(apm, AP_DEVICES);
540 DECLARE_BITMAP(aqm, AP_DOMAINS);
541
542 list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) {
543 if (matrix_mdev == lstdev)
544 continue;
545
546 memset(apm, 0, sizeof(apm));
547 memset(aqm, 0, sizeof(aqm));
548
549 /*
550 * We work on full longs, as we can only exclude the leftover
551 * bits in non-inverse order. The leftover is all zeros.
552 */
553 if (!bitmap_and(apm, matrix_mdev->matrix.apm,
554 lstdev->matrix.apm, AP_DEVICES))
555 continue;
556
557 if (!bitmap_and(aqm, matrix_mdev->matrix.aqm,
558 lstdev->matrix.aqm, AP_DOMAINS))
559 continue;
560
561 return -EADDRINUSE;
562 }
563
564 return 0;
565}
566
567/**
568 * assign_adapter_store
569 *
570 * @dev: the matrix device
571 * @attr: the mediated matrix device's assign_adapter attribute
572 * @buf: a buffer containing the AP adapter number (APID) to
573 * be assigned
574 * @count: the number of bytes in @buf
575 *
576 * Parses the APID from @buf and sets the corresponding bit in the mediated
577 * matrix device's APM.
578 *
579 * Returns the number of bytes processed if the APID is valid; otherwise,
580 * returns one of the following errors:
581 *
582 * 1. -EINVAL
583 * The APID is not a valid number
584 *
585 * 2. -ENODEV
586 * The APID exceeds the maximum value configured for the system
587 *
588 * 3. -EADDRNOTAVAIL
589 * An APQN derived from the cross product of the APID being assigned
590 * and the APQIs previously assigned is not bound to the vfio_ap device
591 * driver; or, if no APQIs have yet been assigned, the APID is not
592 * contained in an APQN bound to the vfio_ap device driver.
593 *
594 * 4. -EADDRINUSE
595 * An APQN derived from the cross product of the APID being assigned
596 * and the APQIs previously assigned is being used by another mediated
597 * matrix device
598 */
599static ssize_t assign_adapter_store(struct device *dev,
600 struct device_attribute *attr,
601 const char *buf, size_t count)
602{
603 int ret;
604 unsigned long apid;
605 struct mdev_device *mdev = mdev_from_dev(dev);
606 struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
607
608 /* If the guest is running, disallow assignment of adapter */
609 if (matrix_mdev->kvm)
610 return -EBUSY;
611
612 ret = kstrtoul(buf, 0, &apid);
613 if (ret)
614 return ret;
615
616 if (apid > matrix_mdev->matrix.apm_max)
617 return -ENODEV;
618
619 /*
620 * Set the bit in the AP mask (APM) corresponding to the AP adapter
621 * number (APID). The bits in the mask, from most significant to least
622 * significant bit, correspond to APIDs 0-255.
623 */
624 mutex_lock(&matrix_dev->lock);
625
626 ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
627 if (ret)
628 goto done;
629
630 set_bit_inv(apid, matrix_mdev->matrix.apm);
631
632 ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
633 if (ret)
634 goto share_err;
635
636 ret = count;
637 goto done;
638
639share_err:
640 clear_bit_inv(apid, matrix_mdev->matrix.apm);
641done:
642 mutex_unlock(&matrix_dev->lock);
643
644 return ret;
645}
646static DEVICE_ATTR_WO(assign_adapter);
647
648/**
649 * unassign_adapter_store
650 *
651 * @dev: the matrix device
652 * @attr: the mediated matrix device's unassign_adapter attribute
653 * @buf: a buffer containing the adapter number (APID) to be unassigned
654 * @count: the number of bytes in @buf
655 *
656 * Parses the APID from @buf and clears the corresponding bit in the mediated
657 * matrix device's APM.
658 *
659 * Returns the number of bytes processed if the APID is valid; otherwise,
660 * returns one of the following errors:
661 * -EINVAL if the APID is not a number
662 * -ENODEV if the APID it exceeds the maximum value configured for the
663 * system
664 */
665static ssize_t unassign_adapter_store(struct device *dev,
666 struct device_attribute *attr,
667 const char *buf, size_t count)
668{
669 int ret;
670 unsigned long apid;
671 struct mdev_device *mdev = mdev_from_dev(dev);
672 struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
673
674 /* If the guest is running, disallow un-assignment of adapter */
675 if (matrix_mdev->kvm)
676 return -EBUSY;
677
678 ret = kstrtoul(buf, 0, &apid);
679 if (ret)
680 return ret;
681
682 if (apid > matrix_mdev->matrix.apm_max)
683 return -ENODEV;
684
685 mutex_lock(&matrix_dev->lock);
686 clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
687 mutex_unlock(&matrix_dev->lock);
688
689 return count;
690}
691static DEVICE_ATTR_WO(unassign_adapter);
692
693static int
694vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
695 unsigned long apqi)
696{
697 int ret;
698 unsigned long apid;
699 unsigned long nbits = matrix_mdev->matrix.apm_max + 1;
700
701 if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits)
702 return vfio_ap_verify_queue_reserved(NULL, &apqi);
703
704 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) {
705 ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
706 if (ret)
707 return ret;
708 }
709
710 return 0;
711}
712
713/**
714 * assign_domain_store
715 *
716 * @dev: the matrix device
717 * @attr: the mediated matrix device's assign_domain attribute
718 * @buf: a buffer containing the AP queue index (APQI) of the domain to
719 * be assigned
720 * @count: the number of bytes in @buf
721 *
722 * Parses the APQI from @buf and sets the corresponding bit in the mediated
723 * matrix device's AQM.
724 *
725 * Returns the number of bytes processed if the APQI is valid; otherwise returns
726 * one of the following errors:
727 *
728 * 1. -EINVAL
729 * The APQI is not a valid number
730 *
731 * 2. -ENODEV
732 * The APQI exceeds the maximum value configured for the system
733 *
734 * 3. -EADDRNOTAVAIL
735 * An APQN derived from the cross product of the APQI being assigned
736 * and the APIDs previously assigned is not bound to the vfio_ap device
737 * driver; or, if no APIDs have yet been assigned, the APQI is not
738 * contained in an APQN bound to the vfio_ap device driver.
739 *
740 * 4. -EADDRINUSE
741 * An APQN derived from the cross product of the APQI being assigned
742 * and the APIDs previously assigned is being used by another mediated
743 * matrix device
744 */
745static ssize_t assign_domain_store(struct device *dev,
746 struct device_attribute *attr,
747 const char *buf, size_t count)
748{
749 int ret;
750 unsigned long apqi;
751 struct mdev_device *mdev = mdev_from_dev(dev);
752 struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
753 unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
754
755 /* If the guest is running, disallow assignment of domain */
756 if (matrix_mdev->kvm)
757 return -EBUSY;
758
759 ret = kstrtoul(buf, 0, &apqi);
760 if (ret)
761 return ret;
762 if (apqi > max_apqi)
763 return -ENODEV;
764
765 mutex_lock(&matrix_dev->lock);
766
767 ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
768 if (ret)
769 goto done;
770
771 set_bit_inv(apqi, matrix_mdev->matrix.aqm);
772
773 ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
774 if (ret)
775 goto share_err;
776
777 ret = count;
778 goto done;
779
780share_err:
781 clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
782done:
783 mutex_unlock(&matrix_dev->lock);
784
785 return ret;
786}
787static DEVICE_ATTR_WO(assign_domain);
788
789
790/**
791 * unassign_domain_store
792 *
793 * @dev: the matrix device
794 * @attr: the mediated matrix device's unassign_domain attribute
795 * @buf: a buffer containing the AP queue index (APQI) of the domain to
796 * be unassigned
797 * @count: the number of bytes in @buf
798 *
799 * Parses the APQI from @buf and clears the corresponding bit in the
800 * mediated matrix device's AQM.
801 *
802 * Returns the number of bytes processed if the APQI is valid; otherwise,
803 * returns one of the following errors:
804 * -EINVAL if the APQI is not a number
805 * -ENODEV if the APQI exceeds the maximum value configured for the system
806 */
807static ssize_t unassign_domain_store(struct device *dev,
808 struct device_attribute *attr,
809 const char *buf, size_t count)
810{
811 int ret;
812 unsigned long apqi;
813 struct mdev_device *mdev = mdev_from_dev(dev);
814 struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
815
816 /* If the guest is running, disallow un-assignment of domain */
817 if (matrix_mdev->kvm)
818 return -EBUSY;
819
820 ret = kstrtoul(buf, 0, &apqi);
821 if (ret)
822 return ret;
823
824 if (apqi > matrix_mdev->matrix.aqm_max)
825 return -ENODEV;
826
827 mutex_lock(&matrix_dev->lock);
828 clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
829 mutex_unlock(&matrix_dev->lock);
830
831 return count;
832}
833static DEVICE_ATTR_WO(unassign_domain);
834
835/**
836 * assign_control_domain_store
837 *
838 * @dev: the matrix device
839 * @attr: the mediated matrix device's assign_control_domain attribute
840 * @buf: a buffer containing the domain ID to be assigned
841 * @count: the number of bytes in @buf
842 *
843 * Parses the domain ID from @buf and sets the corresponding bit in the mediated
844 * matrix device's ADM.
845 *
846 * Returns the number of bytes processed if the domain ID is valid; otherwise,
847 * returns one of the following errors:
848 * -EINVAL if the ID is not a number
849 * -ENODEV if the ID exceeds the maximum value configured for the system
850 */
851static ssize_t assign_control_domain_store(struct device *dev,
852 struct device_attribute *attr,
853 const char *buf, size_t count)
854{
855 int ret;
856 unsigned long id;
857 struct mdev_device *mdev = mdev_from_dev(dev);
858 struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
859
860 /* If the guest is running, disallow assignment of control domain */
861 if (matrix_mdev->kvm)
862 return -EBUSY;
863
864 ret = kstrtoul(buf, 0, &id);
865 if (ret)
866 return ret;
867
868 if (id > matrix_mdev->matrix.adm_max)
869 return -ENODEV;
870
871 /* Set the bit in the ADM (bitmask) corresponding to the AP control
872 * domain number (id). The bits in the mask, from most significant to
873 * least significant, correspond to IDs 0 up to the one less than the
874 * number of control domains that can be assigned.
875 */
876 mutex_lock(&matrix_dev->lock);
877 set_bit_inv(id, matrix_mdev->matrix.adm);
878 mutex_unlock(&matrix_dev->lock);
879
880 return count;
881}
882static DEVICE_ATTR_WO(assign_control_domain);
883
884/**
885 * unassign_control_domain_store
886 *
887 * @dev: the matrix device
888 * @attr: the mediated matrix device's unassign_control_domain attribute
889 * @buf: a buffer containing the domain ID to be unassigned
890 * @count: the number of bytes in @buf
891 *
892 * Parses the domain ID from @buf and clears the corresponding bit in the
893 * mediated matrix device's ADM.
894 *
895 * Returns the number of bytes processed if the domain ID is valid; otherwise,
896 * returns one of the following errors:
897 * -EINVAL if the ID is not a number
898 * -ENODEV if the ID exceeds the maximum value configured for the system
899 */
900static ssize_t unassign_control_domain_store(struct device *dev,
901 struct device_attribute *attr,
902 const char *buf, size_t count)
903{
904 int ret;
905 unsigned long domid;
906 struct mdev_device *mdev = mdev_from_dev(dev);
907 struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
908 unsigned long max_domid = matrix_mdev->matrix.adm_max;
909
910 /* If the guest is running, disallow un-assignment of control domain */
911 if (matrix_mdev->kvm)
912 return -EBUSY;
913
914 ret = kstrtoul(buf, 0, &domid);
915 if (ret)
916 return ret;
917 if (domid > max_domid)
918 return -ENODEV;
919
920 mutex_lock(&matrix_dev->lock);
921 clear_bit_inv(domid, matrix_mdev->matrix.adm);
922 mutex_unlock(&matrix_dev->lock);
923
924 return count;
925}
926static DEVICE_ATTR_WO(unassign_control_domain);
927
928static ssize_t control_domains_show(struct device *dev,
929 struct device_attribute *dev_attr,
930 char *buf)
931{
932 unsigned long id;
933 int nchars = 0;
934 int n;
935 char *bufpos = buf;
936 struct mdev_device *mdev = mdev_from_dev(dev);
937 struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
938 unsigned long max_domid = matrix_mdev->matrix.adm_max;
939
940 mutex_lock(&matrix_dev->lock);
941 for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
942 n = sprintf(bufpos, "%04lx\n", id);
943 bufpos += n;
944 nchars += n;
945 }
946 mutex_unlock(&matrix_dev->lock);
947
948 return nchars;
949}
950static DEVICE_ATTR_RO(control_domains);
951
952static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
953 char *buf)
954{
955 struct mdev_device *mdev = mdev_from_dev(dev);
956 struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
957 char *bufpos = buf;
958 unsigned long apid;
959 unsigned long apqi;
960 unsigned long apid1;
961 unsigned long apqi1;
962 unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1;
963 unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1;
964 int nchars = 0;
965 int n;
966
967 apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits);
968 apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits);
969
970 mutex_lock(&matrix_dev->lock);
971
972 if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
973 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
974 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
975 naqm_bits) {
976 n = sprintf(bufpos, "%02lx.%04lx\n", apid,
977 apqi);
978 bufpos += n;
979 nchars += n;
980 }
981 }
982 } else if (apid1 < napm_bits) {
983 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
984 n = sprintf(bufpos, "%02lx.\n", apid);
985 bufpos += n;
986 nchars += n;
987 }
988 } else if (apqi1 < naqm_bits) {
989 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) {
990 n = sprintf(bufpos, ".%04lx\n", apqi);
991 bufpos += n;
992 nchars += n;
993 }
994 }
995
996 mutex_unlock(&matrix_dev->lock);
997
998 return nchars;
999}
1000static DEVICE_ATTR_RO(matrix);
1001
1002static struct attribute *vfio_ap_mdev_attrs[] = {
1003 &dev_attr_assign_adapter.attr,
1004 &dev_attr_unassign_adapter.attr,
1005 &dev_attr_assign_domain.attr,
1006 &dev_attr_unassign_domain.attr,
1007 &dev_attr_assign_control_domain.attr,
1008 &dev_attr_unassign_control_domain.attr,
1009 &dev_attr_control_domains.attr,
1010 &dev_attr_matrix.attr,
1011 NULL,
1012};
1013
1014static struct attribute_group vfio_ap_mdev_attr_group = {
1015 .attrs = vfio_ap_mdev_attrs
1016};
1017
1018static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
1019 &vfio_ap_mdev_attr_group,
1020 NULL
1021};
1022
/**
 * vfio_ap_mdev_set_kvm: set the KVM pointer for a mediated matrix device
 *
 * @matrix_mdev: a mediated matrix device
 * @kvm: reference to KVM instance
 *
 * Verifies that no other mediated matrix device has @kvm and, if not, sets
 * a reference to it in @matrix_mdev->kvm and installs the PQAP instruction
 * hook.
 *
 * Return: 0 if no other mediated matrix device has a reference to @kvm;
 * otherwise, returns -EPERM.
 */
static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
				struct kvm *kvm)
{
	struct ap_matrix_mdev *m;

	mutex_lock(&matrix_dev->lock);

	list_for_each_entry(m, &matrix_dev->mdev_list, node) {
		if ((m != matrix_mdev) && (m->kvm == kvm)) {
			mutex_unlock(&matrix_dev->lock);
			return -EPERM;
		}
	}

	matrix_mdev->kvm = kvm;
	kvm_get_kvm(kvm);
	kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
	mutex_unlock(&matrix_dev->lock);

	return 0;
}

/**
 * vfio_ap_mdev_iommu_notifier: IOMMU notifier callback
 *
 * @nb: The notifier block
 * @action: Action to be taken
 * @data: data associated with the request
 *
 * For an UNMAP request, unpin the guest IOVA (the NIB guest address we
 * pinned before). Other requests are ignored.
 */
static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
				       unsigned long action, void *data)
{
	struct ap_matrix_mdev *matrix_mdev;

	matrix_mdev = container_of(nb, struct ap_matrix_mdev, iommu_notifier);

	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		unsigned long g_pfn = unmap->iova >> PAGE_SHIFT;

		vfio_unpin_pages(mdev_dev(matrix_mdev->mdev), &g_pfn, 1);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
				       unsigned long action, void *data)
{
	int ret;
	struct ap_matrix_mdev *matrix_mdev;

	if (action != VFIO_GROUP_NOTIFY_SET_KVM)
		return NOTIFY_OK;

	matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);

	if (!data) {
		/*
		 * The KVM pointer was taken with kvm_get_kvm() in
		 * vfio_ap_mdev_set_kvm(), so drop the reference and the PQAP
		 * hook here rather than leaking them; mask clearing and queue
		 * resets are handled by the release callback.
		 */
		mutex_lock(&matrix_dev->lock);
		if (matrix_mdev->kvm) {
			matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
			kvm_put_kvm(matrix_mdev->kvm);
			matrix_mdev->kvm = NULL;
		}
		mutex_unlock(&matrix_dev->lock);
		return NOTIFY_OK;
	}

	ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
	if (ret)
		return NOTIFY_DONE;

	/* If there is no CRYCB pointer, then we can't copy the masks */
	if (!matrix_mdev->kvm->arch.crypto.crycbd)
		return NOTIFY_DONE;

	kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
				  matrix_mdev->matrix.aqm,
				  matrix_mdev->matrix.adm);

	return NOTIFY_OK;
}
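
/*
 * Illustrative userspace sketch (assumptions: a VM fd @vm_fd and a VFIO
 * group fd @group_fd already opened): the VFIO_GROUP_NOTIFY_SET_KVM
 * notification handled above fires when the group is added to the KVM-VFIO
 * pseudo device:
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_VFIO_GROUP,
 *		.attr = KVM_DEV_VFIO_GROUP_ADD,
 *		.addr = (__u64)&group_fd,
 *	};
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * Removing the group (KVM_DEV_VFIO_GROUP_DEL) or tearing down the VM takes
 * the !data path above.
 */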

static void vfio_ap_irq_disable_apqn(int apqn)
{
	struct device *dev;
	struct vfio_ap_queue *q;

	dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
				 &apqn, match_apqn);
	if (dev) {
		q = dev_get_drvdata(dev);
		vfio_ap_irq_disable(q);
		put_device(dev);
	}
}

int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
			     unsigned int retry)
{
	struct ap_queue_status status;
	int retry2 = 2;
	int apqn = AP_MKQID(apid, apqi);

	do {
		status = ap_zapq(apqn);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			/*
			 * Poll until the zeroize completes and the queue
			 * reports empty; warn only if it never drains, so a
			 * queue that empties on the last poll does not
			 * trigger a spurious warning.
			 */
			while (!status.queue_empty && retry2--) {
				msleep(AP_RESET_INTERVAL);
				status = ap_tapq(apqn, NULL);
			}
			WARN_ON_ONCE(!status.queue_empty);
			return 0;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			msleep(AP_RESET_INTERVAL);
			break;
		default:
			/* things are really broken, give up */
			return -EIO;
		}
	} while (retry--);

	return -EBUSY;
}
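
/*
 * Worst-case duration of the reset above (a back-of-the-envelope check,
 * assuming the caller passes retry = 1 as vfio_ap_mdev_reset_queues() does):
 * up to two ZAPQ attempts separated by one AP_RESET_INTERVAL (20ms) sleep on
 * a busy response, plus up to two TAPQ polls at 20ms each after a successful
 * ZAPQ, i.e. roughly 60ms per queue before the function returns or warns.
 */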

static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
{
	int ret;
	int rc = 0;
	unsigned long apid, apqi;
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
			     matrix_mdev->matrix.apm_max + 1) {
		for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
				     matrix_mdev->matrix.aqm_max + 1) {
			ret = vfio_ap_mdev_reset_queue(apid, apqi, 1);
			/*
			 * Regardless of whether a queue turns out to be
			 * busy, or is not operational, we need to continue
			 * resetting the remaining queues.
			 */
			if (ret)
				rc = ret;
			vfio_ap_irq_disable_apqn(AP_MKQID(apid, apqi));
		}
	}

	return rc;
}

static int vfio_ap_mdev_open(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	unsigned long events;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier;
	events = VFIO_GROUP_NOTIFY_SET_KVM;

	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
				     &events, &matrix_mdev->group_notifier);
	if (ret) {
		module_put(THIS_MODULE);
		return ret;
	}

	matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				     &events, &matrix_mdev->iommu_notifier);
	if (!ret)
		return ret;

	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
				 &matrix_mdev->group_notifier);
	module_put(THIS_MODULE);
	return ret;
}

static void vfio_ap_mdev_release(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

	mutex_lock(&matrix_dev->lock);
	if (matrix_mdev->kvm) {
		kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
		matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
		vfio_ap_mdev_reset_queues(mdev);
		kvm_put_kvm(matrix_mdev->kvm);
		matrix_mdev->kvm = NULL;
	}
	mutex_unlock(&matrix_dev->lock);

	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &matrix_mdev->iommu_notifier);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
				 &matrix_mdev->group_notifier);
	module_put(THIS_MODULE);
}

static int vfio_ap_mdev_get_device_info(unsigned long arg)
{
	unsigned long minsz;
	struct vfio_device_info info;

	minsz = offsetofend(struct vfio_device_info, num_irqs);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
	info.num_regions = 0;
	info.num_irqs = 0;

	/*
	 * copy_to_user() returns the number of bytes not copied, not a
	 * negative errno, so translate a partial copy into -EFAULT.
	 */
	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
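
/*
 * Illustrative userspace sketch (assuming an open VFIO device fd @device_fd
 * for the mdev): query and reset the device through the two ioctls handled
 * below:
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *	if (!ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info) &&
 *	    (info.flags & VFIO_DEVICE_FLAGS_AP) &&
 *	    (info.flags & VFIO_DEVICE_FLAGS_RESET))
 *		ioctl(device_fd, VFIO_DEVICE_RESET);
 */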

static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
				  unsigned int cmd, unsigned long arg)
{
	int ret;

	mutex_lock(&matrix_dev->lock);
	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
		ret = vfio_ap_mdev_get_device_info(arg);
		break;
	case VFIO_DEVICE_RESET:
		ret = vfio_ap_mdev_reset_queues(mdev);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&matrix_dev->lock);

	return ret;
}

static const struct mdev_parent_ops vfio_ap_matrix_ops = {
	.owner			= THIS_MODULE,
	.supported_type_groups	= vfio_ap_mdev_type_groups,
	.mdev_attr_groups	= vfio_ap_mdev_attr_groups,
	.create			= vfio_ap_mdev_create,
	.remove			= vfio_ap_mdev_remove,
	.open			= vfio_ap_mdev_open,
	.release		= vfio_ap_mdev_release,
	.ioctl			= vfio_ap_mdev_ioctl,
};

int vfio_ap_mdev_register(void)
{
	atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT);

	return mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
}
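
/*
 * Illustrative userspace sketch (the sysfs path follows the mdev naming
 * convention and Documentation/s390/vfio-ap.rst; @uuid_str is a hypothetical
 * canonical UUID string): once the parent is registered, a mediated device
 * is created by writing a UUID to the type's "create" attribute:
 *
 *	int fd = open("/sys/devices/vfio_ap/matrix/mdev_supported_types/"
 *		      "vfio_ap-passthrough/create", O_WRONLY);
 *
 *	write(fd, uuid_str, strlen(uuid_str));
 *	close(fd);
 */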

void vfio_ap_mdev_unregister(void)
{
	mdev_unregister_device(&matrix_dev->device);
}