// SPDX-License-Identifier: GPL-2.0+
/*
 * Adjunct processor matrix VFIO device driver callbacks.
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
 *            Halil Pasic <pasic@linux.ibm.com>
 *            Pierre Morel <pmorel@linux.ibm.com>
 */
#include <linux/string.h>
#include <linux/vfio.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/uuid.h>
#include <asm/kvm.h>
#include <asm/zcrypt.h>

#include "vfio_ap_private.h"
#include "vfio_ap_debug.h"

#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"

#define AP_QUEUE_ASSIGNED "assigned"
#define AP_QUEUE_UNASSIGNED "unassigned"
#define AP_QUEUE_IN_USE "in use"

#define AP_RESET_INTERVAL 20	/* Reset sleep interval (20ms) */

static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
static int vfio_ap_mdev_reset_qlist(struct list_head *qlist);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);

/**
 * get_update_locks_for_kvm: Acquire the locks required to dynamically update a
 *			     KVM guest's APCB in the proper order.
 *
 * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. kvm->lock:	       required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @kvm is NULL, the KVM lock will not be taken.
 */
static inline void get_update_locks_for_kvm(struct kvm *kvm)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (kvm)
		mutex_lock(&kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * release_update_locks_for_kvm: Release the locks used to dynamically update a
 *				 KVM guest's APCB in the proper order.
 *
 * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
 *
 * The proper unlocking order is:
 * 1. matrix_dev->mdevs_lock
 * 2. kvm->lock
 * 3. matrix_dev->guests_lock
 *
 * Note: If @kvm is NULL, the KVM lock will not be released.
 */
static inline void release_update_locks_for_kvm(struct kvm *kvm)
{
	mutex_unlock(&matrix_dev->mdevs_lock);
	if (kvm)
		mutex_unlock(&kvm->lock);
	mutex_unlock(&matrix_dev->guests_lock);
}
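
/*
 * A sketch of the typical call pattern for the lock helpers above (not taken
 * from any specific caller):
 *
 *	get_update_locks_for_kvm(kvm);
 *	... update the matrix mdev's state and the guest's APCB ...
 *	release_update_locks_for_kvm(kvm);
 */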

/**
 * get_update_locks_for_mdev: Acquire the locks required to dynamically update a
 *			      KVM guest's APCB in the proper order.
 *
 * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
 *		 configuration data to use to update a KVM guest's APCB.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
 *	 lock will not be taken.
 */
static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (matrix_mdev && matrix_mdev->kvm)
		mutex_lock(&matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * release_update_locks_for_mdev: Release the locks used to dynamically update a
 *				  KVM guest's APCB in the proper order.
 *
 * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
 *		 configuration data to use to update a KVM guest's APCB.
 *
 * The proper unlocking order is:
 * 1. matrix_dev->mdevs_lock
 * 2. matrix_mdev->kvm->lock
 * 3. matrix_dev->guests_lock
 *
 * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
 *	 lock will not be released.
 */
static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
	mutex_unlock(&matrix_dev->mdevs_lock);
	if (matrix_mdev && matrix_mdev->kvm)
		mutex_unlock(&matrix_mdev->kvm->lock);
	mutex_unlock(&matrix_dev->guests_lock);
}

/**
 * get_update_locks_by_apqn: Find the mdev to which an APQN is assigned and
 *			     acquire the locks required to update the APCB of
 *			     the KVM guest to which the mdev is attached.
 *
 * @apqn: the APQN of a queue device.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @apqn is not assigned to a matrix_mdev, the matrix_mdev->kvm->lock
 *	 will not be taken.
 *
 * Return: the ap_matrix_mdev object to which @apqn is assigned or NULL if @apqn
 *	   is not assigned to an ap_matrix_mdev.
 */
static struct ap_matrix_mdev *get_update_locks_by_apqn(int apqn)
{
	struct ap_matrix_mdev *matrix_mdev;

	mutex_lock(&matrix_dev->guests_lock);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) &&
		    test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) {
			if (matrix_mdev->kvm)
				mutex_lock(&matrix_mdev->kvm->lock);

			mutex_lock(&matrix_dev->mdevs_lock);

			return matrix_mdev;
		}
	}

	mutex_lock(&matrix_dev->mdevs_lock);

	return NULL;
}

/**
 * get_update_locks_for_queue: get the locks required to update the APCB of the
 *			       KVM guest to which the matrix mdev linked to a
 *			       vfio_ap_queue object is attached.
 *
 * @q: a pointer to a vfio_ap_queue object.
 *
 * The proper locking order is:
 * 1. q->matrix_dev->guests_lock: required to use the KVM pointer to update a
 *				  KVM guest's APCB.
 * 2. q->matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:	  required to access data stored in matrix_mdev
 *
 * Note: if @q is not linked to an ap_matrix_mdev object, the KVM lock will
 *	 not be taken.
 */
static inline void get_update_locks_for_queue(struct vfio_ap_queue *q)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (q->matrix_mdev && q->matrix_mdev->kvm)
		mutex_lock(&q->matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a
 *			    hash table of queues assigned to a matrix mdev
 * @matrix_mdev: the matrix mdev
 * @apqn: The APQN of a queue device
 *
 * Return: the pointer to the vfio_ap_queue struct representing the queue or
 *	   NULL if the queue is not assigned to @matrix_mdev
 */
static struct vfio_ap_queue *vfio_ap_mdev_get_queue(
					struct ap_matrix_mdev *matrix_mdev,
					int apqn)
{
	struct vfio_ap_queue *q;

	hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode,
			       apqn) {
		if (q && q->apqn == apqn)
			return q;
	}

	return NULL;
}

/**
 * vfio_ap_wait_for_irqclear - clears the IR bit or gives up after 5 tries
 * @apqn: The AP Queue number
 *
 * Checks the IRQ bit for the status of this APQN using ap_tapq.
 * Returns if the ap_tapq function succeeded and the bit is clear.
 * Returns if the ap_tapq function failed with an invalid, deconfigured or
 * checkstopped AP.
 * Otherwise retries up to 5 times after waiting 20ms.
 */
static void vfio_ap_wait_for_irqclear(int apqn)
{
	struct ap_queue_status status;
	int retry = 5;

	do {
		status = ap_tapq(apqn, NULL);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			if (!status.irq_enabled)
				return;
			fallthrough;
		case AP_RESPONSE_BUSY:
			msleep(20);
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		default:
			WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
				  status.response_code, apqn);
			return;
		}
	} while (--retry);

	WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
		  __func__, status.response_code, apqn);
}

/**
 * vfio_ap_free_aqic_resources - free vfio_ap_queue resources
 * @q: The vfio_ap_queue
 *
 * Unregisters the ISC in the GIB when the saved ISC is not invalid.
 * Unpins the guest's page holding the NIB when it exists.
 * Resets the saved_iova and saved_isc to invalid values.
 */
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
	if (!q)
		return;
	if (q->saved_isc != VFIO_AP_ISC_INVALID &&
	    !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
		kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
		q->saved_isc = VFIO_AP_ISC_INVALID;
	}
	if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) {
		vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1);
		q->saved_iova = 0;
	}
}

/**
 * vfio_ap_irq_disable - disables and clears an ap_queue interrupt
 * @q: The vfio_ap_queue
 *
 * Uses ap_aqic to disable the interruption and, in case of success or if the
 * IRQ disablement had otherwise already taken effect, calls
 * vfio_ap_wait_for_irqclear() to check for the IRQ bit to be clear
 * and calls vfio_ap_free_aqic_resources() to free the resources associated
 * with the AP interrupt handling.
 *
 * In the case the AP is busy, or a reset is in progress,
 * retries after 20ms, up to 5 times.
 *
 * Returns if the ap_aqic function failed with an invalid, deconfigured or
 * checkstopped AP.
 *
 * Return: &struct ap_queue_status
 */
static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
	union ap_qirq_ctrl aqic_gisa = { .value = 0 };
	struct ap_queue_status status;
	int retries = 5;

	do {
		status = ap_aqic(q->apqn, aqic_gisa, 0);
		switch (status.response_code) {
		case AP_RESPONSE_OTHERWISE_CHANGED:
		case AP_RESPONSE_NORMAL:
			vfio_ap_wait_for_irqclear(q->apqn);
			goto end_free;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			msleep(20);
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
		default:
			/* All cases in default means AP not operational */
			WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
				  status.response_code);
			goto end_free;
		}
	} while (retries--);

	WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
		  status.response_code);
end_free:
	vfio_ap_free_aqic_resources(q);
	return status;
}

/**
 * vfio_ap_validate_nib - validate a notification indicator byte (nib) address.
 *
 * @vcpu: the object representing the vcpu executing the PQAP(AQIC) instruction.
 * @nib: the location for storing the nib address.
 *
 * When the PQAP(AQIC) instruction is executed, general register 2 contains the
 * address of the notification indicator byte (nib) used for IRQ notification.
 * This function parses and validates the nib from gr2.
 *
 * Return: returns zero if the nib address is valid; otherwise, returns
 *	   -EINVAL.
 */
static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
{
	*nib = vcpu->run->s.regs.gprs[2];

	if (!*nib)
		return -EINVAL;
	if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
		return -EINVAL;

	return 0;
}

static int ensure_nib_shared(unsigned long addr, struct gmap *gmap)
{
	int ret;

	/*
	 * The nib has to be located in shared storage since guest and
	 * host access it. vfio_pin_pages() will do a pin shared and
	 * if that fails (possibly because it's not a shared page) it
	 * calls export. We try to do a second pin shared here so that
	 * the UV gives us an error code if we try to pin a non-shared
	 * page.
	 *
	 * If the page is already pinned shared the UV will return a success.
	 */
	ret = uv_pin_shared(addr);
	if (ret) {
		/* vfio_pin_pages() likely exported the page so let's re-import */
		gmap_convert_to_secure(gmap, addr);
	}
	return ret;
}

/**
 * vfio_ap_irq_enable - Enable Interruption for an APQN
 *
 * @q:	  the vfio_ap_queue holding AQIC parameters
 * @isc:  the guest ISC to register with the GIB interface
 * @vcpu: the vcpu object containing the registers specifying the parameters
 *	  passed to the PQAP(AQIC) instruction.
 *
 * Pin the NIB saved in *q
 * Register the guest ISC to GIB interface and retrieve the
 * host ISC to issue the host side PQAP/AQIC
 *
 * status.response_code may be set to AP_RESPONSE_INVALID_ADDRESS in case
 * vfio_pin_pages() failed.
 *
 * Otherwise return the ap_queue_status returned by the ap_aqic();
 * all retry handling will be done by the guest.
 *
 * Return: &struct ap_queue_status
 */
static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
						 int isc,
						 struct kvm_vcpu *vcpu)
{
	union ap_qirq_ctrl aqic_gisa = { .value = 0 };
	struct ap_queue_status status = {};
	struct kvm_s390_gisa *gisa;
	struct page *h_page;
	int nisc;
	struct kvm *kvm;
	phys_addr_t h_nib;
	dma_addr_t nib;
	int ret;

	/* Verify that the notification indicator byte address is valid */
	if (vfio_ap_validate_nib(vcpu, &nib)) {
		VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%pad, apqn=%#04x\n",
				 __func__, &nib, q->apqn);

		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
			     IOMMU_READ | IOMMU_WRITE, &h_page);
	switch (ret) {
	case 1:
		break;
	default:
		VFIO_AP_DBF_WARN("%s: vfio_pin_pages failed: rc=%d, "
				 "nib=%pad, apqn=%#04x\n",
				 __func__, ret, &nib, q->apqn);

		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	kvm = q->matrix_mdev->kvm;
	gisa = kvm->arch.gisa_int.origin;

	h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
	aqic_gisa.gisc = isc;

	/* NIB in non-shared storage is a rc 6 for PV guests */
	if (kvm_s390_pv_cpu_is_protected(vcpu) &&
	    ensure_nib_shared(h_nib & PAGE_MASK, kvm->arch.gmap)) {
		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	nisc = kvm_s390_gisc_register(kvm, isc);
	if (nisc < 0) {
		VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
				 __func__, nisc, isc, q->apqn);

		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		status.response_code = AP_RESPONSE_INVALID_GISA;
		return status;
	}

	aqic_gisa.isc = nisc;
	aqic_gisa.ir = 1;
	aqic_gisa.gisa = virt_to_phys(gisa) >> 4;

	status = ap_aqic(q->apqn, aqic_gisa, h_nib);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* See if we did clear older IRQ configuration */
		vfio_ap_free_aqic_resources(q);
		q->saved_iova = nib;
		q->saved_isc = isc;
		break;
	case AP_RESPONSE_OTHERWISE_CHANGED:
		/* We could not modify IRQ settings: clear new configuration */
		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		kvm_s390_gisc_unregister(kvm, isc);
		break;
	default:
		pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
			status.response_code);
		vfio_ap_irq_disable(q);
		break;
	}

	if (status.response_code != AP_RESPONSE_NORMAL) {
		VFIO_AP_DBF_WARN("%s: PQAP(AQIC) failed with status=%#02x: "
				 "zone=%#x, ir=%#x, gisc=%#x, f=%#x, "
				 "gisa=%#x, isc=%#x, apqn=%#04x\n",
				 __func__, status.response_code,
				 aqic_gisa.zone, aqic_gisa.ir, aqic_gisa.gisc,
				 aqic_gisa.gf, aqic_gisa.gisa, aqic_gisa.isc,
				 q->apqn);
	}

	return status;
}

/**
 * vfio_ap_le_guid_to_be_uuid - convert a little endian guid array into an array
 *				of big endian elements that can be passed by
 *				value to an s390dbf sprintf event function to
 *				format a UUID string.
 *
 * @guid: the object containing the little endian guid
 * @uuid: a six-element array of long values that can be passed by value as
 *	  arguments for a formatting string specifying a UUID.
 *
 * The S390 Debug Feature (s390dbf) allows the use of "%s" in the sprintf
 * event functions only if the memory for the passed string remains available
 * as long as the debug feature exists. Since a mediated device can be removed
 * at any time, its name cannot be used because %s passes the reference to the
 * string in memory and the reference will go stale once the device is removed.
 *
 * The s390dbf string formatting function allows a maximum of 9 arguments for a
 * message to be displayed in the 'sprintf' view. In order to use the bytes
 * comprising the mediated device's UUID to display the mediated device name,
 * they will have to be converted into an array whose elements can be passed by
 * value to sprintf. For example:
 *
 * guid array: { 83, 78, 17, 62, bb, f1, f0, 47, 91, 4d, 32, a2, 2e, 3a, 88, 04 }
 * mdev name: 62177883-f1bb-47f0-914d-32a22e3a8804
 * array returned: { 62177883, f1bb, 47f0, 914d, 32a2, 2e3a8804 }
 * formatting string: "%08lx-%04lx-%04lx-%04lx-%04lx%08lx"
 */
static void vfio_ap_le_guid_to_be_uuid(guid_t *guid, unsigned long *uuid)
{
	/*
	 * The input guid is ordered in little endian, so it needs to be
	 * reordered for displaying a UUID as a string. This specifies the
	 * guid indices in proper order.
	 */
	uuid[0] = le32_to_cpup((__le32 *)guid);
	uuid[1] = le16_to_cpup((__le16 *)&guid->b[4]);
	uuid[2] = le16_to_cpup((__le16 *)&guid->b[6]);
	uuid[3] = *((__u16 *)&guid->b[8]);
	uuid[4] = *((__u16 *)&guid->b[10]);
	uuid[5] = *((__u32 *)&guid->b[12]);
}

/**
 * handle_pqap - PQAP instruction callback
 *
 * @vcpu: The vcpu on which we received the PQAP instruction
 *
 * Get the general register contents to initialize internal variables.
 * REG[0]: APQN
 * REG[1]: IR and ISC
 * REG[2]: NIB
 *
 * qstatus.response_code may be set to one of the following response codes:
 * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
 * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
 * - AP_RESPONSE_NORMAL (0): in case of success
 *   Check vfio_ap_irq_enable() and vfio_ap_irq_disable() for other possible RC.
 * We take the matrix_dev->mdevs_lock to ensure serialization on queues and
 * mediated device access.
 *
 * Return: 0 if we could handle the request inside KVM.
 * Otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
 */
static int handle_pqap(struct kvm_vcpu *vcpu)
{
	uint64_t status;
	uint16_t apqn;
	unsigned long uuid[6];
	struct vfio_ap_queue *q;
	struct ap_queue_status qstatus = {
			       .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
	struct ap_matrix_mdev *matrix_mdev;

	apqn = vcpu->run->s.regs.gprs[0] & 0xffff;

	/* If we do not use the AIV facility just go to userland */
	if (!(vcpu->arch.sie_block->eca & ECA_AIV)) {
		VFIO_AP_DBF_WARN("%s: AIV facility not installed: apqn=0x%04x, eca=0x%04x\n",
				 __func__, apqn, vcpu->arch.sie_block->eca);

		return -EOPNOTSUPP;
	}

	mutex_lock(&matrix_dev->mdevs_lock);

	if (!vcpu->kvm->arch.crypto.pqap_hook) {
		VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n",
				 __func__, apqn);

		goto out_unlock;
	}

	matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
				   struct ap_matrix_mdev, pqap_hook);

	/* If there is no guest using the mdev, there is nothing to do */
	if (!matrix_mdev->kvm) {
		vfio_ap_le_guid_to_be_uuid(&matrix_mdev->mdev->uuid, uuid);
		VFIO_AP_DBF_WARN("%s: mdev %08lx-%04lx-%04lx-%04lx-%04lx%08lx not in use: apqn=0x%04x\n",
				 __func__, uuid[0], uuid[1], uuid[2],
				 uuid[3], uuid[4], uuid[5], apqn);
		goto out_unlock;
	}

	q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
	if (!q) {
		VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n",
				 __func__, AP_QID_CARD(apqn),
				 AP_QID_QUEUE(apqn));
		goto out_unlock;
	}

	status = vcpu->run->s.regs.gprs[1];

	/* If IR bit(16) is set we enable the interrupt */
	if ((status >> (63 - 16)) & 0x01)
		qstatus = vfio_ap_irq_enable(q, status & 0x07, vcpu);
	else
		qstatus = vfio_ap_irq_disable(q);

out_unlock:
	memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
	vcpu->run->s.regs.gprs[1] >>= 32;
	mutex_unlock(&matrix_dev->mdevs_lock);
	return 0;
}
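
/*
 * Note: the mask sizes below depend on the AP Extended Addressing (APXA)
 * facility: with APXA installed, QCI reports the maximum adapter index (na)
 * and domain index (nd); without it, the masks are limited to 64 adapters
 * (APIDs 0-63) and 16 domains (APQIs 0-15).
 */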
static void vfio_ap_matrix_init(struct ap_config_info *info,
				struct ap_matrix *matrix)
{
	matrix->apm_max = info->apxa ? info->na : 63;
	matrix->aqm_max = info->apxa ? info->nd : 15;
	matrix->adm_max = info->apxa ? info->nd : 15;
}

static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
{
	if (matrix_mdev->kvm)
		kvm_arch_crypto_set_masks(matrix_mdev->kvm,
					  matrix_mdev->shadow_apcb.apm,
					  matrix_mdev->shadow_apcb.aqm,
					  matrix_mdev->shadow_apcb.adm);
}

static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
{
	DECLARE_BITMAP(prev_shadow_adm, AP_DOMAINS);

	bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS);
	bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm,
		   (unsigned long *)matrix_dev->info.adm, AP_DOMAINS);

	return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm,
			     AP_DOMAINS);
}

/*
 * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
 *				to ensure no queue devices are passed through to
 *				the guest that are not bound to the vfio_ap
 *				device driver.
 *
 * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
 * @apm_filtered: a 256-bit bitmap for storing the APIDs filtered from the
 *		  guest's AP configuration that are still in the host's AP
 *		  configuration.
 *
 * Note: If an APQN references a queue device that is not bound to the vfio_ap
 *	 driver, its APID will be filtered from the guest's APCB. The matrix
 *	 structure precludes filtering an individual APQN, so its APID will be
 *	 filtered. Consequently, all queues associated with the adapter that
 *	 are in the host's AP configuration must be reset. If queues are
 *	 subsequently made available again to the guest, they should re-appear
 *	 in a reset state.
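 *
 *	 For example (hypothetical APQN): if APQN 05.0047 is assigned to the
 *	 mdev but queue 05.0047 is not bound to the vfio_ap driver, APID 0x05
 *	 is filtered from the guest's APM and all queues of adapter 0x05 that
 *	 are in the host's AP configuration are reset.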
 *
 * Return: a boolean value indicating whether the KVM guest's APCB was changed
 *	   by the filtering or not.
 */
static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
				       unsigned long *apm_filtered)
{
	unsigned long apid, apqi, apqn;
	DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
	DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);
	struct vfio_ap_queue *q;

	bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
	bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
	bitmap_clear(apm_filtered, 0, AP_DEVICES);

	/*
	 * Copy the adapters, domains and control domains to the shadow_apcb
	 * from the matrix mdev, but only those that are assigned to the host's
	 * AP configuration.
	 */
	bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm,
		   (unsigned long *)matrix_dev->info.apm, AP_DEVICES);
	bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
		   (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);

	for_each_set_bit_inv(apid, matrix_mdev->shadow_apcb.apm, AP_DEVICES) {
		for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm,
				     AP_DOMAINS) {
			/*
			 * If the APQN is not bound to the vfio_ap device
			 * driver, then we can't assign it to the guest's
			 * AP configuration. The AP architecture won't
			 * allow filtering of a single APQN, so let's filter
			 * the APID since an adapter represents a physical
			 * hardware device.
			 */
			apqn = AP_MKQID(apid, apqi);
			q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
			if (!q || q->reset_status.response_code) {
				clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);

				/*
				 * If the adapter was previously plugged into
				 * the guest, let's let the caller know that
				 * the APID was filtered.
				 */
				if (test_bit_inv(apid, prev_shadow_apm))
					set_bit_inv(apid, apm_filtered);

				break;
			}
		}
	}

	return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm,
			     AP_DEVICES) ||
	       !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm,
			     AP_DOMAINS);
}

static int vfio_ap_mdev_init_dev(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	matrix_mdev->mdev = to_mdev_device(vdev->dev);
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
	matrix_mdev->pqap_hook = handle_pqap;
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
	hash_init(matrix_mdev->qtable.queues);

	return 0;
}

static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev;
	int ret;

	matrix_mdev = vfio_alloc_device(ap_matrix_mdev, vdev, &mdev->dev,
					&vfio_ap_matrix_dev_ops);
	if (IS_ERR(matrix_mdev))
		return PTR_ERR(matrix_mdev);

	ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
	if (ret)
		goto err_put_vdev;
	matrix_mdev->req_trigger = NULL;
	dev_set_drvdata(&mdev->dev, matrix_mdev);
	mutex_lock(&matrix_dev->mdevs_lock);
	list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
	mutex_unlock(&matrix_dev->mdevs_lock);
	return 0;

err_put_vdev:
	vfio_put_device(&matrix_mdev->vdev);
	return ret;
}

static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev,
				    struct vfio_ap_queue *q)
{
	if (q) {
		q->matrix_mdev = matrix_mdev;
		hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
	}
}

static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn)
{
	struct vfio_ap_queue *q;

	q = vfio_ap_find_queue(apqn);
	vfio_ap_mdev_link_queue(matrix_mdev, q);
}

static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q)
{
	hash_del(&q->mdev_qnode);
}

static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q)
{
	q->matrix_mdev = NULL;
}

static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
{
	struct vfio_ap_queue *q;
	unsigned long apid, apqi;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
		for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
				     AP_DOMAINS) {
			q = vfio_ap_mdev_get_queue(matrix_mdev,
						   AP_MKQID(apid, apqi));
			if (q)
				q->matrix_mdev = NULL;
		}
	}
}

static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&matrix_mdev->vdev);

	mutex_lock(&matrix_dev->guests_lock);
	mutex_lock(&matrix_dev->mdevs_lock);
	vfio_ap_mdev_reset_queues(matrix_mdev);
	vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
	list_del(&matrix_mdev->node);
	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_dev->guests_lock);
	vfio_put_device(&matrix_mdev->vdev);
}

#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
			 "already assigned to %s"

static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev,
					 unsigned long *apm,
					 unsigned long *aqm)
{
	unsigned long apid, apqi;
	const struct device *dev = mdev_dev(matrix_mdev->mdev);
	const char *mdev_name = dev_name(dev);

	for_each_set_bit_inv(apid, apm, AP_DEVICES)
		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
			dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name);
}

/**
 * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
 *
 * @mdev_apm: mask indicating the APIDs of the APQNs to be verified
 * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
 *
 * Verifies that each APQN derived from the Cartesian product of a bitmap of
 * AP adapter IDs and AP queue indexes is not configured for any matrix
 * mediated device. AP queue sharing is not allowed.
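 *
 * For example (hypothetical masks): if an existing mdev is configured with
 * APID 1 and APQIs {4, 5}, verifying APIDs {1, 2} with APQI {5} for another
 * mdev fails because APQN 01.0005 would be shared.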
 *
 * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
 */
static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
					  unsigned long *mdev_aqm)
{
	struct ap_matrix_mdev *matrix_mdev;
	DECLARE_BITMAP(apm, AP_DEVICES);
	DECLARE_BITMAP(aqm, AP_DOMAINS);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		/*
		 * If the input apm and aqm are fields of the matrix_mdev
		 * object, then move on to the next matrix_mdev.
		 */
		if (mdev_apm == matrix_mdev->matrix.apm &&
		    mdev_aqm == matrix_mdev->matrix.aqm)
			continue;

		memset(apm, 0, sizeof(apm));
		memset(aqm, 0, sizeof(aqm));

		/*
		 * We work on full longs, as we can only exclude the leftover
		 * bits in non-inverse order. The leftover is all zeros.
		 */
		if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm,
				AP_DEVICES))
			continue;

		if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm,
				AP_DOMAINS))
			continue;

		vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm);

		return -EADDRINUSE;
	}

	return 0;
}

/**
 * vfio_ap_mdev_validate_masks - verify that the APQNs assigned to the mdev are
 *				 not reserved for the default zcrypt driver and
 *				 are not assigned to another mdev.
 *
 * @matrix_mdev: the mdev to which the APQNs being validated are assigned.
 *
 * Return: One of the following values:
 * o the error returned from the ap_apqn_in_matrix_owned_by_def_drv() function,
 *   most likely -EBUSY indicating the ap_perms_mutex lock is already held.
 * o -EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the
 *   zcrypt default driver.
 * o -EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another mdev
 * o A zero indicating validation succeeded.
 */
static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
{
	if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm,
					       matrix_mdev->matrix.aqm))
		return -EADDRNOTAVAIL;

	return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm,
					      matrix_mdev->matrix.aqm);
}

static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
				      unsigned long apid)
{
	unsigned long apqi;

	for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS)
		vfio_ap_mdev_link_apqn(matrix_mdev,
				       AP_MKQID(apid, apqi));
}

static void collect_queues_to_reset(struct ap_matrix_mdev *matrix_mdev,
				    unsigned long apid,
				    struct list_head *qlist)
{
	struct vfio_ap_queue *q;
	unsigned long apqi;

	for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS) {
		q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
		if (q)
			list_add_tail(&q->reset_qnode, qlist);
	}
}

static void reset_queues_for_apid(struct ap_matrix_mdev *matrix_mdev,
				  unsigned long apid)
{
	struct list_head qlist;

	INIT_LIST_HEAD(&qlist);
	collect_queues_to_reset(matrix_mdev, apid, &qlist);
	vfio_ap_mdev_reset_qlist(&qlist);
}

static int reset_queues_for_apids(struct ap_matrix_mdev *matrix_mdev,
				  unsigned long *apm_reset)
{
	struct list_head qlist;
	unsigned long apid;

	if (bitmap_empty(apm_reset, AP_DEVICES))
		return 0;

	INIT_LIST_HEAD(&qlist);

	for_each_set_bit_inv(apid, apm_reset, AP_DEVICES)
		collect_queues_to_reset(matrix_mdev, apid, &qlist);

	return vfio_ap_mdev_reset_qlist(&qlist);
}

/**
 * assign_adapter_store - parses the APID from @buf and sets the
 *			  corresponding bit in the mediated matrix device's APM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's assign_adapter attribute
 * @buf:	a buffer containing the AP adapter number (APID) to
 *		be assigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APID is valid; otherwise,
 *	   returns one of the following errors:
 *
 *	1. -EINVAL
 *	   The APID is not a valid number
 *
 *	2. -ENODEV
 *	   The APID exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APQIs have yet been assigned, the APID is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is being used by another mediated
 *	   matrix device
 *
 *	5. -EAGAIN
 *	   A lock required to validate the mdev's AP configuration could not
 *	   be obtained.
 */
static ssize_t assign_adapter_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	int ret;
	unsigned long apid;
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apid);
	if (ret)
		goto done;

	if (apid > matrix_mdev->matrix.apm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(apid, matrix_mdev->matrix.apm)) {
		ret = count;
		goto done;
	}

	set_bit_inv(apid, matrix_mdev->matrix.apm);

	ret = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (ret) {
		clear_bit_inv(apid, matrix_mdev->matrix.apm);
		goto done;
	}

	vfio_ap_mdev_link_adapter(matrix_mdev, apid);

	if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
		reset_queues_for_apids(matrix_mdev, apm_filtered);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);

	return ret;
}
static DEVICE_ATTR_WO(assign_adapter);
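
/*
 * Usage sketch (hypothetical mdev UUID and numbers): assigning adapter 0x05
 * and domain 0x0047 from userspace gives the guest use of APQN 05.0047,
 * provided queue 05.0047 is bound to the vfio_ap driver:
 *
 *	echo 0x05 > /sys/devices/vfio_ap/matrix/$UUID/assign_adapter
 *	echo 0x47 > /sys/devices/vfio_ap/matrix/$UUID/assign_domain
 *	cat /sys/devices/vfio_ap/matrix/$UUID/matrix
 *	05.0047
 */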

static struct vfio_ap_queue
*vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev,
			     unsigned long apid, unsigned long apqi)
{
	struct vfio_ap_queue *q = NULL;

	q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
	/* If the queue is assigned to the matrix mdev, unlink it. */
	if (q)
		vfio_ap_unlink_queue_fr_mdev(q);

	return q;
}

/**
 * vfio_ap_mdev_unlink_adapter - unlink all queues associated with an
 *				 unassigned adapter from the matrix mdev to
 *				 which the adapter was assigned.
 * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
 * @apid: the APID of the unassigned adapter.
 * @qlist: list for storing queues associated with the unassigned adapter that
 *	   need to be reset.
 */
static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
					unsigned long apid,
					struct list_head *qlist)
{
	unsigned long apqi;
	struct vfio_ap_queue *q;

	for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
		q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);

		if (q && qlist) {
			if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
			    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
				list_add_tail(&q->reset_qnode, qlist);
		}
	}
}

static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
					    unsigned long apid)
{
	struct vfio_ap_queue *q, *tmpq;
	struct list_head qlist;

	INIT_LIST_HEAD(&qlist);
	vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, &qlist);

	if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
		clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	vfio_ap_mdev_reset_qlist(&qlist);

	list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
		vfio_ap_unlink_mdev_fr_queue(q);
		list_del(&q->reset_qnode);
	}
}

/**
 * unassign_adapter_store - parses the APID from @buf and clears the
 *			    corresponding bit in the mediated matrix device's
 *			    APM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's unassign_adapter attribute
 * @buf:	a buffer containing the adapter number (APID) to be unassigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APID is valid; otherwise,
 *	   returns one of the following errors:
 *	   -EINVAL if the APID is not a number
 *	   -ENODEV if the APID exceeds the maximum value configured for the
 *		   system
 */
static ssize_t unassign_adapter_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	int ret;
	unsigned long apid;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apid);
	if (ret)
		goto done;

	if (apid > matrix_mdev->matrix.apm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
	vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_adapter);

static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev,
				     unsigned long apqi)
{
	unsigned long apid;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES)
		vfio_ap_mdev_link_apqn(matrix_mdev,
				       AP_MKQID(apid, apqi));
}

/**
 * assign_domain_store - parses the APQI from @buf and sets the
 *			 corresponding bit in the mediated matrix device's AQM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's assign_domain attribute
 * @buf:	a buffer containing the AP queue index (APQI) of the domain to
 *		be assigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APQI is valid; otherwise
 *	   returns one of the following errors:
 *
 *	1. -EINVAL
 *	   The APQI is not a valid number
 *
 *	2. -ENODEV
 *	   The APQI exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APIDs have yet been assigned, the APQI is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is being used by another mediated
 *	   matrix device
 *
 *	5. -EAGAIN
 *	   The lock required to validate the mdev's AP configuration could not
 *	   be obtained.
 */
static ssize_t assign_domain_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	int ret;
	unsigned long apqi;
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apqi);
	if (ret)
		goto done;

	if (apqi > matrix_mdev->matrix.aqm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
		ret = count;
		goto done;
	}

	set_bit_inv(apqi, matrix_mdev->matrix.aqm);

	ret = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (ret) {
		clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
		goto done;
	}

	vfio_ap_mdev_link_domain(matrix_mdev, apqi);

	if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
		reset_queues_for_apids(matrix_mdev, apm_filtered);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);

	return ret;
}
static DEVICE_ATTR_WO(assign_domain);

static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
				       unsigned long apqi,
				       struct list_head *qlist)
{
	unsigned long apid;
	struct vfio_ap_queue *q;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
		q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);

		if (q && qlist) {
			if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
			    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
				list_add_tail(&q->reset_qnode, qlist);
		}
	}
}

static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
					   unsigned long apqi)
{
	struct vfio_ap_queue *q, *tmpq;
	struct list_head qlist;

	INIT_LIST_HEAD(&qlist);
	vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, &qlist);

	if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
		clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	vfio_ap_mdev_reset_qlist(&qlist);

	list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
		vfio_ap_unlink_mdev_fr_queue(q);
		list_del(&q->reset_qnode);
	}
}

/**
 * unassign_domain_store - parses the APQI from @buf and clears the
 *			   corresponding bit in the mediated matrix device's AQM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's unassign_domain attribute
 * @buf:	a buffer containing the AP queue index (APQI) of the domain to
 *		be unassigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APQI is valid; otherwise,
 *	   returns one of the following errors:
 *	   -EINVAL if the APQI is not a number
 *	   -ENODEV if the APQI exceeds the maximum value configured for the
 *		   system
 */
static ssize_t unassign_domain_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	int ret;
	unsigned long apqi;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apqi);
	if (ret)
		goto done;

	if (apqi > matrix_mdev->matrix.aqm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
	vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
	ret = count;

done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_domain);

/**
 * assign_control_domain_store - parses the domain ID from @buf and sets
 *				 the corresponding bit in the mediated matrix
 *				 device's ADM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's assign_control_domain attribute
 * @buf:	a buffer containing the domain ID to be assigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the domain ID is valid; otherwise,
 *	   returns one of the following errors:
 *	   -EINVAL if the ID is not a number
 *	   -ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t assign_control_domain_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	int ret;
	unsigned long id;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &id);
	if (ret)
		goto done;

	if (id > matrix_mdev->matrix.adm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(id, matrix_mdev->matrix.adm)) {
		ret = count;
		goto done;
	}

	/*
	 * Set the bit in the ADM (bitmask) corresponding to the AP control
	 * domain number (id). The bits in the mask, from most significant to
	 * least significant, correspond to IDs 0 up to one less than the
	 * number of control domains that can be assigned.
	 */
	set_bit_inv(id, matrix_mdev->matrix.adm);
	if (vfio_ap_mdev_filter_cdoms(matrix_mdev))
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(assign_control_domain);

/**
 * unassign_control_domain_store - parses the domain ID from @buf and
 *				   clears the corresponding bit in the mediated
 *				   matrix device's ADM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's unassign_control_domain attribute
 * @buf:	a buffer containing the domain ID to be unassigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the domain ID is valid; otherwise,
 *	   returns one of the following errors:
 *	   -EINVAL if the ID is not a number
 *	   -ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t unassign_control_domain_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	int ret;
	unsigned long domid;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &domid);
	if (ret)
		goto done;

	if (domid > matrix_mdev->matrix.adm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv(domid, matrix_mdev->matrix.adm);

	if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
		clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_control_domain);

static ssize_t control_domains_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	unsigned long id;
	int nchars = 0;
	int n;
	char *bufpos = buf;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
	unsigned long max_domid = matrix_mdev->matrix.adm_max;

	mutex_lock(&matrix_dev->mdevs_lock);
	for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
		n = sprintf(bufpos, "%04lx\n", id);
		bufpos += n;
		nchars += n;
	}
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(control_domains);
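
/*
 * Prints one APQN per line in the form <apid>.<apqi> (e.g., 05.0047). If only
 * adapters are assigned, each line shows "<apid>."; if only domains are
 * assigned, each line shows ".<apqi>".
 */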
static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
{
	char *bufpos = buf;
	unsigned long apid;
	unsigned long apqi;
	unsigned long apid1;
	unsigned long apqi1;
	unsigned long napm_bits = matrix->apm_max + 1;
	unsigned long naqm_bits = matrix->aqm_max + 1;
	int nchars = 0;
	int n;

	apid1 = find_first_bit_inv(matrix->apm, napm_bits);
	apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);

	if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
		for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
			for_each_set_bit_inv(apqi, matrix->aqm,
					     naqm_bits) {
				n = sprintf(bufpos, "%02lx.%04lx\n", apid,
					    apqi);
				bufpos += n;
				nchars += n;
			}
		}
	} else if (apid1 < napm_bits) {
		for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
			n = sprintf(bufpos, "%02lx.\n", apid);
			bufpos += n;
			nchars += n;
		}
	} else if (apqi1 < naqm_bits) {
		for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) {
			n = sprintf(bufpos, ".%04lx\n", apqi);
			bufpos += n;
			nchars += n;
		}
	}

	return nchars;
}

static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	ssize_t nchars;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf);
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(matrix);

static ssize_t guest_matrix_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t nchars;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf);
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(guest_matrix);

static struct attribute *vfio_ap_mdev_attrs[] = {
	&dev_attr_assign_adapter.attr,
	&dev_attr_unassign_adapter.attr,
	&dev_attr_assign_domain.attr,
	&dev_attr_unassign_domain.attr,
	&dev_attr_assign_control_domain.attr,
	&dev_attr_unassign_control_domain.attr,
	&dev_attr_control_domains.attr,
	&dev_attr_matrix.attr,
	&dev_attr_guest_matrix.attr,
	NULL,
};

static struct attribute_group vfio_ap_mdev_attr_group = {
	.attrs = vfio_ap_mdev_attrs
};

static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
	&vfio_ap_mdev_attr_group,
	NULL
};

/**
 * vfio_ap_mdev_set_kvm - sets all data for @matrix_mdev that are needed
 *			  to manage AP resources for the guest whose state is
 *			  represented by @kvm
 *
 * @matrix_mdev: a mediated matrix device
 * @kvm: reference to KVM instance
 *
 * Return: 0 if no other mediated matrix device has a reference to @kvm;
 *	   otherwise, returns -EPERM.
 */
static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
				struct kvm *kvm)
{
	struct ap_matrix_mdev *m;

	if (kvm->arch.crypto.crycbd) {
		down_write(&kvm->arch.crypto.pqap_hook_rwsem);
		kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
		up_write(&kvm->arch.crypto.pqap_hook_rwsem);

		get_update_locks_for_kvm(kvm);

		list_for_each_entry(m, &matrix_dev->mdev_list, node) {
			if (m != matrix_mdev && m->kvm == kvm) {
				release_update_locks_for_kvm(kvm);
				return -EPERM;
			}
		}

		kvm_get_kvm(kvm);
		matrix_mdev->kvm = kvm;
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

		release_update_locks_for_kvm(kvm);
	}

	return 0;
}
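
/*
 * When userspace unmaps an IOVA range, any queue whose pinned NIB falls
 * within that range must have its interrupt disabled so that the page
 * holding the NIB can be unpinned.
 */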
static void unmap_iova(struct ap_matrix_mdev *matrix_mdev, u64 iova, u64 length)
{
	struct ap_queue_table *qtable = &matrix_mdev->qtable;
	struct vfio_ap_queue *q;
	int loop_cursor;

	hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
		if (q->saved_iova >= iova && q->saved_iova < iova + length)
			vfio_ap_irq_disable(q);
	}
}

static void vfio_ap_mdev_dma_unmap(struct vfio_device *vdev, u64 iova,
				   u64 length)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	mutex_lock(&matrix_dev->mdevs_lock);

	unmap_iova(matrix_mdev, iova, length);

	mutex_unlock(&matrix_dev->mdevs_lock);
}

/**
 * vfio_ap_mdev_unset_kvm - performs clean-up of resources no longer needed
 *			    by @matrix_mdev.
 *
 * @matrix_mdev: a matrix mediated device
 */
static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
{
	struct kvm *kvm = matrix_mdev->kvm;

	if (kvm && kvm->arch.crypto.crycbd) {
		down_write(&kvm->arch.crypto.pqap_hook_rwsem);
		kvm->arch.crypto.pqap_hook = NULL;
		up_write(&kvm->arch.crypto.pqap_hook_rwsem);

		get_update_locks_for_kvm(kvm);

		kvm_arch_crypto_clear_masks(kvm);
		vfio_ap_mdev_reset_queues(matrix_mdev);
		kvm_put_kvm(kvm);
		matrix_mdev->kvm = NULL;

		release_update_locks_for_kvm(kvm);
	}
}

static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
	struct ap_queue *queue;
	struct vfio_ap_queue *q = NULL;

	queue = ap_get_qdev(apqn);
	if (!queue)
		return NULL;

	if (queue->ap_dev.device.driver == &matrix_dev->vfio_ap_drv->driver)
		q = dev_get_drvdata(&queue->ap_dev.device);

	put_device(&queue->ap_dev.device);

	return q;
}

static int apq_status_check(int apqn, struct ap_queue_status *status)
{
	switch (status->response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_DECONFIGURED:
		return 0;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
		return -EBUSY;
	case AP_RESPONSE_ASSOC_SECRET_NOT_UNIQUE:
	case AP_RESPONSE_ASSOC_FAILED:
		/*
		 * These asynchronous response codes indicate a PQAP(AAPQ)
		 * instruction to associate a secret with the guest failed. All
		 * subsequent AP instructions will end with the asynchronous
		 * response code until the AP queue is reset; so, let's return
		 * a value indicating a reset needs to be performed again.
		 */
		return -EAGAIN;
	default:
		WARN(true,
		     "failed to verify reset of queue %02x.%04x: TAPQ rc=%u\n",
		     AP_QID_CARD(apqn), AP_QID_QUEUE(apqn),
		     status->response_code);
		return -EIO;
	}
}

#define WAIT_MSG "Waited %dms for reset of queue %02x.%04x (%u, %u, %u)"

static void apq_reset_check(struct work_struct *reset_work)
{
	int ret = -EBUSY, elapsed = 0;
	struct ap_queue_status status;
	struct vfio_ap_queue *q;

	q = container_of(reset_work, struct vfio_ap_queue, reset_work);
	memcpy(&status, &q->reset_status, sizeof(status));
	while (true) {
		msleep(AP_RESET_INTERVAL);
		elapsed += AP_RESET_INTERVAL;
		status = ap_tapq(q->apqn, NULL);
		ret = apq_status_check(q->apqn, &status);
		if (ret == -EIO)
			return;
		if (ret == -EBUSY) {
			pr_notice_ratelimited(WAIT_MSG, elapsed,
					      AP_QID_CARD(q->apqn),
					      AP_QID_QUEUE(q->apqn),
					      status.response_code,
					      status.queue_empty,
					      status.irq_enabled);
		} else {
			if (q->reset_status.response_code == AP_RESPONSE_RESET_IN_PROGRESS ||
			    q->reset_status.response_code == AP_RESPONSE_BUSY ||
			    q->reset_status.response_code == AP_RESPONSE_STATE_CHANGE_IN_PROGRESS ||
			    ret == -EAGAIN) {
				status = ap_zapq(q->apqn, 0);
				memcpy(&q->reset_status, &status, sizeof(status));
				continue;
			}
			/*
			 * When an AP adapter is deconfigured, the
			 * associated queues are reset, so let's set the
			 * status response code to 0 so the queue may be
			 * passed through (i.e., not filtered).
			 */
			if (status.response_code == AP_RESPONSE_DECONFIGURED)
				q->reset_status.response_code = 0;
			if (q->saved_isc != VFIO_AP_ISC_INVALID)
				vfio_ap_free_aqic_resources(q);
			break;
		}
	}
}
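
/**
 * vfio_ap_mdev_reset_queue - issue a PQAP(ZAPQ) to zeroize and reset a queue
 *
 * @q: the queue to reset; may be NULL.
 *
 * If the ZAPQ is accepted but has not yet completed (or the queue is busy),
 * verification of its completion is deferred to the queue's reset_work so
 * that callers can reset multiple queues in parallel and flush the work
 * items afterwards.
 */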
1785 */ 1786 q->reset_status.response_code = 0; 1787 vfio_ap_free_aqic_resources(q); 1788 break; 1789 default: 1790 WARN(true, 1791 "PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n", 1792 AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn), 1793 status.response_code); 1794 } 1795 } 1796 1797 static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev) 1798 { 1799 int ret = 0, loop_cursor; 1800 struct vfio_ap_queue *q; 1801 1802 hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) 1803 vfio_ap_mdev_reset_queue(q); 1804 1805 hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) { 1806 flush_work(&q->reset_work); 1807 1808 if (q->reset_status.response_code) 1809 ret = -EIO; 1810 } 1811 1812 return ret; 1813 } 1814 1815 static int vfio_ap_mdev_reset_qlist(struct list_head *qlist) 1816 { 1817 int ret = 0; 1818 struct vfio_ap_queue *q; 1819 1820 list_for_each_entry(q, qlist, reset_qnode) 1821 vfio_ap_mdev_reset_queue(q); 1822 1823 list_for_each_entry(q, qlist, reset_qnode) { 1824 flush_work(&q->reset_work); 1825 1826 if (q->reset_status.response_code) 1827 ret = -EIO; 1828 } 1829 1830 return ret; 1831 } 1832 1833 static int vfio_ap_mdev_open_device(struct vfio_device *vdev) 1834 { 1835 struct ap_matrix_mdev *matrix_mdev = 1836 container_of(vdev, struct ap_matrix_mdev, vdev); 1837 1838 if (!vdev->kvm) 1839 return -EINVAL; 1840 1841 return vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm); 1842 } 1843 1844 static void vfio_ap_mdev_close_device(struct vfio_device *vdev) 1845 { 1846 struct ap_matrix_mdev *matrix_mdev = 1847 container_of(vdev, struct ap_matrix_mdev, vdev); 1848 1849 vfio_ap_mdev_unset_kvm(matrix_mdev); 1850 } 1851 1852 static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count) 1853 { 1854 struct device *dev = vdev->dev; 1855 struct ap_matrix_mdev *matrix_mdev; 1856 1857 matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev); 1858 1859 if (matrix_mdev->req_trigger) { 1860 if (!(count % 10)) 1861 dev_notice_ratelimited(dev, 1862 "Relaying device request to user (#%u)\n", 1863 count); 1864 1865 eventfd_signal(matrix_mdev->req_trigger, 1); 1866 } else if (count == 0) { 1867 dev_notice(dev, 1868 "No device request registered, blocked until released by user\n"); 1869 } 1870 } 1871 1872 static int vfio_ap_mdev_get_device_info(unsigned long arg) 1873 { 1874 unsigned long minsz; 1875 struct vfio_device_info info; 1876 1877 minsz = offsetofend(struct vfio_device_info, num_irqs); 1878 1879 if (copy_from_user(&info, (void __user *)arg, minsz)) 1880 return -EFAULT; 1881 1882 if (info.argsz < minsz) 1883 return -EINVAL; 1884 1885 info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET; 1886 info.num_regions = 0; 1887 info.num_irqs = VFIO_AP_NUM_IRQS; 1888 1889 return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; 1890 } 1891 1892 static ssize_t vfio_ap_get_irq_info(unsigned long arg) 1893 { 1894 unsigned long minsz; 1895 struct vfio_irq_info info; 1896 1897 minsz = offsetofend(struct vfio_irq_info, count); 1898 1899 if (copy_from_user(&info, (void __user *)arg, minsz)) 1900 return -EFAULT; 1901 1902 if (info.argsz < minsz || info.index >= VFIO_AP_NUM_IRQS) 1903 return -EINVAL; 1904 1905 switch (info.index) { 1906 case VFIO_AP_REQ_IRQ_INDEX: 1907 info.count = 1; 1908 info.flags = VFIO_IRQ_INFO_EVENTFD; 1909 break; 1910 default: 1911 return -EINVAL; 1912 } 1913 1914 return copy_to_user((void __user *)arg, &info, minsz) ? 
-EFAULT : 0; 1915 } 1916 1917 static int vfio_ap_irq_set_init(struct vfio_irq_set *irq_set, unsigned long arg) 1918 { 1919 int ret; 1920 size_t data_size; 1921 unsigned long minsz; 1922 1923 minsz = offsetofend(struct vfio_irq_set, count); 1924 1925 if (copy_from_user(irq_set, (void __user *)arg, minsz)) 1926 return -EFAULT; 1927 1928 ret = vfio_set_irqs_validate_and_prepare(irq_set, 1, VFIO_AP_NUM_IRQS, 1929 &data_size); 1930 if (ret) 1931 return ret; 1932 1933 if (!(irq_set->flags & VFIO_IRQ_SET_ACTION_TRIGGER)) 1934 return -EINVAL; 1935 1936 return 0; 1937 } 1938 1939 static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev, 1940 unsigned long arg) 1941 { 1942 s32 fd; 1943 void __user *data; 1944 unsigned long minsz; 1945 struct eventfd_ctx *req_trigger; 1946 1947 minsz = offsetofend(struct vfio_irq_set, count); 1948 data = (void __user *)(arg + minsz); 1949 1950 if (get_user(fd, (s32 __user *)data)) 1951 return -EFAULT; 1952 1953 if (fd == -1) { 1954 if (matrix_mdev->req_trigger) 1955 eventfd_ctx_put(matrix_mdev->req_trigger); 1956 matrix_mdev->req_trigger = NULL; 1957 } else if (fd >= 0) { 1958 req_trigger = eventfd_ctx_fdget(fd); 1959 if (IS_ERR(req_trigger)) 1960 return PTR_ERR(req_trigger); 1961 1962 if (matrix_mdev->req_trigger) 1963 eventfd_ctx_put(matrix_mdev->req_trigger); 1964 1965 matrix_mdev->req_trigger = req_trigger; 1966 } else { 1967 return -EINVAL; 1968 } 1969 1970 return 0; 1971 } 1972 1973 static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev, 1974 unsigned long arg) 1975 { 1976 int ret; 1977 struct vfio_irq_set irq_set; 1978 1979 ret = vfio_ap_irq_set_init(&irq_set, arg); 1980 if (ret) 1981 return ret; 1982 1983 switch (irq_set.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { 1984 case VFIO_IRQ_SET_DATA_EVENTFD: 1985 switch (irq_set.index) { 1986 case VFIO_AP_REQ_IRQ_INDEX: 1987 return vfio_ap_set_request_irq(matrix_mdev, arg); 1988 default: 1989 return -EINVAL; 1990 } 1991 default: 1992 return -EINVAL; 1993 } 1994 } 1995 1996 static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev, 1997 unsigned int cmd, unsigned long arg) 1998 { 1999 struct ap_matrix_mdev *matrix_mdev = 2000 container_of(vdev, struct ap_matrix_mdev, vdev); 2001 int ret; 2002 2003 mutex_lock(&matrix_dev->mdevs_lock); 2004 switch (cmd) { 2005 case VFIO_DEVICE_GET_INFO: 2006 ret = vfio_ap_mdev_get_device_info(arg); 2007 break; 2008 case VFIO_DEVICE_RESET: 2009 ret = vfio_ap_mdev_reset_queues(matrix_mdev); 2010 break; 2011 case VFIO_DEVICE_GET_IRQ_INFO: 2012 ret = vfio_ap_get_irq_info(arg); 2013 break; 2014 case VFIO_DEVICE_SET_IRQS: 2015 ret = vfio_ap_set_irqs(matrix_mdev, arg); 2016 break; 2017 default: 2018 ret = -EOPNOTSUPP; 2019 break; 2020 } 2021 mutex_unlock(&matrix_dev->mdevs_lock); 2022 2023 return ret; 2024 } 2025 2026 static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q) 2027 { 2028 struct ap_matrix_mdev *matrix_mdev; 2029 unsigned long apid = AP_QID_CARD(q->apqn); 2030 unsigned long apqi = AP_QID_QUEUE(q->apqn); 2031 2032 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { 2033 if (test_bit_inv(apid, matrix_mdev->matrix.apm) && 2034 test_bit_inv(apqi, matrix_mdev->matrix.aqm)) 2035 return matrix_mdev; 2036 } 2037 2038 return NULL; 2039 } 2040 2041 static ssize_t status_show(struct device *dev, 2042 struct device_attribute *attr, 2043 char *buf) 2044 { 2045 ssize_t nchars = 0; 2046 struct vfio_ap_queue *q; 2047 unsigned long apid, apqi; 2048 struct ap_matrix_mdev *matrix_mdev; 2049 struct ap_device *apdev = to_ap_dev(dev); 2050 2051 
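	/* Serialize against concurrent updates of the matrix mdev state. */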
mutex_lock(&matrix_dev->mdevs_lock); 2052 q = dev_get_drvdata(&apdev->device); 2053 matrix_mdev = vfio_ap_mdev_for_queue(q); 2054 2055 /* If the queue is assigned to the matrix mediated device, then 2056 * determine whether it is passed through to a guest; otherwise, 2057 * indicate that it is unassigned. 2058 */ 2059 if (matrix_mdev) { 2060 apid = AP_QID_CARD(q->apqn); 2061 apqi = AP_QID_QUEUE(q->apqn); 2062 /* 2063 * If the queue is passed through to the guest, then indicate 2064 * that it is in use; otherwise, indicate that it is 2065 * merely assigned to a matrix mediated device. 2066 */ 2067 if (matrix_mdev->kvm && 2068 test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && 2069 test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) 2070 nchars = scnprintf(buf, PAGE_SIZE, "%s\n", 2071 AP_QUEUE_IN_USE); 2072 else 2073 nchars = scnprintf(buf, PAGE_SIZE, "%s\n", 2074 AP_QUEUE_ASSIGNED); 2075 } else { 2076 nchars = scnprintf(buf, PAGE_SIZE, "%s\n", 2077 AP_QUEUE_UNASSIGNED); 2078 } 2079 2080 mutex_unlock(&matrix_dev->mdevs_lock); 2081 2082 return nchars; 2083 } 2084 2085 static DEVICE_ATTR_RO(status); 2086 2087 static struct attribute *vfio_queue_attrs[] = { 2088 &dev_attr_status.attr, 2089 NULL, 2090 }; 2091 2092 static const struct attribute_group vfio_queue_attr_group = { 2093 .attrs = vfio_queue_attrs, 2094 }; 2095 2096 static const struct vfio_device_ops vfio_ap_matrix_dev_ops = { 2097 .init = vfio_ap_mdev_init_dev, 2098 .open_device = vfio_ap_mdev_open_device, 2099 .close_device = vfio_ap_mdev_close_device, 2100 .ioctl = vfio_ap_mdev_ioctl, 2101 .dma_unmap = vfio_ap_mdev_dma_unmap, 2102 .bind_iommufd = vfio_iommufd_emulated_bind, 2103 .unbind_iommufd = vfio_iommufd_emulated_unbind, 2104 .attach_ioas = vfio_iommufd_emulated_attach_ioas, 2105 .detach_ioas = vfio_iommufd_emulated_detach_ioas, 2106 .request = vfio_ap_mdev_request 2107 }; 2108 2109 static struct mdev_driver vfio_ap_matrix_driver = { 2110 .device_api = VFIO_DEVICE_API_AP_STRING, 2111 .max_instances = MAX_ZDEV_ENTRIES_EXT, 2112 .driver = { 2113 .name = "vfio_ap_mdev", 2114 .owner = THIS_MODULE, 2115 .mod_name = KBUILD_MODNAME, 2116 .dev_groups = vfio_ap_mdev_attr_groups, 2117 }, 2118 .probe = vfio_ap_mdev_probe, 2119 .remove = vfio_ap_mdev_remove, 2120 }; 2121 2122 int vfio_ap_mdev_register(void) 2123 { 2124 int ret; 2125 2126 ret = mdev_register_driver(&vfio_ap_matrix_driver); 2127 if (ret) 2128 return ret; 2129 2130 matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT; 2131 matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT; 2132 matrix_dev->mdev_types[0] = &matrix_dev->mdev_type; 2133 ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device, 2134 &vfio_ap_matrix_driver, 2135 matrix_dev->mdev_types, 1); 2136 if (ret) 2137 goto err_driver; 2138 return 0; 2139 2140 err_driver: 2141 mdev_unregister_driver(&vfio_ap_matrix_driver); 2142 return ret; 2143 } 2144 2145 void vfio_ap_mdev_unregister(void) 2146 { 2147 mdev_unregister_parent(&matrix_dev->parent); 2148 mdev_unregister_driver(&vfio_ap_matrix_driver); 2149 } 2150 2151 int vfio_ap_mdev_probe_queue(struct ap_device *apdev) 2152 { 2153 int ret; 2154 struct vfio_ap_queue *q; 2155 DECLARE_BITMAP(apm_filtered, AP_DEVICES); 2156 struct ap_matrix_mdev *matrix_mdev; 2157 2158 ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group); 2159 if (ret) 2160 return ret; 2161 2162 q = kzalloc(sizeof(*q), GFP_KERNEL); 2163 if (!q) { 2164 ret = -ENOMEM; 2165 goto err_remove_group; 2166 } 2167 2168 q->apqn = to_ap_queue(&apdev->device)->qid; 2169 
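	/* No AQIC interruption resources are allocated for the queue yet. */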
	q->saved_isc = VFIO_AP_ISC_INVALID;
2170 	memset(&q->reset_status, 0, sizeof(q->reset_status));
2171 	INIT_WORK(&q->reset_work, apq_reset_check);
2172 	matrix_mdev = get_update_locks_by_apqn(q->apqn);
2173 
2174 	if (matrix_mdev) {
2175 		vfio_ap_mdev_link_queue(matrix_mdev, q);
2176 
2177 		/*
2178 		 * If we're in the process of handling the adding of adapters or
2179 		 * domains to the host's AP configuration, then let the
2180 		 * vfio_ap device driver's on_scan_complete callback filter the
2181 		 * matrix and update the guest's AP configuration after all of
2182 		 * the new queue devices are probed.
2183 		 */
2184 		if (!bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) ||
2185 		    !bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS))
2186 			goto done;
2187 
2188 		if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
2189 			vfio_ap_mdev_update_guest_apcb(matrix_mdev);
2190 			reset_queues_for_apids(matrix_mdev, apm_filtered);
2191 		}
2192 	}
2193 
2194 done:
2195 	dev_set_drvdata(&apdev->device, q);
2196 	release_update_locks_for_mdev(matrix_mdev);
2197 
2198 	return ret;
2199 
2200 err_remove_group:
2201 	sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
2202 	return ret;
2203 }
2204 
2205 void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
2206 {
2207 	unsigned long apid, apqi;
2208 	struct vfio_ap_queue *q;
2209 	struct ap_matrix_mdev *matrix_mdev;
2210 
2211 	sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
2212 	q = dev_get_drvdata(&apdev->device);
2213 	get_update_locks_for_queue(q);
2214 	matrix_mdev = q->matrix_mdev;
2215 	apid = AP_QID_CARD(q->apqn);
2216 	apqi = AP_QID_QUEUE(q->apqn);
2217 
2218 	if (matrix_mdev) {
2219 		/* If the queue is assigned to the guest's AP configuration */
2220 		if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
2221 		    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
2222 			/*
2223 			 * Since the queues are defined via a matrix of adapters
2224 			 * and domains, it is not possible to hot unplug a
2225 			 * single queue; so, let's unplug the adapter.
2226 			 */
2227 			clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
2228 			vfio_ap_mdev_update_guest_apcb(matrix_mdev);
2229 			reset_queues_for_apid(matrix_mdev, apid);
2230 			goto done;
2231 		}
2232 	}
2233 
2234 	/*
2235 	 * If the queue is not in the host's AP configuration, then resetting
2236 	 * it will fail with response code 01 (APQN not valid); so, let's make
2237 	 * sure it is in the host's config.
2238 	 */
2239 	if (test_bit_inv(apid, (unsigned long *)matrix_dev->info.apm) &&
2240 	    test_bit_inv(apqi, (unsigned long *)matrix_dev->info.aqm)) {
2241 		vfio_ap_mdev_reset_queue(q);
2242 		flush_work(&q->reset_work);
2243 	}
2244 
2245 done:
2246 	if (matrix_mdev)
2247 		vfio_ap_unlink_queue_fr_mdev(q);
2248 
2249 	dev_set_drvdata(&apdev->device, NULL);
2250 	kfree(q);
2251 	release_update_locks_for_mdev(matrix_mdev);
2252 }
2253 
2254 /**
2255  * vfio_ap_mdev_resource_in_use: check whether any of a set of APQNs is
2256  *				 assigned to a mediated device under the control
2257  *				 of the vfio_ap device driver.
2258  *
2259  * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check.
2260  * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check.
2261  *
2262  * Return:
2263  * * -EADDRINUSE if one or more of the APQNs specified via @apm/@aqm are
2264  *   assigned to a mediated device under the control of the vfio_ap
2265  *   device driver.
2266  * * Otherwise, return 0.
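 *
 * Note: matrix_dev->guests_lock and matrix_dev->mdevs_lock are taken in the
 * proper locking order for the duration of the check.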
2267  */
2268 int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
2269 {
2270 	int ret;
2271 
2272 	mutex_lock(&matrix_dev->guests_lock);
2273 	mutex_lock(&matrix_dev->mdevs_lock);
2274 	ret = vfio_ap_mdev_verify_no_sharing(apm, aqm);
2275 	mutex_unlock(&matrix_dev->mdevs_lock);
2276 	mutex_unlock(&matrix_dev->guests_lock);
2277 
2278 	return ret;
2279 }
2280 
2281 /**
2282  * vfio_ap_mdev_hot_unplug_cfg - hot unplug the adapters, domains and control
2283  *				 domains that have been removed from the host's
2284  *				 AP configuration from a guest.
2285  *
2286  * @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest.
2287  * @aprem: the adapters that have been removed from the host's AP configuration
2288  * @aqrem: the domains that have been removed from the host's AP configuration
2289  * @cdrem: the control domains that have been removed from the host's AP
2290  *	   configuration.
2291  */
2292 static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev,
2293 					unsigned long *aprem,
2294 					unsigned long *aqrem,
2295 					unsigned long *cdrem)
2296 {
2297 	int do_hotplug = 0;
2298 
2299 	if (!bitmap_empty(aprem, AP_DEVICES)) {
2300 		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm,
2301 					    matrix_mdev->shadow_apcb.apm,
2302 					    aprem, AP_DEVICES);
2303 	}
2304 
2305 	if (!bitmap_empty(aqrem, AP_DOMAINS)) {
2306 		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm,
2307 					    matrix_mdev->shadow_apcb.aqm,
2308 					    aqrem, AP_DOMAINS);
2309 	}
2310 
2311 	if (!bitmap_empty(cdrem, AP_DOMAINS))
2312 		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm,
2313 					    matrix_mdev->shadow_apcb.adm,
2314 					    cdrem, AP_DOMAINS);
2315 
2316 	if (do_hotplug)
2317 		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
2318 }
2319 
2320 /**
2321  * vfio_ap_mdev_cfg_remove - determines which guests are using the adapters,
2322  *			     domains and control domains that have been removed
2323  *			     from the host AP configuration and unplugs them
2324  *			     from those guests.
2325  *
2326  * @ap_remove: bitmap specifying which adapters have been removed from the host
2327  *	       config.
2328  * @aq_remove: bitmap specifying which domains have been removed from the host
2329  *	       config.
2330  * @cd_remove: bitmap specifying which control domains have been removed from
2331  *	       the host config.
2332  */
2333 static void vfio_ap_mdev_cfg_remove(unsigned long *ap_remove,
2334 				    unsigned long *aq_remove,
2335 				    unsigned long *cd_remove)
2336 {
2337 	struct ap_matrix_mdev *matrix_mdev;
2338 	DECLARE_BITMAP(aprem, AP_DEVICES);
2339 	DECLARE_BITMAP(aqrem, AP_DOMAINS);
2340 	DECLARE_BITMAP(cdrem, AP_DOMAINS);
2341 	int do_remove = 0;
2342 
2343 	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
2344 		mutex_lock(&matrix_mdev->kvm->lock);
2345 		mutex_lock(&matrix_dev->mdevs_lock);
2346 
2347 		do_remove |= bitmap_and(aprem, ap_remove,
2348 					matrix_mdev->matrix.apm,
2349 					AP_DEVICES);
2350 		do_remove |= bitmap_and(aqrem, aq_remove,
2351 					matrix_mdev->matrix.aqm,
2352 					AP_DOMAINS);
2353 		do_remove |= bitmap_andnot(cdrem, cd_remove,
2354 					   matrix_mdev->matrix.adm,
2355 					   AP_DOMAINS);
2356 
2357 		if (do_remove)
2358 			vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem,
2359 						    cdrem);
2360 
2361 		mutex_unlock(&matrix_dev->mdevs_lock);
2362 		mutex_unlock(&matrix_mdev->kvm->lock);
2363 	}
2364 }
2365 
2366 /**
2367  * vfio_ap_mdev_on_cfg_remove - responds to the removal of adapters, domains and
2368  *				control domains from the host AP configuration
2369  *				by unplugging them from the guests that are
2370  *				using them.
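 *
 * The unplug is carried out by vfio_ap_mdev_cfg_remove, which clears the
 * removed elements from each affected guest's shadow APCB (see
 * vfio_ap_mdev_hot_unplug_cfg above).
 *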
2371  * @cur_config_info: the current host AP configuration information
2372  * @prev_config_info: the previous host AP configuration information
2373  */
2374 static void vfio_ap_mdev_on_cfg_remove(struct ap_config_info *cur_config_info,
2375 				       struct ap_config_info *prev_config_info)
2376 {
2377 	int do_remove;
2378 	DECLARE_BITMAP(aprem, AP_DEVICES);
2379 	DECLARE_BITMAP(aqrem, AP_DOMAINS);
2380 	DECLARE_BITMAP(cdrem, AP_DOMAINS);
2381 
2382 	do_remove = bitmap_andnot(aprem,
2383 				  (unsigned long *)prev_config_info->apm,
2384 				  (unsigned long *)cur_config_info->apm,
2385 				  AP_DEVICES);
2386 	do_remove |= bitmap_andnot(aqrem,
2387 				   (unsigned long *)prev_config_info->aqm,
2388 				   (unsigned long *)cur_config_info->aqm,
2389 				   AP_DOMAINS);
2390 	do_remove |= bitmap_andnot(cdrem,
2391 				   (unsigned long *)prev_config_info->adm,
2392 				   (unsigned long *)cur_config_info->adm,
2393 				   AP_DOMAINS);
2394 
2395 	if (do_remove)
2396 		vfio_ap_mdev_cfg_remove(aprem, aqrem, cdrem);
2397 }
2398 
2399 /**
2400  * vfio_ap_filter_apid_by_qtype: filter APIDs from an AP mask for adapters that
2401  *				 are older than AP type 10 (CEX4).
2402  * @apm: a bitmap of the APIDs to examine
2403  * @aqm: a bitmap of the APQIs of the queues to query for the AP type.
2404  */
2405 static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
2406 {
2407 	bool apid_cleared;
2408 	struct ap_queue_status status;
2409 	unsigned long apid, apqi;
2410 	struct ap_tapq_gr2 info;
2411 
2412 	for_each_set_bit_inv(apid, apm, AP_DEVICES) {
2413 		apid_cleared = false;
2414 
2415 		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
2416 			status = ap_test_queue(AP_MKQID(apid, apqi), 1, &info);
2417 			switch (status.response_code) {
2418 			/*
2419 			 * According to the architecture in each case
2420 			 * below, the queue's info should be filled.
2421 			 */
2422 			case AP_RESPONSE_NORMAL:
2423 			case AP_RESPONSE_RESET_IN_PROGRESS:
2424 			case AP_RESPONSE_DECONFIGURED:
2425 			case AP_RESPONSE_CHECKSTOPPED:
2426 			case AP_RESPONSE_BUSY:
2427 				/*
2428 				 * The vfio_ap device driver only
2429 				 * supports CEX4 and newer adapters, so
2430 				 * remove the APID if the adapter is
2431 				 * older than a CEX4.
2432 				 */
2433 				if (info.at < AP_DEVICE_TYPE_CEX4) {
2434 					clear_bit_inv(apid, apm);
2435 					apid_cleared = true;
2436 				}
2437 
2438 				break;
2439 
2440 			default:
2441 				/*
2442 				 * If we don't know the adapter type,
2443 				 * clear its APID since it can't be
2444 				 * determined whether the vfio_ap
2445 				 * device driver supports it.
2446 				 */
2447 				clear_bit_inv(apid, apm);
2448 				apid_cleared = true;
2449 				break;
2450 			}
2451 
2452 			/*
2453 			 * If we've already cleared the APID from the apm, there
2454 			 * is no need to continue examining the remaining AP
2455 			 * queues to determine the type of the adapter.
2456 			 */
2457 			if (apid_cleared)
2458 				break;
2459 		}
2460 	}
2461 }
2462 
2463 /**
2464  * vfio_ap_mdev_cfg_add - store bitmaps specifying the adapters, domains and
2465  *			  control domains that have been added to the host's
2466  *			  AP configuration for each matrix mdev to which they
2467  *			  are assigned.
2468  *
2469  * @apm_add: a bitmap specifying the adapters that have been added to the AP
2470  *	     configuration.
2471  * @aqm_add: a bitmap specifying the domains that have been added to the AP
2472  *	     configuration.
2473  * @adm_add: a bitmap specifying the control domains that have been added to the
2474  *	     AP configuration.
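 *
 * Note: the additions are not hot plugged immediately; the stored bitmaps are
 * consumed by the vfio_ap_on_scan_complete callback after the AP bus scan has
 * finished probing the new queue devices.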
2475  */
2476 static void vfio_ap_mdev_cfg_add(unsigned long *apm_add, unsigned long *aqm_add,
2477 				 unsigned long *adm_add)
2478 {
2479 	struct ap_matrix_mdev *matrix_mdev;
2480 
2481 	if (list_empty(&matrix_dev->mdev_list))
2482 		return;
2483 
2484 	vfio_ap_filter_apid_by_qtype(apm_add, aqm_add);
2485 
2486 	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
2487 		bitmap_and(matrix_mdev->apm_add,
2488 			   matrix_mdev->matrix.apm, apm_add, AP_DEVICES);
2489 		bitmap_and(matrix_mdev->aqm_add,
2490 			   matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS);
2491 		bitmap_and(matrix_mdev->adm_add,
2492 			   matrix_mdev->matrix.adm, adm_add, AP_DOMAINS);
2493 	}
2494 }
2495 
2496 /**
2497  * vfio_ap_mdev_on_cfg_add - responds to the addition of adapters, domains and
2498  *			     control domains to the host AP configuration
2499  *			     by updating the bitmaps that specify what adapters,
2500  *			     domains and control domains have been added so they
2501  *			     can be hot plugged into the guest when the AP bus
2502  *			     scan completes (see vfio_ap_on_scan_complete
2503  *			     function).
2504  * @cur_config_info: the current AP configuration information
2505  * @prev_config_info: the previous AP configuration information
2506  */
2507 static void vfio_ap_mdev_on_cfg_add(struct ap_config_info *cur_config_info,
2508 				    struct ap_config_info *prev_config_info)
2509 {
2510 	bool do_add;
2511 	DECLARE_BITMAP(apm_add, AP_DEVICES);
2512 	DECLARE_BITMAP(aqm_add, AP_DOMAINS);
2513 	DECLARE_BITMAP(adm_add, AP_DOMAINS);
2514 
2515 	do_add = bitmap_andnot(apm_add,
2516 			       (unsigned long *)cur_config_info->apm,
2517 			       (unsigned long *)prev_config_info->apm,
2518 			       AP_DEVICES);
2519 	do_add |= bitmap_andnot(aqm_add,
2520 				(unsigned long *)cur_config_info->aqm,
2521 				(unsigned long *)prev_config_info->aqm,
2522 				AP_DOMAINS);
2523 	do_add |= bitmap_andnot(adm_add,
2524 				(unsigned long *)cur_config_info->adm,
2525 				(unsigned long *)prev_config_info->adm,
2526 				AP_DOMAINS);
2527 
2528 	if (do_add)
2529 		vfio_ap_mdev_cfg_add(apm_add, aqm_add, adm_add);
2530 }
2531 
2532 /**
2533  * vfio_ap_on_cfg_changed - handles notification of changes to the host AP
2534  *			    configuration.
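 * Removals are handled before additions, after which the new configuration
 * is cached in matrix_dev->info.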
2535 * 2536 * @cur_cfg_info: the current host AP configuration 2537 * @prev_cfg_info: the previous host AP configuration 2538 */ 2539 void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info, 2540 struct ap_config_info *prev_cfg_info) 2541 { 2542 if (!cur_cfg_info || !prev_cfg_info) 2543 return; 2544 2545 mutex_lock(&matrix_dev->guests_lock); 2546 2547 vfio_ap_mdev_on_cfg_remove(cur_cfg_info, prev_cfg_info); 2548 vfio_ap_mdev_on_cfg_add(cur_cfg_info, prev_cfg_info); 2549 memcpy(&matrix_dev->info, cur_cfg_info, sizeof(*cur_cfg_info)); 2550 2551 mutex_unlock(&matrix_dev->guests_lock); 2552 } 2553 2554 static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev) 2555 { 2556 DECLARE_BITMAP(apm_filtered, AP_DEVICES); 2557 bool filter_domains, filter_adapters, filter_cdoms, do_hotplug = false; 2558 2559 mutex_lock(&matrix_mdev->kvm->lock); 2560 mutex_lock(&matrix_dev->mdevs_lock); 2561 2562 filter_adapters = bitmap_intersects(matrix_mdev->matrix.apm, 2563 matrix_mdev->apm_add, AP_DEVICES); 2564 filter_domains = bitmap_intersects(matrix_mdev->matrix.aqm, 2565 matrix_mdev->aqm_add, AP_DOMAINS); 2566 filter_cdoms = bitmap_intersects(matrix_mdev->matrix.adm, 2567 matrix_mdev->adm_add, AP_DOMAINS); 2568 2569 if (filter_adapters || filter_domains) 2570 do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered); 2571 2572 if (filter_cdoms) 2573 do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev); 2574 2575 if (do_hotplug) 2576 vfio_ap_mdev_update_guest_apcb(matrix_mdev); 2577 2578 reset_queues_for_apids(matrix_mdev, apm_filtered); 2579 2580 mutex_unlock(&matrix_dev->mdevs_lock); 2581 mutex_unlock(&matrix_mdev->kvm->lock); 2582 } 2583 2584 void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info, 2585 struct ap_config_info *old_config_info) 2586 { 2587 struct ap_matrix_mdev *matrix_mdev; 2588 2589 mutex_lock(&matrix_dev->guests_lock); 2590 2591 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { 2592 if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) && 2593 bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) && 2594 bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS)) 2595 continue; 2596 2597 vfio_ap_mdev_hot_plug_cfg(matrix_mdev); 2598 bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES); 2599 bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS); 2600 bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS); 2601 } 2602 2603 mutex_unlock(&matrix_dev->guests_lock); 2604 } 2605
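
/*
 * Illustrative userspace sketch (not part of the driver): exercising the
 * ioctls implemented by vfio_ap_mdev_ioctl() above through a VFIO device
 * file descriptor. Acquiring device_fd is elided and its name is
 * hypothetical.
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *	struct vfio_irq_info irq_info = {
 *		.argsz = sizeof(irq_info),
 *		.index = VFIO_AP_REQ_IRQ_INDEX,
 *	};
 *
 *	if (!ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info) &&
 *	    (info.flags & VFIO_DEVICE_FLAGS_AP))
 *		ioctl(device_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
 *
 *	ioctl(device_fd, VFIO_DEVICE_RESET);
 */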