/*
 * vfio based subchannel assignment support
 *
 * Copyright 2017 IBM Corp.
 * Copyright 2019 Red Hat, Inc.
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Pierre Morel <pmorel@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include <linux/vfio_ccw.h>
#include <sys/ioctl.h>

#include "qapi/error.h"
#include "hw/sysbus.h"
#include "hw/vfio/vfio.h"
#include "hw/vfio/vfio-common.h"
#include "hw/s390x/s390-ccw.h"
#include "hw/s390x/vfio-ccw.h"
#include "hw/qdev-properties.h"
#include "hw/s390x/ccw-device.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"

struct VFIOCCWDevice {
    S390CCWDevice cdev;
    VFIODevice vdev;
    uint64_t io_region_size;
    uint64_t io_region_offset;
    struct ccw_io_region *io_region;
    uint64_t async_cmd_region_size;
    uint64_t async_cmd_region_offset;
    struct ccw_cmd_region *async_cmd_region;
    uint64_t schib_region_size;
    uint64_t schib_region_offset;
    struct ccw_schib_region *schib_region;
    uint64_t crw_region_size;
    uint64_t crw_region_offset;
    struct ccw_crw_region *crw_region;
    EventNotifier io_notifier;
    EventNotifier crw_notifier;
    bool force_orb_pfch;
    bool warned_orb_pfch;
};

static inline void warn_once_pfch(VFIOCCWDevice *vcdev, SubchDev *sch,
                                  const char *msg)
{
    warn_report_once_cond(&vcdev->warned_orb_pfch,
                          "vfio-ccw (devno %x.%x.%04x): %s",
                          sch->cssid, sch->ssid, sch->devno, msg);
}

static void vfio_ccw_compute_needs_reset(VFIODevice *vdev)
{
    vdev->needs_reset = false;
}

/*
 * We don't need vfio_hot_reset_multi and vfio_eoi operations for
 * vfio_ccw device now.
 */
struct VFIODeviceOps vfio_ccw_ops = {
    .vfio_compute_needs_reset = vfio_ccw_compute_needs_reset,
};

static IOInstEnding vfio_ccw_handle_request(SubchDev *sch)
{
    S390CCWDevice *cdev = sch->driver_data;
    VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
    struct ccw_io_region *region = vcdev->io_region;
    int ret;

    if (!(sch->orb.ctrl0 & ORB_CTRL0_MASK_PFCH) && vcdev->force_orb_pfch) {
        sch->orb.ctrl0 |= ORB_CTRL0_MASK_PFCH;
        warn_once_pfch(vcdev, sch, "PFCH flag forced");
    }

    QEMU_BUILD_BUG_ON(sizeof(region->orb_area) != sizeof(ORB));
    QEMU_BUILD_BUG_ON(sizeof(region->scsw_area) != sizeof(SCSW));
    QEMU_BUILD_BUG_ON(sizeof(region->irb_area) != sizeof(IRB));

    memset(region, 0, sizeof(*region));

    memcpy(region->orb_area, &sch->orb, sizeof(ORB));
    memcpy(region->scsw_area, &sch->curr_status.scsw, sizeof(SCSW));

again:
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->io_region_size, vcdev->io_region_offset);
    if (ret != vcdev->io_region_size) {
        if (errno == EAGAIN) {
            goto again;
        }
        error_report("vfio-ccw: write I/O region failed with errno=%d", errno);
        ret = -errno;
    } else {
        ret = region->ret_code;
    }
    switch (ret) {
    case 0:
        return IOINST_CC_EXPECTED;
    case -EBUSY:
        return IOINST_CC_BUSY;
    case -ENODEV:
    case -EACCES:
        return IOINST_CC_NOT_OPERATIONAL;
    case -EFAULT:
    default:
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return IOINST_CC_EXPECTED;
    }
}

static IOInstEnding vfio_ccw_handle_store(SubchDev *sch)
{
    S390CCWDevice *cdev = sch->driver_data;
    VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
    SCHIB *schib = &sch->curr_status;
    struct ccw_schib_region *region = vcdev->schib_region;
    SCHIB *s;
    int ret;

    /* schib region not available so nothing else to do */
    if (!region) {
        return IOINST_CC_EXPECTED;
    }

    memset(region, 0, sizeof(*region));
    ret = pread(vcdev->vdev.fd, region, vcdev->schib_region_size,
                vcdev->schib_region_offset);

    if (ret == -1) {
        /*
         * Device is probably damaged, but store subchannel does not
         * have a nonzero cc defined for this scenario. Log an error,
         * and presume things are otherwise fine.
         */
        error_report("vfio-ccw: store region read failed with errno=%d", errno);
        return IOINST_CC_EXPECTED;
    }

    /*
     * Selectively copy path-related bits of the SCHIB,
     * rather than copying the entire struct.
     */
    s = (SCHIB *)region->schib_area;
    schib->pmcw.pnom = s->pmcw.pnom;
    schib->pmcw.lpum = s->pmcw.lpum;
    schib->pmcw.pam = s->pmcw.pam;
    schib->pmcw.pom = s->pmcw.pom;

    if (s->scsw.flags & SCSW_FLAGS_MASK_PNO) {
        schib->scsw.flags |= SCSW_FLAGS_MASK_PNO;
    }

    return IOINST_CC_EXPECTED;
}

static int vfio_ccw_handle_clear(SubchDev *sch)
{
    S390CCWDevice *cdev = sch->driver_data;
    VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
    struct ccw_cmd_region *region = vcdev->async_cmd_region;
    int ret;

    if (!vcdev->async_cmd_region) {
        /* Async command region not available, fall back to emulation */
        return -ENOSYS;
    }

    memset(region, 0, sizeof(*region));
    region->command = VFIO_CCW_ASYNC_CMD_CSCH;

again:
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
    if (ret != vcdev->async_cmd_region_size) {
        if (errno == EAGAIN) {
            goto again;
        }
        error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
        ret = -errno;
    } else {
        ret = region->ret_code;
    }
    switch (ret) {
    case 0:
    case -ENODEV:
    case -EACCES:
        return 0;
    case -EFAULT:
    default:
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return 0;
    }
}

static int vfio_ccw_handle_halt(SubchDev *sch)
{
    S390CCWDevice *cdev = sch->driver_data;
    VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
    struct ccw_cmd_region *region = vcdev->async_cmd_region;
    int ret;

    if (!vcdev->async_cmd_region) {
        /* Async command region not available, fall back to emulation */
        return -ENOSYS;
    }

    memset(region, 0, sizeof(*region));
    region->command = VFIO_CCW_ASYNC_CMD_HSCH;

again:
    ret = pwrite(vcdev->vdev.fd, region,
                 vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset);
    if (ret != vcdev->async_cmd_region_size) {
        if (errno == EAGAIN) {
            goto again;
        }
        error_report("vfio-ccw: write cmd region failed with errno=%d", errno);
        ret = -errno;
    } else {
        ret = region->ret_code;
    }
    switch (ret) {
    case 0:
    case -EBUSY:
    case -ENODEV:
    case -EACCES:
        return 0;
    case -EFAULT:
    default:
        sch_gen_unit_exception(sch);
        css_inject_io_interrupt(sch);
        return 0;
    }
}

static void vfio_ccw_reset(DeviceState *dev)
{
    CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev);
    S390CCWDevice *cdev = DO_UPCAST(S390CCWDevice, parent_obj, ccw_dev);
    VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);

    ioctl(vcdev->vdev.fd, VFIO_DEVICE_RESET);
}

static void vfio_ccw_crw_read(VFIOCCWDevice *vcdev)
{
    struct ccw_crw_region *region = vcdev->crw_region;
    CRW crw;
    int size;

    /* Keep reading CRWs as long as data is returned */
    do {
        memset(region, 0, sizeof(*region));
        size = pread(vcdev->vdev.fd, region, vcdev->crw_region_size,
                     vcdev->crw_region_offset);

        if (size == -1) {
            error_report("vfio-ccw: Read crw region failed with errno=%d",
                         errno);
            break;
        }

        if (region->crw == 0) {
            /* No more CRWs to queue */
            break;
        }

        memcpy(&crw, &region->crw, sizeof(CRW));

        css_crw_add_to_queue(crw);
    } while (1);
}

static void vfio_ccw_crw_notifier_handler(void *opaque)
{
    VFIOCCWDevice *vcdev = opaque;

    while (event_notifier_test_and_clear(&vcdev->crw_notifier)) {
        vfio_ccw_crw_read(vcdev);
    }
}

static void vfio_ccw_io_notifier_handler(void *opaque)
{
    VFIOCCWDevice *vcdev = opaque;
    struct ccw_io_region *region = vcdev->io_region;
    S390CCWDevice *cdev = S390_CCW_DEVICE(vcdev);
    CcwDevice *ccw_dev = CCW_DEVICE(cdev);
    SubchDev *sch = ccw_dev->sch;
    SCHIB *schib = &sch->curr_status;
    SCSW s;
    IRB irb;
    int size;

    if (!event_notifier_test_and_clear(&vcdev->io_notifier)) {
        return;
    }

    size = pread(vcdev->vdev.fd, region, vcdev->io_region_size,
                 vcdev->io_region_offset);
    if (size == -1) {
        switch (errno) {
        case ENODEV:
            /* Generate a deferred cc 3 condition. */
            schib->scsw.flags |= SCSW_FLAGS_MASK_CC;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
            goto read_err;
        case EFAULT:
            /* Memory problem, generate channel data check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_DATA_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                                SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        default:
            /* Error, generate channel program check. */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_PROG_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                                SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            goto read_err;
        }
    } else if (size != vcdev->io_region_size) {
        /* Information transfer error, generate channel-control check. */
        schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
        schib->scsw.cstat = SCSW_CSTAT_CHN_CTRL_CHK;
        schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
        schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                            SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
        goto read_err;
    }

    memcpy(&irb, region->irb_area, sizeof(IRB));

    /* Update control block via irb. */
    s = schib->scsw;
    copy_scsw_to_guest(&s, &irb.scsw);
    schib->scsw = s;

    /* If a unit check is pending, copy sense data. */
    if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) &&
        (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) {
        memcpy(sch->sense_data, irb.ecw, sizeof(irb.ecw));
    }

read_err:
    css_inject_io_interrupt(sch);
}
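
/*
 * Register an eventfd-based notifier for one of the device's irqs (the I/O
 * irq, or the CRW irq when the device provides a CRW region) and hook it
 * into the main loop, so the handlers above run whenever the kernel driver
 * signals the corresponding event.
 */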
static void vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev,
                                           unsigned int irq,
                                           Error **errp)
{
    VFIODevice *vdev = &vcdev->vdev;
    struct vfio_irq_info *irq_info;
    size_t argsz;
    int fd;
    EventNotifier *notifier;
    IOHandler *fd_read;

    switch (irq) {
    case VFIO_CCW_IO_IRQ_INDEX:
        notifier = &vcdev->io_notifier;
        fd_read = vfio_ccw_io_notifier_handler;
        break;
    case VFIO_CCW_CRW_IRQ_INDEX:
        notifier = &vcdev->crw_notifier;
        fd_read = vfio_ccw_crw_notifier_handler;
        break;
    default:
        error_setg(errp, "vfio: Unsupported device irq(%d)", irq);
        return;
    }

    if (vdev->num_irqs < irq + 1) {
        error_setg(errp, "vfio: unexpected number of irqs %u",
                   vdev->num_irqs);
        return;
    }

    argsz = sizeof(*irq_info);
    irq_info = g_malloc0(argsz);
    irq_info->index = irq;
    irq_info->argsz = argsz;
    if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO,
              irq_info) < 0 || irq_info->count < 1) {
        error_setg_errno(errp, errno, "vfio: Error getting irq info");
        goto out_free_info;
    }

    if (event_notifier_init(notifier, 0)) {
        error_setg_errno(errp, errno,
                         "vfio: Unable to init event notifier for irq (%d)",
                         irq);
        goto out_free_info;
    }

    fd = event_notifier_get_fd(notifier);
    qemu_set_fd_handler(fd, fd_read, NULL, vcdev);

    if (vfio_set_irq_signaling(vdev, irq, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
        qemu_set_fd_handler(fd, NULL, NULL, vcdev);
        event_notifier_cleanup(notifier);
    }

out_free_info:
    g_free(irq_info);
}

static void vfio_ccw_unregister_irq_notifier(VFIOCCWDevice *vcdev,
                                             unsigned int irq)
{
    Error *err = NULL;
    EventNotifier *notifier;

    switch (irq) {
    case VFIO_CCW_IO_IRQ_INDEX:
        notifier = &vcdev->io_notifier;
        break;
    case VFIO_CCW_CRW_IRQ_INDEX:
        notifier = &vcdev->crw_notifier;
        break;
    default:
        error_report("vfio: Unsupported device irq(%d)", irq);
        return;
    }

    if (vfio_set_irq_signaling(&vcdev->vdev, irq, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name);
    }

    qemu_set_fd_handler(event_notifier_get_fd(notifier),
                        NULL, NULL, vcdev);
    event_notifier_cleanup(notifier);
}

static void vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp)
{
    VFIODevice *vdev = &vcdev->vdev;
    struct vfio_region_info *info;
    int ret;

    /* Sanity check device */
    if (!(vdev->flags & VFIO_DEVICE_FLAGS_CCW)) {
        error_setg(errp, "vfio: Um, this isn't a vfio-ccw device");
        return;
    }

    /*
     * We always expect at least the I/O region to be present. We also
     * may have a variable number of regions governed by capabilities.
     */
    if (vdev->num_regions < VFIO_CCW_CONFIG_REGION_INDEX + 1) {
        error_setg(errp, "vfio: too few regions (%u), expected at least %u",
                   vdev->num_regions, VFIO_CCW_CONFIG_REGION_INDEX + 1);
        return;
    }

    ret = vfio_get_region_info(vdev, VFIO_CCW_CONFIG_REGION_INDEX, &info);
    if (ret) {
        error_setg_errno(errp, -ret, "vfio: Error getting config info");
        return;
    }

    vcdev->io_region_size = info->size;
    if (sizeof(*vcdev->io_region) != vcdev->io_region_size) {
        error_setg(errp, "vfio: Unexpected size of the I/O region");
        goto out_err;
    }

    vcdev->io_region_offset = info->offset;
    vcdev->io_region = g_malloc0(info->size);

    /* check for the optional async command region */
    ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
                                   VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, &info);
    if (!ret) {
        vcdev->async_cmd_region_size = info->size;
        if (sizeof(*vcdev->async_cmd_region) != vcdev->async_cmd_region_size) {
            error_setg(errp, "vfio: Unexpected size of the async cmd region");
            goto out_err;
        }
        vcdev->async_cmd_region_offset = info->offset;
        vcdev->async_cmd_region = g_malloc0(info->size);
    }

    ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
                                   VFIO_REGION_SUBTYPE_CCW_SCHIB, &info);
    if (!ret) {
        vcdev->schib_region_size = info->size;
        if (sizeof(*vcdev->schib_region) != vcdev->schib_region_size) {
            error_setg(errp, "vfio: Unexpected size of the schib region");
            goto out_err;
        }
        vcdev->schib_region_offset = info->offset;
        vcdev->schib_region = g_malloc(info->size);
    }

    ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW,
                                   VFIO_REGION_SUBTYPE_CCW_CRW, &info);

    if (!ret) {
        vcdev->crw_region_size = info->size;
        if (sizeof(*vcdev->crw_region) != vcdev->crw_region_size) {
            error_setg(errp, "vfio: Unexpected size of the CRW region");
            goto out_err;
        }
        vcdev->crw_region_offset = info->offset;
        vcdev->crw_region = g_malloc(info->size);
    }

    g_free(info);
    return;

out_err:
    g_free(vcdev->crw_region);
    g_free(vcdev->schib_region);
    g_free(vcdev->async_cmd_region);
    g_free(vcdev->io_region);
    g_free(info);
    return;
}

static void vfio_ccw_put_region(VFIOCCWDevice *vcdev)
{
    g_free(vcdev->crw_region);
    g_free(vcdev->schib_region);
    g_free(vcdev->async_cmd_region);
    g_free(vcdev->io_region);
}

static void vfio_ccw_put_device(VFIOCCWDevice *vcdev)
{
    g_free(vcdev->vdev.name);
    vfio_put_base_device(&vcdev->vdev);
}

static void vfio_ccw_get_device(VFIOGroup *group, VFIOCCWDevice *vcdev,
                                Error **errp)
{
    char *name = g_strdup_printf("%x.%x.%04x", vcdev->cdev.hostid.cssid,
                                 vcdev->cdev.hostid.ssid,
                                 vcdev->cdev.hostid.devid);
    VFIODevice *vbasedev;

    QLIST_FOREACH(vbasedev, &group->device_list, next) {
        if (strcmp(vbasedev->name, name) == 0) {
            error_setg(errp, "vfio: subchannel %s has already been attached",
                       name);
            goto out_err;
        }
    }

    /*
     * All vfio-ccw devices are believed to operate in a way compatible with
     * memory ballooning, ie. pages pinned in the host are in the current
     * working set of the guest driver and therefore never overlap with pages
     * available to the guest balloon driver. This needs to be set before
     * vfio_get_device() for vfio common to handle the balloon inhibitor.
     */
    vcdev->vdev.balloon_allowed = true;

    if (vfio_get_device(group, vcdev->cdev.mdevid, &vcdev->vdev, errp)) {
        goto out_err;
    }

    vcdev->vdev.ops = &vfio_ccw_ops;
    vcdev->vdev.type = VFIO_DEVICE_TYPE_CCW;
    vcdev->vdev.name = name;
    vcdev->vdev.dev = &vcdev->cdev.parent_obj.parent_obj;

    return;

out_err:
    g_free(name);
}

static VFIOGroup *vfio_ccw_get_group(S390CCWDevice *cdev, Error **errp)
{
    char *tmp, group_path[PATH_MAX];
    ssize_t len;
    int groupid;

    tmp = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/%s/iommu_group",
                          cdev->hostid.cssid, cdev->hostid.ssid,
                          cdev->hostid.devid, cdev->mdevid);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len <= 0 || len >= sizeof(group_path)) {
        error_setg(errp, "vfio: no iommu_group found");
        return NULL;
    }

    group_path[len] = 0;

    if (sscanf(basename(group_path), "%d", &groupid) != 1) {
        error_setg(errp, "vfio: failed to read %s", group_path);
        return NULL;
    }

    return vfio_get_group(groupid, &address_space_memory, errp);
}

static void vfio_ccw_realize(DeviceState *dev, Error **errp)
{
    VFIOGroup *group;
    CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev);
    S390CCWDevice *cdev = DO_UPCAST(S390CCWDevice, parent_obj, ccw_dev);
    VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);
    Error *err = NULL;

    /* Call the class init function for subchannel. */
    if (cdc->realize) {
        cdc->realize(cdev, vcdev->vdev.sysfsdev, &err);
        if (err) {
            goto out_err_propagate;
        }
    }

    group = vfio_ccw_get_group(cdev, &err);
    if (!group) {
        goto out_group_err;
    }

    vfio_ccw_get_device(group, vcdev, &err);
    if (err) {
        goto out_device_err;
    }

    vfio_ccw_get_region(vcdev, &err);
    if (err) {
        goto out_region_err;
    }

    vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX, &err);
    if (err) {
        goto out_notifier_err;
    }

    if (vcdev->crw_region) {
        vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX, &err);
        if (err) {
            vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
            goto out_notifier_err;
        }
    }

    return;

out_notifier_err:
    vfio_ccw_put_region(vcdev);
out_region_err:
    vfio_ccw_put_device(vcdev);
out_device_err:
    vfio_put_group(group);
out_group_err:
    if (cdc->unrealize) {
        cdc->unrealize(cdev);
    }
out_err_propagate:
    error_propagate(errp, err);
}

static void vfio_ccw_unrealize(DeviceState *dev)
{
    CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev);
    S390CCWDevice *cdev = DO_UPCAST(S390CCWDevice, parent_obj, ccw_dev);
    VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);
    VFIOGroup *group = vcdev->vdev.group;

    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX);
    vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX);
    vfio_ccw_put_region(vcdev);
    vfio_ccw_put_device(vcdev);
    vfio_put_group(group);

    if (cdc->unrealize) {
        cdc->unrealize(cdev);
    }
}

static Property vfio_ccw_properties[] = {
    DEFINE_PROP_STRING("sysfsdev", VFIOCCWDevice, vdev.sysfsdev),
    DEFINE_PROP_BOOL("force-orb-pfch", VFIOCCWDevice, force_orb_pfch, false),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vfio_ccw_vmstate = {
    .name = "vfio-ccw",
    .unmigratable = 1,
};

static void vfio_ccw_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass);

    device_class_set_props(dc, vfio_ccw_properties);
    dc->vmsd = &vfio_ccw_vmstate;
    dc->desc = "VFIO-based subchannel assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->realize = vfio_ccw_realize;
    dc->unrealize = vfio_ccw_unrealize;
    dc->reset = vfio_ccw_reset;

    cdc->handle_request = vfio_ccw_handle_request;
    cdc->handle_halt = vfio_ccw_handle_halt;
    cdc->handle_clear = vfio_ccw_handle_clear;
    cdc->handle_store = vfio_ccw_handle_store;
}

static const TypeInfo vfio_ccw_info = {
    .name = TYPE_VFIO_CCW,
    .parent = TYPE_S390_CCW,
    .instance_size = sizeof(VFIOCCWDevice),
    .class_init = vfio_ccw_class_init,
};

static void register_vfio_ccw_type(void)
{
    type_register_static(&vfio_ccw_info);
}

type_init(register_vfio_ccw_type)