/*
 * virtio ccw target implementation
 *
 * Copyright 2012,2015 IBM Corp.
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *            Pierre Morel <pmorel@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/hw.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "net/net.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-net.h"
#include "hw/sysbus.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/s390x/adapter.h"
#include "hw/s390x/s390_flic.h"

#include "hw/s390x/ioinst.h"
#include "hw/s390x/css.h"
#include "virtio-ccw.h"
#include "trace.h"
#include "hw/s390x/css-bridge.h"
#include "hw/s390x/s390-virtio-ccw.h"

/* Classic (non-adapter) interrupts expose one indicator bit per queue. */
#define NR_CLASSIC_INDICATOR_BITS 64

/*
 * vmstate post_load hook for the device section: restore runtime state
 * that is not migrated directly.  Re-links the subchannel to this device,
 * recomputes the adapter id when thin interrupts were active (adapter ids
 * are host-local and not stable across migration) and lets the CCW layer
 * re-fill the subchannel ids.
 */
static int virtio_ccw_dev_post_load(void *opaque, int version_id)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(opaque);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);

    /* The driver_data back pointer is not migrated; restore it by hand. */
    ccw_dev->sch->driver_data = dev;
    if (ccw_dev->sch->thinint_active) {
        dev->routes.adapter.adapter_id = css_get_adapter_id(
                                         CSS_IO_ADAPTER_VIRTIO,
                                         dev->thinint_isc);
    }
    /* Re-fill subch_id after loading the subchannel states.*/
    if (ck->refill_ids) {
        ck->refill_ids(ccw_dev);
    }
    return 0;
}

/*
 * Staging area used to migrate the config_vector of the VirtIODevice
 * sitting on our bus (VirtIODevice does not migrate itself; see the
 * comment in vmstate_virtio_ccw_dev below).
 */
typedef struct VirtioCcwDeviceTmp {
    VirtioCcwDevice *parent;
    uint16_t config_vector;
} VirtioCcwDeviceTmp;

/* Snapshot the vdev's config_vector into the tmp area before saving. */
static int virtio_ccw_dev_tmp_pre_save(void *opaque)
{
    VirtioCcwDeviceTmp *tmp = opaque;
    VirtioCcwDevice *dev = tmp->parent;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    tmp->config_vector = vdev->config_vector;

    return 0;
}
/* Propagate the migrated config_vector back into the vdev after load. */
static int virtio_ccw_dev_tmp_post_load(void *opaque, int version_id)
{
    VirtioCcwDeviceTmp *tmp = opaque;
    VirtioCcwDevice *dev = tmp->parent;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    vdev->config_vector = tmp->config_vector;
    return 0;
}

/* Helper vmstate for the config_vector staging area above. */
const VMStateDescription vmstate_virtio_ccw_dev_tmp = {
    .name = "s390_virtio_ccw_dev_tmp",
    .pre_save = virtio_ccw_dev_tmp_pre_save,
    .post_load = virtio_ccw_dev_tmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp),
        VMSTATE_END_OF_LIST()
    }
};

/* Migration description for a virtio-ccw proxy device. */
const VMStateDescription vmstate_virtio_ccw_dev = {
    .name = "s390_virtio_ccw_dev",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = virtio_ccw_dev_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(summary_indicator, VirtioCcwDevice),
        /*
         * Ugly hack because VirtIODevice does not migrate itself.
         * This also makes legacy via vmstate_save_state possible.
         */
        VMSTATE_WITH_TMP(VirtioCcwDevice, VirtioCcwDeviceTmp,
                         vmstate_virtio_ccw_dev_tmp),
        VMSTATE_STRUCT(routes, VirtioCcwDevice, 1, vmstate_adapter_routes,
                       AdapterRoutes),
        VMSTATE_UINT8(thinint_isc, VirtioCcwDevice),
        VMSTATE_INT32(revision, VirtioCcwDevice),
        VMSTATE_END_OF_LIST()
    }
};

static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev);

/* Fetch the VirtIODevice attached to a subchannel, or NULL if none. */
VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
{
    VirtIODevice *vdev = NULL;
    VirtioCcwDevice *dev = sch->driver_data;

    if (dev) {
        vdev = virtio_bus_get_device(&dev->bus);
    }
    return vdev;
}

/* Thin wrappers delegating ioeventfd control to the generic virtio bus. */
static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_start_ioeventfd(&dev->bus);
}

static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_stop_ioeventfd(&dev->bus);
}

/* True when the user/machine did not disable ioeventfd for this device. */
static bool virtio_ccw_ioeventfd_enabled(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0;
}

/*
 * (Un)assign an ioeventfd for queue n.  The kernel keys the notifier on
 * the full 32-bit subchannel id (cssid/ssid in the high half, schid in
 * the low half).
 */
static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;
    uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;

    return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
}

/* Communication blocks used by several channel commands.
 */

/* SET_VQ payload for legacy (pre-virtio-1) drivers. */
typedef struct VqInfoBlockLegacy {
    uint64_t queue;
    uint32_t align;
    uint16_t index;
    uint16_t num;
} QEMU_PACKED VqInfoBlockLegacy;

/* SET_VQ payload for virtio-1 (revision >= 1) drivers: split rings. */
typedef struct VqInfoBlock {
    uint64_t desc;
    uint32_t res0;
    uint16_t index;
    uint16_t num;
    uint64_t avail;
    uint64_t used;
} QEMU_PACKED VqInfoBlock;

/* READ_VQ_CONF payload: queue index in, max ring size out. */
typedef struct VqConfigBlock {
    uint16_t index;
    uint16_t num_max;
} QEMU_PACKED VqConfigBlock;

/* READ_FEAT/WRITE_FEAT payload: one 32-bit feature window + its index. */
typedef struct VirtioFeatDesc {
    uint32_t features;
    uint8_t index;
} QEMU_PACKED VirtioFeatDesc;

/* SET_IND_ADAPTER payload describing the thin-interrupt indicators. */
typedef struct VirtioThinintInfo {
    hwaddr summary_indicator;
    hwaddr device_indicator;
    uint64_t ind_bit;
    uint8_t isc;
} QEMU_PACKED VirtioThinintInfo;

/* SET_VIRTIO_REV payload; data[] is reserved for future revisions. */
typedef struct VirtioRevInfo {
    uint16_t revision;
    uint16_t length;
    uint8_t data[0];
} QEMU_PACKED VirtioRevInfo;

/* Specify where the virtqueues for the subchannel are in guest memory. */
static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
                              VqInfoBlockLegacy *linfo)
{
    /* Exactly one of info (virtio-1) / linfo (legacy) is non-NULL. */
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    uint16_t index = info ? info->index : linfo->index;
    uint16_t num = info ? info->num : linfo->num;
    uint64_t desc = info ? info->desc : linfo->queue;

    if (index >= VIRTIO_QUEUE_MAX) {
        return -EINVAL;
    }

    /* Current code in virtio.c relies on 4K alignment. */
    if (linfo && desc && (linfo->align != 4096)) {
        return -EINVAL;
    }

    if (!vdev) {
        return -EINVAL;
    }

    if (info) {
        virtio_queue_set_rings(vdev, index, desc, info->avail, info->used);
    } else {
        virtio_queue_set_addr(vdev, index, desc);
    }
    if (!desc) {
        /* A zero address unassigns the queue. */
        virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR);
    } else {
        if (info) {
            /* virtio-1 allows changing the ring size. */
            if (virtio_queue_get_max_num(vdev, index) < num) {
                /* Fail if we exceed the maximum number. */
                return -EINVAL;
            }
            virtio_queue_set_num(vdev, index, num);
        } else if (virtio_queue_get_num(vdev, index) > num) {
            /* Fail if we don't have a big enough queue. */
            return -EINVAL;
        }
        /* We ignore possible increased num for legacy for compatibility. */
        virtio_queue_set_vector(vdev, index, index);
    }
    /* tell notify handler in case of config change */
    vdev->config_vector = VIRTIO_QUEUE_MAX;
    return 0;
}

/*
 * Reset the virtio part of the device: reset the vdev, drop all
 * indicator mappings and deactivate thin interrupts.
 */
static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev)
{
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    virtio_ccw_stop_ioeventfd(dev);
    virtio_reset(vdev);
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
    if (dev->indicators2) {
        release_indicator(&dev->routes.adapter, dev->indicators2);
        dev->indicators2 = NULL;
    }
    if (dev->summary_indicator) {
        release_indicator(&dev->routes.adapter, dev->summary_indicator);
        dev->summary_indicator = NULL;
    }
    ccw_dev->sch->thinint_active = false;
}

/*
 * Handle CCW_CMD_SET_VQ: read the (legacy or virtio-1) queue info block
 * from the channel data stream, byteswap it and apply it.
 */
static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
                                    bool is_legacy)
{
    int ret;
    VqInfoBlock info;
    VqInfoBlockLegacy linfo;
    size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info);

    if (check_len) {
        if (ccw.count != info_len) {
            return -EINVAL;
        }
    } else if (ccw.count < info_len) {
        /* Can't execute command.
         */
        return -EINVAL;
    }
    if (!ccw.cda) {
        return -EFAULT;
    }
    /* All payload fields arrive big-endian on the channel. */
    if (is_legacy) {
        ccw_dstream_read(&sch->cds, linfo);
        be64_to_cpus(&linfo.queue);
        be32_to_cpus(&linfo.align);
        be16_to_cpus(&linfo.index);
        be16_to_cpus(&linfo.num);
        ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
    } else {
        ccw_dstream_read(&sch->cds, info);
        be64_to_cpus(&info.desc);
        be16_to_cpus(&info.index);
        be16_to_cpus(&info.num);
        be64_to_cpus(&info.avail);
        be64_to_cpus(&info.used);
        ret = virtio_ccw_set_vqs(sch, &info, NULL);
    }
    sch->curr_status.scsw.count = 0;
    return ret;
}

/*
 * Main CCW interpretation callback: decode and execute one channel
 * command word against this virtio subchannel.
 *
 * Return value conventions (mapped to channel status by the caller):
 *   0        success
 *   -EINVAL  incorrect length / bad parameter
 *   -EFAULT  missing or inaccessible data address
 *   -ENOSYS  command reject
 */
static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    int ret;
    VirtioRevInfo revinfo;
    uint8_t status;
    VirtioFeatDesc features;
    hwaddr indicators;
    VqConfigBlock vq_config;
    VirtioCcwDevice *dev = sch->driver_data;
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    bool check_len;
    int len;
    VirtioThinintInfo thinint;

    if (!dev) {
        return -EINVAL;
    }

    trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
                                   ccw.cmd_code);
    /* Without suppress-length-indication, the count must match exactly. */
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    if (dev->force_revision_1 && dev->revision < 0 &&
        ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) {
        /*
         * virtio-1 drivers must start with negotiating to a revision >= 1,
         * so post a command reject for all other commands
         */
        return -ENOSYS;
    }

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_SET_VQ:
        ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1);
        break;
    case CCW_CMD_VDEV_RESET:
        virtio_ccw_reset_virtio(dev, vdev);
        ret = 0;
        break;
    case CCW_CMD_READ_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            /* Skip the features field, read only the requested index. */
            ccw_dstream_advance(&sch->cds, sizeof(features.features));
            ccw_dstream_read(&sch->cds, features.index);
            if (features.index == 0) {
                if (dev->revision >= 1) {
                    /* Don't offer legacy features for modern devices. */
                    features.features = (uint32_t)
                        (vdev->host_features & ~vdc->legacy_features);
                } else {
                    features.features = (uint32_t)vdev->host_features;
                }
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * Only offer feature bits beyond 31 if the guest has
                 * negotiated at least revision 1.
                 */
                features.features = (uint32_t)(vdev->host_features >> 32);
            } else {
                /* Return zeroes if the guest supports more feature bits. */
                features.features = 0;
            }
            /* Rewind to write the answer back at the start of the block. */
            ccw_dstream_rewind(&sch->cds);
            cpu_to_le32s(&features.features);
            ccw_dstream_write(&sch->cds, features.features);
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /*
             * NOTE(review): the return value of ccw_dstream_read is not
             * checked here (unlike the SET_IND_ADAPTER case below) —
             * confirm a short/failed read cannot leave 'features' stale.
             */
            ccw_dstream_read(&sch->cds, features);
            le32_to_cpus(&features.features);
            if (features.index == 0) {
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0xffffffff00000000ULL) |
                                    features.features);
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * If the guest did not negotiate at least revision 1,
                 * we did not offer it any feature bits beyond 31. Such a
                 * guest passing us any bit here is therefore buggy.
                 */
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0x00000000ffffffffULL) |
                                    ((uint64_t)features.features << 32));
            } else {
                /*
                 * If the guest supports more feature bits, assert that it
                 * passes us zeroes for those we don't support.
                 */
                if (features.features) {
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "Guest bug: features[%i]=%x (expected 0)",
                                  features.index, features.features);
                    /* XXX: do a unit check here? */
                }
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        /* Partial reads of the config space are allowed. */
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            virtio_bus_get_vdev_config(&dev->bus, vdev->config);
            ccw_dstream_write_buf(&sch->cds, vdev->config, len);
            sch->curr_status.scsw.count = ccw.count - len;
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len);
            if (!ret) {
                virtio_bus_set_vdev_config(&dev->bus, vdev->config);
                sch->curr_status.scsw.count = ccw.count - len;
            }
        }
        break;
    case CCW_CMD_READ_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* Single byte store of the current device status. */
            address_space_stb(&address_space_memory, ccw.cda, vdev->status,
                              MEMTXATTRS_UNSPECIFIED, NULL);
            sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, status);
            if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                /* Stop ioeventfd before the device leaves DRIVER_OK. */
                virtio_ccw_stop_ioeventfd(dev);
            }
            if (virtio_set_status(vdev, status) == 0) {
                if (vdev->status == 0) {
                    /* Writing 0 means a full device reset. */
                    virtio_ccw_reset_virtio(dev, vdev);
                }
                if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                    virtio_ccw_start_ioeventfd(dev);
                }
                sch->curr_status.scsw.count = ccw.count - sizeof(status);
                ret = 0;
            } else {
                /* Trigger a command reject. */
                ret = -ENOSYS;
            }
        }
        break;
    case CCW_CMD_SET_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (sch->thinint_active) {
            /* Classic and adapter indicators are mutually exclusive. */
            /* Trigger a command reject. */
            ret = -ENOSYS;
            break;
        }
        if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) {
            /* More queues than indicator bits --> trigger a reject */
            ret = -ENOSYS;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, indicators);
            be64_to_cpus(&indicators);
            dev->indicators = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_CONF_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* indicators2 is the configuration-change indicator. */
            ccw_dstream_read(&sch->cds, indicators);
            be64_to_cpus(&indicators);
            dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_VQ_CONF:
        if (check_len) {
            if (ccw.count != sizeof(vq_config)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(vq_config)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, vq_config.index);
            be16_to_cpus(&vq_config.index);
            if (vq_config.index >= VIRTIO_QUEUE_MAX) {
                ret = -EINVAL;
                break;
            }
            vq_config.num_max = virtio_queue_get_num(vdev,
                                                     vq_config.index);
            cpu_to_be16s(&vq_config.num_max);
            /* num_max follows index in the block, so write in place. */
            ccw_dstream_write(&sch->cds, vq_config.num_max);
            sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_IND_ADAPTER:
        if (check_len) {
            if (ccw.count != sizeof(thinint)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(thinint)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else if (dev->indicators && !sch->thinint_active) {
            /* Classic indicators already set up: adapter setup rejected. */
            /* Trigger a command reject. */
            ret = -ENOSYS;
        } else {
            if (ccw_dstream_read(&sch->cds, thinint)) {
                ret = -EFAULT;
            } else {
                be64_to_cpus(&thinint.ind_bit);
                be64_to_cpus(&thinint.summary_indicator);
                be64_to_cpus(&thinint.device_indicator);

                dev->summary_indicator =
                    get_indicator(thinint.summary_indicator, sizeof(uint8_t));
                /* ind_bit is the highest bit used; size in bytes from it. */
                dev->indicators =
                    get_indicator(thinint.device_indicator,
                                  thinint.ind_bit / 8 + 1);
                dev->thinint_isc = thinint.isc;
                dev->routes.adapter.ind_offset = thinint.ind_bit;
                dev->routes.adapter.summary_offset = 7;
                dev->routes.adapter.adapter_id = css_get_adapter_id(
                                                 CSS_IO_ADAPTER_VIRTIO,
                                                 dev->thinint_isc);
                sch->thinint_active = ((dev->indicators != NULL) &&
                                       (dev->summary_indicator != NULL));
                sch->curr_status.scsw.count = ccw.count - sizeof(thinint);
                ret = 0;
            }
        }
        break;
    case CCW_CMD_SET_VIRTIO_REV:
        len = sizeof(revinfo);
        if (ccw.count < len) {
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
            break;
        }
        /*
         * Read only the fixed header (revision + length).
         * NOTE(review): the return value of ccw_dstream_read_buf is ignored
         * here — presumably the count checks around it make a short read
         * impossible; verify against the dstream implementation.
         */
        ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
        be16_to_cpus(&revinfo.revision);
        be16_to_cpus(&revinfo.length);
        if (ccw.count < len + revinfo.length ||
            (check_len && ccw.count > len + revinfo.length)) {
            ret = -EINVAL;
            break;
        }
        /*
         * Once we start to support revisions with additional data, we'll
         * need to fetch it here. Nothing to do for now, though.
         */
        if (dev->revision >= 0 ||
            revinfo.revision > virtio_ccw_rev_max(dev) ||
            (dev->force_revision_1 && !revinfo.revision)) {
            /* Revision may only be negotiated once, and within our range. */
            ret = -ENOSYS;
            break;
        }
        ret = 0;
        dev->revision = revinfo.revision;
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    return ret;
}

/* Subchannel disable callback: force a new revision negotiation. */
static void virtio_sch_disable_cb(SubchDev *sch)
{
    VirtioCcwDevice *dev = sch->driver_data;

    dev->revision = -1;
}

/*
 * Common realize for all virtio-ccw proxies: create and wire up the
 * subchannel, then run the subclass and CCW-layer realize hooks.
 */
static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
{
    VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
    SubchDev *sch;
    Error *err = NULL;

    sch = css_create_sch(ccw_dev->devno, errp);
    if (!sch) {
        return;
    }
    if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) {
        error_setg(&err, "Invalid value of property max_rev "
                   "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
        goto out_err;
    }

    sch->driver_data = dev;
    sch->ccw_cb = virtio_ccw_cb;
    sch->disable_cb = virtio_sch_disable_cb;
    sch->id.reserved = 0xff;
    sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
    sch->do_subchannel_work = do_subchannel_work_virtual;
    ccw_dev->sch = sch;
    dev->indicators = NULL;
    dev->revision = -1;
    css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);

    trace_virtio_ccw_new_device(
        sch->cssid, sch->ssid, sch->schid, sch->devno,
        ccw_dev->devno.valid ?
            "user-configured" : "auto-configured");

    /* ioeventfd needs kernel eventfd support; fall back silently. */
    if (kvm_enabled() && !kvm_eventfds_enabled()) {
        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
    }

    if (k->realize) {
        k->realize(dev, &err);
        if (err) {
            goto out_err;
        }
    }

    ck->realize(ccw_dev, &err);
    if (err) {
        goto out_err;
    }

    return;

out_err:
    error_propagate(errp, err);
    css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
    ccw_dev->sch = NULL;
    g_free(sch);
}

/* Tear down what realize built: subclass hook, subchannel, indicators. */
static void virtio_ccw_device_unrealize(VirtioCcwDevice *dev, Error **errp)
{
    VirtIOCCWDeviceClass *dc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;

    if (dc->unrealize) {
        dc->unrealize(dev, errp);
    }

    if (sch) {
        css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
        g_free(sch);
        ccw_dev->sch = NULL;
    }
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
}

/* DeviceState to VirtioCcwDevice. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d)
{
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);

    return container_of(ccw_dev, VirtioCcwDevice, parent_obj);
}

/*
 * Atomically OR to_be_set into the guest indicator byte at ind_loc and
 * return the previous value.
 * NOTE(review): the return type is uint8_t, so the -1 error return is
 * indistinguishable from a prior indicator value of 0xff — confirm the
 * callers tolerate this.
 */
static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc,
                                     uint8_t to_be_set)
{
    uint8_t ind_old, ind_new;
    hwaddr len = 1;
    uint8_t *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, 1);
    if (!ind_addr) {
        error_report("%s(%x.%x.%04x): unable to access indicator",
                     __func__, sch->cssid, sch->ssid, sch->schid);
        return -1;
    }
    /* Retry until no other writer raced with us. */
    do {
        ind_old = *ind_addr;
        ind_new = ind_old | to_be_set;
    } while (atomic_cmpxchg(ind_addr, ind_old, ind_new) != ind_old);
    trace_virtio_ccw_set_ind(ind_loc, ind_old, ind_new);
    cpu_physical_memory_unmap(ind_addr, len, 1, len);

    return ind_old;
}

/*
 * Guest notification callback: raise either an adapter (thin) interrupt
 * or a classic I/O interrupt for queue or configuration-change events.
 */
static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
{
    VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);
    SubchDev *sch = ccw_dev->sch;
    uint64_t indicators;

    if (vector == VIRTIO_NO_VECTOR) {
        return;
    }
    /*
     * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue
     * vector == VIRTIO_QUEUE_MAX: configuration change notification
     * bits beyond that are unused and should never be notified for
     */
    assert(vector <= VIRTIO_QUEUE_MAX);

    if (vector < VIRTIO_QUEUE_MAX) {
        if (!dev->indicators) {
            return;
        }
        if (sch->thinint_active) {
            /*
             * In the adapter interrupt case, indicators points to a
             * memory area that may be (way) larger than 64 bit and
             * ind_bit indicates the start of the indicators in a big
             * endian notation.
             */
            uint64_t ind_bit = dev->routes.adapter.ind_offset;

            virtio_set_ind_atomic(sch, dev->indicators->addr +
                                  (ind_bit + vector) / 8,
                                  0x80 >> ((ind_bit + vector) % 8));
            /* Only inject an interrupt if the summary was not yet set. */
            if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr,
                                       0x01)) {
                css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc);
            }
        } else {
            assert(vector < NR_CLASSIC_INDICATOR_BITS);
            indicators = address_space_ldq(&address_space_memory,
                                           dev->indicators->addr,
                                           MEMTXATTRS_UNSPECIFIED,
                                           NULL);
            indicators |= 1ULL << vector;
            address_space_stq(&address_space_memory, dev->indicators->addr,
                              indicators, MEMTXATTRS_UNSPECIFIED, NULL);
            css_conditional_io_interrupt(sch);
        }
    } else {
        /* Configuration change: bit 0 of the secondary indicators. */
        if (!dev->indicators2) {
            return;
        }
        indicators = address_space_ldq(&address_space_memory,
                                       dev->indicators2->addr,
                                       MEMTXATTRS_UNSPECIFIED,
                                       NULL);
        indicators |= 1ULL;
        address_space_stq(&address_space_memory, dev->indicators2->addr,
                          indicators, MEMTXATTRS_UNSPECIFIED, NULL);
        css_conditional_io_interrupt(sch);
    }
}

/* Full device reset: virtio part first, then the parent class reset. */
static void virtio_ccw_reset(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);

    virtio_ccw_reset_virtio(dev, vdev);
    if (vdc->parent_reset) {
        vdc->parent_reset(d);
    }
}

/* Start/stop ioeventfd when the VM starts/stops running. */
static void virtio_ccw_vmstate_change(DeviceState *d, bool running)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    if (running) {
        virtio_ccw_start_ioeventfd(dev);
    } else {
        virtio_ccw_stop_ioeventfd(dev);
    }
}

/* Guest notifiers are wanted while the subchannel is enabled. */
static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
{
    CcwDevice *dev = CCW_DEVICE(d);

    return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
}

/*
 * Map the summary and device indicators for irqfd routing; only valid
 * when thin interrupts are active.
 */
static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
{
    int r;
    CcwDevice *ccw_dev =
        CCW_DEVICE(dev);

    if (!ccw_dev->sch->thinint_active) {
        return -EINVAL;
    }

    r = map_indicator(&dev->routes.adapter, dev->summary_indicator);
    if (r) {
        return r;
    }
    r = map_indicator(&dev->routes.adapter, dev->indicators);
    if (r) {
        return r;
    }
    dev->routes.adapter.summary_addr = dev->summary_indicator->map;
    dev->routes.adapter.ind_addr = dev->indicators->map;

    return 0;
}

/*
 * Register adapter interrupt routes with the flic for every queue that
 * is actually in use (queue sizes are allocated contiguously, so the
 * first empty queue terminates the scan).
 */
static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    int i;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    int ret;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    ret = virtio_ccw_get_mappings(dev);
    if (ret) {
        return ret;
    }
    for (i = 0; i < nvqs; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }
    dev->routes.num_routes = i;
    return fsc->add_adapter_routes(fs, &dev->routes);
}

/* Drop the adapter routes registered above. */
static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    fsc->release_adapter_routes(fs, &dev->routes);
}

/* Attach queue n's guest notifier to its kernel irqfd route. */
static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL,
                                              dev->routes.gsi[n]);
}

/* Detach queue n's guest notifier from its kernel irqfd route. */
static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier,
                                                dev->routes.gsi[n]);
    assert(ret == 0);
}

/*
 * (Un)assign the guest notifier for a single queue, optionally backed
 * by an irqfd, and keep the device's masking callbacks in sync.
 */
static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
                                         bool assign, bool with_irqfd)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (assign) {
        int r = event_notifier_init(notifier, 0);

        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
        if (with_irqfd) {
            r = virtio_ccw_add_irqfd(dev, n);
            if (r) {
                virtio_queue_set_guest_notifier_fd_handler(vq, false,
                                                           with_irqfd);
                return r;
            }
        }
        /*
         * We do not support individual masking for channel devices, so we
         * need to manually trigger any guest masking callbacks here.
         */
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, false);
        }
        /* get lost events and re-inject */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, n)) {
            event_notifier_set(notifier);
        }
    } else {
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, true);
        }
        if (with_irqfd) {
            virtio_ccw_remove_irqfd(dev, n);
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }
    return 0;
}

/*
 * Bus callback: (un)assign guest notifiers for all in-use queues,
 * setting up irq routes first when irqfds are used and unwinding on
 * partial failure.
 */
static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs,
                                          bool assigned)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled();
    int r, n;

    if (with_irqfd && assigned) {
        /* irq routes need to be set up before assigning irqfds */
        r = virtio_ccw_setup_irqroutes(dev, nvqs);
        if (r < 0) {
            goto irqroute_error;
        }
    }
    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }
        r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    if (with_irqfd && !assigned) {
        /* release irq routes after irqfds have been released */
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return 0;

assign_error:
    /* Roll back the notifiers that were already assigned. */
    while (--n >= 0) {
        virtio_ccw_set_guest_notifier(dev, n, !assigned, false);
    }
irqroute_error:
    if (with_irqfd && assigned) {
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return r;
}

/* Legacy migration hook: save the vector of queue n. */
static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    qemu_put_be16(f, virtio_queue_vector(vdev, n));
}

/* Legacy migration hook: restore the vector of queue n. */
static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    uint16_t vector;

    qemu_get_be16s(f, &vector);
    virtio_queue_set_vector(vdev, n , vector);

    return 0;
}

/* Save the device section via the vmstate description above. */
static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL);
}

/* Load the device section; version is pinned to 1. */
static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1);
}

/* Offer VIRTIO_F_VERSION_1 before feature negotiation if revision allows. */
static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    if (dev->max_rev >= 1) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}

/* This is called by virtio-bus
   just after the device is plugged. */
static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    SubchDev *sch = ccw_dev->sch;
    int n = virtio_get_num_queues(vdev);
    S390FLICState *flic = s390_get_flic();

    /* Without VERSION_1 the device can only ever be operated as legacy. */
    if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        dev->max_rev = 0;
    }

    if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "The number of virtqueues %d "
                   "exceeds virtio limit %d", n,
                   VIRTIO_QUEUE_MAX);
        return;
    }
    if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) {
        error_setg(errp, "The number of virtqueues %d "
                   "exceeds flic adapter route limit %d", n,
                   flic->adapter_routes_max_batch);
        return;
    }

    sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);


    /* Announce the new subchannel to the guest via channel report words. */
    css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
                          d->hotplugged, 1);
}

/* Called by virtio-bus when the backing device goes away. */
static void virtio_ccw_device_unplugged(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    virtio_ccw_stop_ioeventfd(dev);
}
/**************** Virtio-ccw Bus Device Descriptions *******************/

/* qdev realize: create the virtio bus in-place, then realize the device. */
static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;

    virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev);
    virtio_ccw_device_realize(_dev, errp);
}

static void virtio_ccw_busdev_unrealize(DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;

    virtio_ccw_device_unrealize(_dev, errp);
}

/* Hotplug handler unplug callback: quiesce ioeventfd for the device. */
static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev,
                                     DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = to_virtio_ccw_dev_fast(dev);

    virtio_ccw_stop_ioeventfd(_dev);
}

/* Class init for the abstract virtio-ccw proxy device type. */
static void virtio_ccw_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CCWDeviceClass *k = CCW_DEVICE_CLASS(dc);
    VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass);

    k->unplug = virtio_ccw_busdev_unplug;
    dc->realize = virtio_ccw_busdev_realize;
    dc->unrealize = virtio_ccw_busdev_unrealize;
    dc->bus_type = TYPE_VIRTUAL_CSS_BUS;
    device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset);
}

static const TypeInfo virtio_ccw_device_info = {
    .name = TYPE_VIRTIO_CCW_DEVICE,
    .parent = TYPE_CCW_DEVICE,
    .instance_size = sizeof(VirtioCcwDevice),
    .class_init = virtio_ccw_device_class_init,
    .class_size = sizeof(VirtIOCCWDeviceClass),
    .abstract = true,
};

/* virtio-ccw-bus */

/* Create the virtio bus hanging off a virtio-ccw proxy, in-place. */
static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_CCW_BUS,
                        qdev, virtio_bus_name);
}

/* Wire all transport callbacks into the generic virtio bus class. */
static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
{
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    BusClass *bus_class = BUS_CLASS(klass);

    /* Each proxy carries exactly one virtio device. */
    bus_class->max_dev = 1;
    k->notify = virtio_ccw_notify;
    k->vmstate_change = virtio_ccw_vmstate_change;
    k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
    k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
    k->save_queue = virtio_ccw_save_queue;
    k->load_queue = virtio_ccw_load_queue;
    k->save_config = virtio_ccw_save_config;
    k->load_config = virtio_ccw_load_config;
    k->pre_plugged = virtio_ccw_pre_plugged;
    k->device_plugged = virtio_ccw_device_plugged;
    k->device_unplugged = virtio_ccw_device_unplugged;
    k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
}

static const TypeInfo virtio_ccw_bus_info = {
    .name = TYPE_VIRTIO_CCW_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioCcwBusState),
    .class_init = virtio_ccw_bus_class_init,
};

/* Register both QOM types with the type system at startup. */
static void virtio_ccw_register(void)
{
    type_register_static(&virtio_ccw_bus_info);
    type_register_static(&virtio_ccw_device_info);
}

type_init(virtio_ccw_register)