/*
 * virtio ccw target implementation
 *
 * Copyright 2012,2015 IBM Corp.
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *            Pierre Morel <pmorel@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/kvm.h"
#include "net/net.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-net.h"
#include "hw/sysbus.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/s390x/adapter.h"
#include "hw/s390x/s390_flic.h"

#include "hw/s390x/ioinst.h"
#include "hw/s390x/css.h"
#include "virtio-ccw.h"
#include "trace.h"
#include "hw/s390x/css-bridge.h"
#include "hw/s390x/s390-virtio-ccw.h"

/* A classic (non-thinint) indicator area is a single 64-bit word. */
#define NR_CLASSIC_INDICATOR_BITS 64

/*
 * Migration post_load: re-establish runtime links that are not part of
 * the migrated state — point the subchannel back at this proxy device,
 * recompute the adapter id when thin interrupts were active, and ask the
 * ccw layer to refill the subchannel id.
 */
static int virtio_ccw_dev_post_load(void *opaque, int version_id)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(opaque);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);

    ccw_dev->sch->driver_data = dev;
    if (ccw_dev->sch->thinint_active) {
        dev->routes.adapter.adapter_id = css_get_adapter_id(
                                         CSS_IO_ADAPTER_VIRTIO,
                                         dev->thinint_isc);
    }
    /* Re-fill subch_id after loading the subchannel states.*/
    if (ck->refill_ids) {
        ck->refill_ids(ccw_dev);
    }
    return 0;
}

/* Staging struct used only to migrate vdev->config_vector (see below). */
typedef struct VirtioCcwDeviceTmp {
    VirtioCcwDevice *parent;
    uint16_t config_vector;
} VirtioCcwDeviceTmp;

/* pre_save: copy the config vector out of the VirtIODevice into the tmp. */
static int virtio_ccw_dev_tmp_pre_save(void *opaque)
{
    VirtioCcwDeviceTmp *tmp = opaque;
    VirtioCcwDevice *dev = tmp->parent;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    tmp->config_vector = vdev->config_vector;

return 0;
}

/* post_load counterpart: restore the migrated config vector into the vdev. */
static int virtio_ccw_dev_tmp_post_load(void *opaque, int version_id)
{
    VirtioCcwDeviceTmp *tmp = opaque;
    VirtioCcwDevice *dev = tmp->parent;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    vdev->config_vector = tmp->config_vector;
    return 0;
}

/* Migration stream section for the config vector staging struct above. */
const VMStateDescription vmstate_virtio_ccw_dev_tmp = {
    .name = "s390_virtio_ccw_dev_tmp",
    .pre_save = virtio_ccw_dev_tmp_pre_save,
    .post_load = virtio_ccw_dev_tmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp),
        VMSTATE_END_OF_LIST()
    }
};

/* Migration description of the virtio-ccw proxy device itself. */
const VMStateDescription vmstate_virtio_ccw_dev = {
    .name = "s390_virtio_ccw_dev",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = virtio_ccw_dev_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(summary_indicator, VirtioCcwDevice),
        /*
         * Ugly hack because VirtIODevice does not migrate itself.
         * This also makes legacy via vmstate_save_state possible.
         */
        VMSTATE_WITH_TMP(VirtioCcwDevice, VirtioCcwDeviceTmp,
                         vmstate_virtio_ccw_dev_tmp),
        VMSTATE_STRUCT(routes, VirtioCcwDevice, 1, vmstate_adapter_routes,
                       AdapterRoutes),
        VMSTATE_UINT8(thinint_isc, VirtioCcwDevice),
        VMSTATE_INT32(revision, VirtioCcwDevice),
        VMSTATE_END_OF_LIST()
    }
};

static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev);

/* Map a subchannel to the VirtIODevice on its virtio bus (or NULL). */
VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
{
    VirtIODevice *vdev = NULL;
    VirtioCcwDevice *dev = sch->driver_data;

    if (dev) {
        vdev = virtio_bus_get_device(&dev->bus);
    }
    return vdev;
}

static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_start_ioeventfd(&dev->bus);
}

static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_stop_ioeventfd(&dev->bus);
}

/* ioeventfd is only used when the device property flag enables it. */
static bool virtio_ccw_ioeventfd_enabled(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0;
}

/*
 * (De)assign an ioeventfd for queue n; the notifier is keyed by the
 * 32-bit subchannel id (cssid/ssid in the high half, schid in the low).
 */
static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;
    uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;

    return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
}

/* Communication blocks used by several channel commands.
 */
/* Legacy (pre virtio-1) SET_VQ payload: one ring base + alignment. */
typedef struct VqInfoBlockLegacy {
    uint64_t queue;
    uint32_t align;
    uint16_t index;
    uint16_t num;
} QEMU_PACKED VqInfoBlockLegacy;

/* virtio-1 SET_VQ payload: split descriptor/avail/used addresses. */
typedef struct VqInfoBlock {
    uint64_t desc;
    uint32_t res0;
    uint16_t index;
    uint16_t num;
    uint64_t avail;
    uint64_t used;
} QEMU_PACKED VqInfoBlock;

/* READ_VQ_CONF payload: queue index in, max ring size out. */
typedef struct VqConfigBlock {
    uint16_t index;
    uint16_t num_max;
} QEMU_PACKED VqConfigBlock;

/* READ/WRITE_FEAT payload: one 32-bit feature word selected by index. */
typedef struct VirtioFeatDesc {
    uint32_t features;
    uint8_t index;
} QEMU_PACKED VirtioFeatDesc;

/* SET_IND_ADAPTER payload describing the thin interrupt indicators. */
typedef struct VirtioThinintInfo {
    hwaddr summary_indicator;
    hwaddr device_indicator;
    uint64_t ind_bit;
    uint8_t isc;
} QEMU_PACKED VirtioThinintInfo;

/* SET_VIRTIO_REV payload: 4-byte header plus optional trailing data. */
typedef struct VirtioRevInfo {
    uint16_t revision;
    uint16_t length;
    uint8_t data[];
} QEMU_PACKED VirtioRevInfo;

/* Specify where the virtqueues for the subchannel are in guest memory. */
static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
                              VqInfoBlockLegacy *linfo)
{
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    /* Exactly one of info (virtio-1) / linfo (legacy) is non-NULL. */
    uint16_t index = info ? info->index : linfo->index;
    uint16_t num = info ? info->num : linfo->num;
    uint64_t desc = info ? info->desc : linfo->queue;

    if (index >= VIRTIO_QUEUE_MAX) {
        return -EINVAL;
    }

    /* Current code in virtio.c relies on 4K alignment. */
    if (linfo && desc && (linfo->align != 4096)) {
        return -EINVAL;
    }

    if (!vdev) {
        return -EINVAL;
    }

    if (info) {
        virtio_queue_set_rings(vdev, index, desc, info->avail, info->used);
    } else {
        virtio_queue_set_addr(vdev, index, desc);
    }
    if (!desc) {
        /* A zero ring address unassigns the queue's notification vector. */
        virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR);
    } else {
        if (info) {
            /* virtio-1 allows changing the ring size. */
            if (virtio_queue_get_max_num(vdev, index) < num) {
                /* Fail if we exceed the maximum number.
 */
                return -EINVAL;
            }
            virtio_queue_set_num(vdev, index, num);
        } else if (virtio_queue_get_num(vdev, index) > num) {
            /* Fail if we don't have a big enough queue. */
            return -EINVAL;
        }
        /* We ignore possible increased num for legacy for compatibility. */
        virtio_queue_set_vector(vdev, index, index);
    }
    /* tell notify handler in case of config change */
    vdev->config_vector = VIRTIO_QUEUE_MAX;
    return 0;
}

/*
 * Reset the virtio device and drop all indicator state: the indicators
 * live in guest memory and are released here, and the thin interrupt
 * configuration on the subchannel is cleared as well.
 */
static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev)
{
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    virtio_ccw_stop_ioeventfd(dev);
    virtio_reset(vdev);
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
    if (dev->indicators2) {
        release_indicator(&dev->routes.adapter, dev->indicators2);
        dev->indicators2 = NULL;
    }
    if (dev->summary_indicator) {
        release_indicator(&dev->routes.adapter, dev->summary_indicator);
        dev->summary_indicator = NULL;
    }
    ccw_dev->sch->thinint_active = false;
}

/*
 * Handle CCW_CMD_SET_VQ: read the (legacy or virtio-1) queue info block
 * from the channel data stream, byteswap it from big endian and apply it
 * via virtio_ccw_set_vqs().
 */
static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
                                    bool is_legacy)
{
    int ret;
    VqInfoBlock info;
    VqInfoBlockLegacy linfo;
    size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info);

    if (check_len) {
        if (ccw.count != info_len) {
            return -EINVAL;
        }
    } else if (ccw.count < info_len) {
        /* Can't execute command.
 */
        return -EINVAL;
    }
    if (!ccw.cda) {
        return -EFAULT;
    }
    if (is_legacy) {
        ccw_dstream_read(&sch->cds, linfo);
        linfo.queue = be64_to_cpu(linfo.queue);
        linfo.align = be32_to_cpu(linfo.align);
        linfo.index = be16_to_cpu(linfo.index);
        linfo.num = be16_to_cpu(linfo.num);
        ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
    } else {
        ccw_dstream_read(&sch->cds, info);
        info.desc = be64_to_cpu(info.desc);
        info.index = be16_to_cpu(info.index);
        info.num = be16_to_cpu(info.num);
        info.avail = be64_to_cpu(info.avail);
        info.used = be64_to_cpu(info.used);
        ret = virtio_ccw_set_vqs(sch, &info, NULL);
    }
    sch->curr_status.scsw.count = 0;
    return ret;
}

/*
 * Main CCW interpretation callback: decode and execute one virtio channel
 * command for this subchannel. Returns 0 on success, -EINVAL for bad
 * lengths/parameters, -EFAULT for a missing data address, and -ENOSYS to
 * trigger a command reject.
 */
static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    int ret;
    VirtioRevInfo revinfo;
    uint8_t status;
    VirtioFeatDesc features;
    hwaddr indicators;
    VqConfigBlock vq_config;
    VirtioCcwDevice *dev = sch->driver_data;
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    bool check_len;
    int len;
    VirtioThinintInfo thinint;

    if (!dev) {
        return -EINVAL;
    }

    trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
                                   ccw.cmd_code);
    /* Enforce exact lengths unless suppress-length-indication applies. */
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    if (dev->revision < 0 && ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) {
        if (dev->force_revision_1) {
            /*
             * virtio-1 drivers must start with negotiating to a revision >= 1,
             * so post a command reject for all other commands
             */
            return -ENOSYS;
        } else {
            /*
             * If the driver issues any command that is not SET_VIRTIO_REV,
             * we'll have to operate the device in legacy mode.
             */
            dev->revision = 0;
        }
    }

    /* Look at the command.
 */
    switch (ccw.cmd_code) {
    case CCW_CMD_SET_VQ:
        /* Legacy layout is used for anything below revision 1. */
        ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1);
        break;
    case CCW_CMD_VDEV_RESET:
        virtio_ccw_reset_virtio(dev, vdev);
        ret = 0;
        break;
    case CCW_CMD_READ_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            /* Skip the features word to read the index the guest wrote. */
            ccw_dstream_advance(&sch->cds, sizeof(features.features));
            ccw_dstream_read(&sch->cds, features.index);
            if (features.index == 0) {
                if (dev->revision >= 1) {
                    /* Don't offer legacy features for modern devices. */
                    features.features = (uint32_t)
                        (vdev->host_features & ~vdc->legacy_features);
                } else {
                    features.features = (uint32_t)vdev->host_features;
                }
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * Only offer feature bits beyond 31 if the guest has
                 * negotiated at least revision 1.
                 */
                features.features = (uint32_t)(vdev->host_features >> 32);
            } else {
                /* Return zeroes if the guest supports more feature bits. */
                features.features = 0;
            }
            /* Rewind so the answer lands back at the start of the block. */
            ccw_dstream_rewind(&sch->cds);
            /* Feature words are exchanged little endian. */
            features.features = cpu_to_le32(features.features);
            ccw_dstream_write(&sch->cds, features.features);
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command.
 */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, features);
            /* Feature words are exchanged little endian. */
            features.features = le32_to_cpu(features.features);
            if (features.index == 0) {
                /* Replace the low 32 feature bits, keep the high ones. */
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0xffffffff00000000ULL) |
                                    features.features);
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * If the guest did not negotiate at least revision 1,
                 * we did not offer it any feature bits beyond 31. Such a
                 * guest passing us any bit here is therefore buggy.
                 */
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0x00000000ffffffffULL) |
                                    ((uint64_t)features.features << 32));
            } else {
                /*
                 * If the guest supports more feature bits, assert that it
                 * passes us zeroes for those we don't support.
                 */
                if (features.features) {
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "Guest bug: features[%i]=%x (expected 0)",
                                  features.index, features.features);
                    /* XXX: do a unit check here?
 */
                }
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        /* Partial config reads are allowed; never exceed config_len. */
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            virtio_bus_get_vdev_config(&dev->bus, vdev->config);
            ccw_dstream_write_buf(&sch->cds, vdev->config, len);
            sch->curr_status.scsw.count = ccw.count - len;
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* Only push the new config down if the read succeeded. */
            ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len);
            if (!ret) {
                virtio_bus_set_vdev_config(&dev->bus, vdev->config);
                sch->curr_status.scsw.count = ccw.count - len;
            }
        }
        break;
    case CCW_CMD_READ_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* Single byte: store the device status straight to ccw.cda. */
            address_space_stb(&address_space_memory, ccw.cda, vdev->status,
                              MEMTXATTRS_UNSPECIFIED, NULL);
            sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command.
 */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, status);
            /* Quiesce ioeventfd before the driver stops being ready. */
            if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                virtio_ccw_stop_ioeventfd(dev);
            }
            if (virtio_set_status(vdev, status) == 0) {
                /* Writing status 0 is a device reset. */
                if (vdev->status == 0) {
                    virtio_ccw_reset_virtio(dev, vdev);
                }
                if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                    virtio_ccw_start_ioeventfd(dev);
                }
                sch->curr_status.scsw.count = ccw.count - sizeof(status);
                ret = 0;
            } else {
                /* Trigger a command reject. */
                ret = -ENOSYS;
            }
        }
        break;
    case CCW_CMD_SET_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (sch->thinint_active) {
            /* Trigger a command reject. */
            ret = -ENOSYS;
            break;
        }
        if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) {
            /* More queues than indicator bits --> trigger a reject */
            ret = -ENOSYS;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, indicators);
            indicators = be64_to_cpu(indicators);
            dev->indicators = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_CONF_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command.
 */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* Secondary indicators: used for config change notification. */
            ccw_dstream_read(&sch->cds, indicators);
            indicators = be64_to_cpu(indicators);
            dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_VQ_CONF:
        if (check_len) {
            if (ccw.count != sizeof(vq_config)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(vq_config)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, vq_config.index);
            vq_config.index = be16_to_cpu(vq_config.index);
            if (vq_config.index >= VIRTIO_QUEUE_MAX) {
                ret = -EINVAL;
                break;
            }
            /* Answer with the current ring size for that queue. */
            vq_config.num_max = virtio_queue_get_num(vdev,
                                                     vq_config.index);
            vq_config.num_max = cpu_to_be16(vq_config.num_max);
            ccw_dstream_write(&sch->cds, vq_config.num_max);
            sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_IND_ADAPTER:
        if (check_len) {
            if (ccw.count != sizeof(thinint)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(thinint)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else if (dev->indicators && !sch->thinint_active) {
            /* Trigger a command reject.
 */
            ret = -ENOSYS;
        } else {
            if (ccw_dstream_read(&sch->cds, thinint)) {
                ret = -EFAULT;
            } else {
                thinint.ind_bit = be64_to_cpu(thinint.ind_bit);
                thinint.summary_indicator =
                    be64_to_cpu(thinint.summary_indicator);
                thinint.device_indicator =
                    be64_to_cpu(thinint.device_indicator);

                dev->summary_indicator =
                    get_indicator(thinint.summary_indicator, sizeof(uint8_t));
                /* Indicator area must hold ind_bit + 1 bits, rounded up. */
                dev->indicators =
                    get_indicator(thinint.device_indicator,
                                  thinint.ind_bit / 8 + 1);
                dev->thinint_isc = thinint.isc;
                dev->routes.adapter.ind_offset = thinint.ind_bit;
                dev->routes.adapter.summary_offset = 7;
                dev->routes.adapter.adapter_id = css_get_adapter_id(
                                                 CSS_IO_ADAPTER_VIRTIO,
                                                 dev->thinint_isc);
                /* Thinint only becomes active if both maps succeeded. */
                sch->thinint_active = ((dev->indicators != NULL) &&
                                       (dev->summary_indicator != NULL));
                sch->curr_status.scsw.count = ccw.count - sizeof(thinint);
                ret = 0;
            }
        }
        break;
    case CCW_CMD_SET_VIRTIO_REV:
        len = sizeof(revinfo);
        if (ccw.count < len) {
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
            break;
        }
        /* Only the 4-byte revision/length header is fetched here. */
        ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
        revinfo.revision = be16_to_cpu(revinfo.revision);
        revinfo.length = be16_to_cpu(revinfo.length);
        if (ccw.count < len + revinfo.length ||
            (check_len && ccw.count > len + revinfo.length)) {
            ret = -EINVAL;
            break;
        }
        /*
         * Once we start to support revisions with additional data, we'll
         * need to fetch it here. Nothing to do for now, though.
 */
        /* Revision can only be negotiated once, and never downgraded. */
        if (dev->revision >= 0 ||
            revinfo.revision > virtio_ccw_rev_max(dev) ||
            (dev->force_revision_1 && !revinfo.revision)) {
            ret = -ENOSYS;
            break;
        }
        ret = 0;
        dev->revision = revinfo.revision;
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    return ret;
}

/* Subchannel disable callback: forget the negotiated revision. */
static void virtio_sch_disable_cb(SubchDev *sch)
{
    VirtioCcwDevice *dev = sch->driver_data;

    dev->revision = -1;
}

/*
 * Common realize for virtio-ccw proxy devices: create and wire up the
 * subchannel, then run the subclass realize hook and the ccw parent
 * realize. On any failure the subchannel is unassigned and freed again.
 */
static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
{
    VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
    SubchDev *sch;
    Error *err = NULL;
    int i;

    sch = css_create_sch(ccw_dev->devno, errp);
    if (!sch) {
        return;
    }
    if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) {
        error_setg(&err, "Invalid value of property max_rev "
                   "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
        goto out_err;
    }

    sch->driver_data = dev;
    sch->ccw_cb = virtio_ccw_cb;
    sch->disable_cb = virtio_sch_disable_cb;
    sch->id.reserved = 0xff;
    sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
    sch->do_subchannel_work = do_subchannel_work_virtual;
    ccw_dev->sch = sch;
    dev->indicators = NULL;
    dev->revision = -1;
    /* No adapter routes allocated yet. */
    for (i = 0; i < ADAPTER_ROUTES_MAX_GSI; i++) {
        dev->routes.gsi[i] = -1;
    }
    css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);

    trace_virtio_ccw_new_device(
        sch->cssid, sch->ssid, sch->schid, sch->devno,
        ccw_dev->devno.valid ?
"user-configured" : "auto-configured"); 736 737 if (kvm_enabled() && !kvm_eventfds_enabled()) { 738 dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD; 739 } 740 741 if (k->realize) { 742 k->realize(dev, &err); 743 if (err) { 744 goto out_err; 745 } 746 } 747 748 ck->realize(ccw_dev, &err); 749 if (err) { 750 goto out_err; 751 } 752 753 return; 754 755 out_err: 756 error_propagate(errp, err); 757 css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL); 758 ccw_dev->sch = NULL; 759 g_free(sch); 760 } 761 762 static void virtio_ccw_device_unrealize(VirtioCcwDevice *dev) 763 { 764 VirtIOCCWDeviceClass *dc = VIRTIO_CCW_DEVICE_GET_CLASS(dev); 765 CcwDevice *ccw_dev = CCW_DEVICE(dev); 766 SubchDev *sch = ccw_dev->sch; 767 768 if (dc->unrealize) { 769 dc->unrealize(dev); 770 } 771 772 if (sch) { 773 css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL); 774 g_free(sch); 775 ccw_dev->sch = NULL; 776 } 777 if (dev->indicators) { 778 release_indicator(&dev->routes.adapter, dev->indicators); 779 dev->indicators = NULL; 780 } 781 } 782 783 /* DeviceState to VirtioCcwDevice. Note: used on datapath, 784 * be careful and test performance if you change this. 
 */
static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d)
{
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);

    return container_of(ccw_dev, VirtioCcwDevice, parent_obj);
}

/*
 * Atomically OR to_be_set into the guest indicator byte at ind_loc via a
 * cmpxchg loop. Returns the previous value of the byte, or -1 (as 0xff)
 * if the indicator location could not be mapped.
 */
static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc,
                                     uint8_t to_be_set)
{
    uint8_t expected, actual;
    hwaddr len = 1;
    /* avoid multiple fetches */
    uint8_t volatile *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
    if (!ind_addr) {
        error_report("%s(%x.%x.%04x): unable to access indicator",
                     __func__, sch->cssid, sch->ssid, sch->schid);
        return -1;
    }
    actual = *ind_addr;
    do {
        expected = actual;
        actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
    } while (actual != expected);
    trace_virtio_ccw_set_ind(ind_loc, actual, actual | to_be_set);
    cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);

    return actual;
}

/* Deliver a queue or configuration-change notification to the guest. */
static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
{
    VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);
    SubchDev *sch = ccw_dev->sch;
    uint64_t indicators;

    if (vector == VIRTIO_NO_VECTOR) {
        return;
    }
    /*
     * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue
     * vector == VIRTIO_QUEUE_MAX: configuration change notification
     * bits beyond that are unused and should never be notified for
     */
    assert(vector <= VIRTIO_QUEUE_MAX);

    if (vector < VIRTIO_QUEUE_MAX) {
        if (!dev->indicators) {
            return;
        }
        if (sch->thinint_active) {
            /*
             * In the adapter interrupt case, indicators points to a
             * memory area that may be (way) larger than 64 bit and
             * ind_bit indicates the start of the indicators in a big
             * endian notation.
 */
            uint64_t ind_bit = dev->routes.adapter.ind_offset;

            /* Set the per-queue bit, then the summary byte; only raise an
             * adapter interrupt if the summary was previously clear. */
            virtio_set_ind_atomic(sch, dev->indicators->addr +
                                  (ind_bit + vector) / 8,
                                  0x80 >> ((ind_bit + vector) % 8));
            if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr,
                                       0x01)) {
                css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc);
            }
        } else {
            assert(vector < NR_CLASSIC_INDICATOR_BITS);
            indicators = address_space_ldq(&address_space_memory,
                                           dev->indicators->addr,
                                           MEMTXATTRS_UNSPECIFIED,
                                           NULL);
            indicators |= 1ULL << vector;
            address_space_stq(&address_space_memory, dev->indicators->addr,
                              indicators, MEMTXATTRS_UNSPECIFIED, NULL);
            css_conditional_io_interrupt(sch);
        }
    } else {
        if (!dev->indicators2) {
            return;
        }
        /* Configuration change: set bit 0 in the secondary indicators. */
        indicators = address_space_ldq(&address_space_memory,
                                       dev->indicators2->addr,
                                       MEMTXATTRS_UNSPECIFIED,
                                       NULL);
        indicators |= 1ULL;
        address_space_stq(&address_space_memory, dev->indicators2->addr,
                          indicators, MEMTXATTRS_UNSPECIFIED, NULL);
        css_conditional_io_interrupt(sch);
    }
}

/* Device reset: reset virtio state first, then chain to the parent reset. */
static void virtio_ccw_reset(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);

    virtio_ccw_reset_virtio(dev, vdev);
    if (vdc->parent_reset) {
        vdc->parent_reset(d);
    }
}

/* Start/stop ioeventfd handling as the VM starts or stops running. */
static void virtio_ccw_vmstate_change(DeviceState *d, bool running)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    if (running) {
        virtio_ccw_start_ioeventfd(dev);
    } else {
        virtio_ccw_stop_ioeventfd(dev);
    }
}

/* Guest notifiers are wanted iff the subchannel is enabled. */
static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
{
    CcwDevice *dev = CCW_DEVICE(d);

    return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
}

/* Map summary and device indicators for adapter (thinint) routing. */
static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
{
    int r;
    CcwDevice *ccw_dev =
CCW_DEVICE(dev);

    if (!ccw_dev->sch->thinint_active) {
        return -EINVAL;
    }

    r = map_indicator(&dev->routes.adapter, dev->summary_indicator);
    if (r) {
        return r;
    }
    r = map_indicator(&dev->routes.adapter, dev->indicators);
    if (r) {
        return r;
    }
    dev->routes.adapter.summary_addr = dev->summary_indicator->map;
    dev->routes.adapter.ind_addr = dev->indicators->map;

    return 0;
}

/*
 * Set up adapter interrupt routes for all populated queues; stops at the
 * first queue that has no descriptors.
 */
static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    int i;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    int ret;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    ret = virtio_ccw_get_mappings(dev);
    if (ret) {
        return ret;
    }
    for (i = 0; i < nvqs; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }
    dev->routes.num_routes = i;
    return fsc->add_adapter_routes(fs, &dev->routes);
}

/* Release all adapter routes previously added for this device. */
static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    fsc->release_adapter_routes(fs, &dev->routes);
}

/* Attach queue n's guest notifier to its adapter route via an irqfd. */
static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL,
                                              dev->routes.gsi[n]);
}

/* Detach queue n's guest notifier irqfd; removal must not fail. */
static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier,
                                                dev->routes.gsi[n]);
    assert(ret == 0);
}

static int
virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
                              bool assign, bool with_irqfd)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (assign) {
        int r = event_notifier_init(notifier, 0);

        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
        if (with_irqfd) {
            r = virtio_ccw_add_irqfd(dev, n);
            if (r) {
                /* Undo the fd handler assignment on irqfd failure. */
                virtio_queue_set_guest_notifier_fd_handler(vq, false,
                                                           with_irqfd);
                return r;
            }
        }
        /*
         * We do not support individual masking for channel devices, so we
         * need to manually trigger any guest masking callbacks here.
         */
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, false);
        }
        /* get lost events and re-inject */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, n)) {
            event_notifier_set(notifier);
        }
    } else {
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, true);
        }
        if (with_irqfd) {
            virtio_ccw_remove_irqfd(dev, n);
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }
    return 0;
}

/*
 * (De)assign guest notifiers for all populated queues; irqfds are used
 * when thin interrupts are active and the kernel irqchip supports them.
 */
static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs,
                                          bool assigned)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled();
    int r, n;

    if (with_irqfd && assigned) {
        /* irq routes need to be set up before assigning irqfds */
        r = virtio_ccw_setup_irqroutes(dev, nvqs);
        if (r < 0) {
            goto
irqroute_error;
        }
    }
    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }
        r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    if (with_irqfd && !assigned) {
        /* release irq routes after irqfds have been released */
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return 0;

assign_error:
    /* Roll back the notifiers that were already assigned. */
    while (--n >= 0) {
        virtio_ccw_set_guest_notifier(dev, n, !assigned, false);
    }
irqroute_error:
    if (with_irqfd && assigned) {
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return r;
}

/* Legacy migration helper: save queue n's notification vector. */
static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    qemu_put_be16(f, virtio_queue_vector(vdev, n));
}

/* Legacy migration helper: restore queue n's notification vector. */
static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    uint16_t vector;

    qemu_get_be16s(f, &vector);
    virtio_queue_set_vector(vdev, n , vector);

    return 0;
}

/* Legacy migration: save the proxy device via its vmstate description. */
static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL);
}

/* Legacy migration: load the proxy device state (version 1). */
static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1);
}

/* Advertise VIRTIO_F_VERSION_1 when revision 1+ may be negotiated. */
static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    if (dev->max_rev >= 1) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}

/* This is
called by virtio-bus just after the device is plugged. */
static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    SubchDev *sch = ccw_dev->sch;
    int n = virtio_get_num_queues(vdev);
    S390FLICState *flic = s390_get_flic();

    /* A device without VERSION_1 can only ever be operated as legacy. */
    if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        dev->max_rev = 0;
    }

    if (!virtio_ccw_rev_max(dev) && !virtio_legacy_allowed(vdev)) {
        /*
         * To avoid migration issues, we allow legacy mode when legacy
         * check is disabled in the old machine types (< 5.1).
         */
        if (virtio_legacy_check_disabled(vdev)) {
            warn_report("device requires revision >= 1, but for backward "
                        "compatibility max_revision=0 is allowed");
        } else {
            error_setg(errp, "Invalid value of property max_rev "
                       "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
            return;
        }
    }

    if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "The number of virtqueues %d "
                   "exceeds virtio limit %d", n,
                   VIRTIO_QUEUE_MAX);
        return;
    }
    if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) {
        error_setg(errp, "The number of virtqueues %d "
                   "exceeds flic adapter route limit %d", n,
                   flic->adapter_routes_max_batch);
        return;
    }

    sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);


    /* Tell the guest about the new device via channel report words. */
    css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
                          d->hotplugged, 1);
}

/* Called by virtio-bus when the device is being unplugged. */
static void virtio_ccw_device_unplugged(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    virtio_ccw_stop_ioeventfd(dev);
}
/**************** Virtio-ccw Bus Device Descriptions *******************/

/* qdev realize: create the virtio bus, then do the common ccw realize. */
static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;

    /* Create the internal virtio bus, then realize the ccw device proper. */
    virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev);
    virtio_ccw_device_realize(_dev, errp);
}

static void virtio_ccw_busdev_unrealize(DeviceState *dev)
{
    VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;

    virtio_ccw_device_unrealize(_dev);
}

/*
 * Hotplug-handler unplug callback: quiesce ioeventfd before the device
 * goes away.
 */
static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev,
                                     DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = to_virtio_ccw_dev_fast(dev);

    virtio_ccw_stop_ioeventfd(_dev);
}

/* Class init for the abstract TYPE_VIRTIO_CCW_DEVICE base type. */
static void virtio_ccw_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CCWDeviceClass *k = CCW_DEVICE_CLASS(dc);
    VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass);

    k->unplug = virtio_ccw_busdev_unplug;
    dc->realize = virtio_ccw_busdev_realize;
    dc->unrealize = virtio_ccw_busdev_unrealize;
    dc->bus_type = TYPE_VIRTUAL_CSS_BUS;
    /* Chain our reset handler in front of the parent class's one. */
    device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset);
}

static const TypeInfo virtio_ccw_device_info = {
    .name = TYPE_VIRTIO_CCW_DEVICE,
    .parent = TYPE_CCW_DEVICE,
    .instance_size = sizeof(VirtioCcwDevice),
    .class_init = virtio_ccw_device_class_init,
    .class_size = sizeof(VirtIOCCWDeviceClass),
    .abstract = true,
};

/* virtio-ccw-bus */

/* Create the virtio bus hanging off a virtio-ccw device, in place. */
static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_CCW_BUS,
                        qdev, virtio_bus_name);
}

/* Wire up the VirtioBusClass callbacks implemented in this file. */
static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
{
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    BusClass *bus_class = BUS_CLASS(klass);

    /* Exactly one virtio device per ccw transport instance. */
    bus_class->max_dev = 1;
    k->notify = virtio_ccw_notify;
    k->vmstate_change =
virtio_ccw_vmstate_change;
    k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
    k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
    k->save_queue = virtio_ccw_save_queue;
    k->load_queue = virtio_ccw_load_queue;
    k->save_config = virtio_ccw_save_config;
    k->load_config = virtio_ccw_load_config;
    k->pre_plugged = virtio_ccw_pre_plugged;
    k->device_plugged = virtio_ccw_device_plugged;
    k->device_unplugged = virtio_ccw_device_unplugged;
    k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
}

static const TypeInfo virtio_ccw_bus_info = {
    .name = TYPE_VIRTIO_CCW_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioCcwBusState),
    .class_size = sizeof(VirtioCcwBusClass),
    .class_init = virtio_ccw_bus_class_init,
};

/* Register the bus and the abstract device type with QOM. */
static void virtio_ccw_register(void)
{
    type_register_static(&virtio_ccw_bus_info);
    type_register_static(&virtio_ccw_device_info);
}

type_init(virtio_ccw_register)