/*
 * virtio ccw target implementation
 *
 * Copyright 2012,2015 IBM Corp.
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *            Pierre Morel <pmorel@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/kvm.h"
#include "net/net.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-net.h"
#include "hw/sysbus.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/s390x/adapter.h"
#include "hw/s390x/s390_flic.h"

#include "hw/s390x/ioinst.h"
#include "hw/s390x/css.h"
#include "virtio-ccw.h"
#include "trace.h"
#include "hw/s390x/css-bridge.h"
#include "hw/s390x/s390-virtio-ccw.h"

#define NR_CLASSIC_INDICATOR_BITS 64

bool have_virtio_ccw = true;

static int virtio_ccw_dev_post_load(void *opaque, int version_id)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(opaque);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);

    ccw_dev->sch->driver_data = dev;
    if (ccw_dev->sch->thinint_active) {
        dev->routes.adapter.adapter_id = css_get_adapter_id(
                                         CSS_IO_ADAPTER_VIRTIO,
                                         dev->thinint_isc);
    }
    /* Re-fill subch_id after loading the subchannel states. */
    if (ck->refill_ids) {
        ck->refill_ids(ccw_dev);
    }
    return 0;
}

typedef struct VirtioCcwDeviceTmp {
    VirtioCcwDevice *parent;
    uint16_t config_vector;
} VirtioCcwDeviceTmp;

static int virtio_ccw_dev_tmp_pre_save(void *opaque)
{
    VirtioCcwDeviceTmp *tmp = opaque;
    VirtioCcwDevice *dev = tmp->parent;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    tmp->config_vector = vdev->config_vector;

    return 0;
}

static int virtio_ccw_dev_tmp_post_load(void *opaque, int version_id)
{
    VirtioCcwDeviceTmp *tmp = opaque;
    VirtioCcwDevice *dev = tmp->parent;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    vdev->config_vector = tmp->config_vector;
    return 0;
}

const VMStateDescription vmstate_virtio_ccw_dev_tmp = {
    .name = "s390_virtio_ccw_dev_tmp",
    .pre_save = virtio_ccw_dev_tmp_pre_save,
    .post_load = virtio_ccw_dev_tmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_virtio_ccw_dev = {
    .name = "s390_virtio_ccw_dev",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = virtio_ccw_dev_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(summary_indicator, VirtioCcwDevice),
        /*
         * Ugly hack because VirtIODevice does not migrate itself.
         * This also makes legacy via vmstate_save_state possible.
         */
        VMSTATE_WITH_TMP(VirtioCcwDevice, VirtioCcwDeviceTmp,
                         vmstate_virtio_ccw_dev_tmp),
        VMSTATE_STRUCT(routes, VirtioCcwDevice, 1, vmstate_adapter_routes,
                       AdapterRoutes),
        VMSTATE_UINT8(thinint_isc, VirtioCcwDevice),
        VMSTATE_INT32(revision, VirtioCcwDevice),
        VMSTATE_END_OF_LIST()
    }
};

static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev);
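
/*
 * Look up the VirtIODevice behind a subchannel; returns NULL if the
 * subchannel is not (or no longer) bound to a virtio-ccw proxy device.
 */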
VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
{
    VirtIODevice *vdev = NULL;
    VirtioCcwDevice *dev = sch->driver_data;

    if (dev) {
        vdev = virtio_bus_get_device(&dev->bus);
    }
    return vdev;
}

static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_start_ioeventfd(&dev->bus);
}

static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_stop_ioeventfd(&dev->bus);
}

static bool virtio_ccw_ioeventfd_enabled(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0;
}

static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;
    uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;

    return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
}

/* Communication blocks used by several channel commands. */
typedef struct VqInfoBlockLegacy {
    uint64_t queue;
    uint32_t align;
    uint16_t index;
    uint16_t num;
} QEMU_PACKED VqInfoBlockLegacy;

typedef struct VqInfoBlock {
    uint64_t desc;
    uint32_t res0;
    uint16_t index;
    uint16_t num;
    uint64_t avail;
    uint64_t used;
} QEMU_PACKED VqInfoBlock;

typedef struct VqConfigBlock {
    uint16_t index;
    uint16_t num_max;
} QEMU_PACKED VqConfigBlock;

typedef struct VirtioFeatDesc {
    uint32_t features;
    uint8_t index;
} QEMU_PACKED VirtioFeatDesc;

typedef struct VirtioThinintInfo {
    hwaddr summary_indicator;
    hwaddr device_indicator;
    uint64_t ind_bit;
    uint8_t isc;
} QEMU_PACKED VirtioThinintInfo;

typedef struct VirtioRevInfo {
    uint16_t revision;
    uint16_t length;
    uint8_t data[];
} QEMU_PACKED VirtioRevInfo;

/* Specify where the virtqueues for the subchannel are in guest memory. */
static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
                              VqInfoBlockLegacy *linfo)
{
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    uint16_t index = info ? info->index : linfo->index;
    uint16_t num = info ? info->num : linfo->num;
    uint64_t desc = info ? info->desc : linfo->queue;

    if (index >= VIRTIO_QUEUE_MAX) {
        return -EINVAL;
    }

    /* Current code in virtio.c relies on 4K alignment. */
    if (linfo && desc && (linfo->align != 4096)) {
        return -EINVAL;
    }

    if (!vdev) {
        return -EINVAL;
    }

    if (info) {
        virtio_queue_set_rings(vdev, index, desc, info->avail, info->used);
    } else {
        virtio_queue_set_addr(vdev, index, desc);
    }
    if (!desc) {
        virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR);
    } else {
        if (info) {
            /* virtio-1 allows changing the ring size. */
            if (virtio_queue_get_max_num(vdev, index) < num) {
                /* Fail if we exceed the maximum number. */
                return -EINVAL;
            }
            virtio_queue_set_num(vdev, index, num);
        } else if (virtio_queue_get_num(vdev, index) > num) {
            /* Fail if we don't have a big enough queue. */
            return -EINVAL;
        }
        /* We ignore possible increased num for legacy for compatibility. */
        virtio_queue_set_vector(vdev, index, index);
    }
    /* tell notify handler in case of config change */
    vdev->config_vector = VIRTIO_QUEUE_MAX;
    return 0;
}

static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev)
{
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    virtio_ccw_stop_ioeventfd(dev);
    virtio_reset(vdev);
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
    if (dev->indicators2) {
        release_indicator(&dev->routes.adapter, dev->indicators2);
        dev->indicators2 = NULL;
    }
    if (dev->summary_indicator) {
        release_indicator(&dev->routes.adapter, dev->summary_indicator);
        dev->summary_indicator = NULL;
    }
    ccw_dev->sch->thinint_active = false;
}
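
/*
 * Handle CCW_CMD_SET_VQ: read a (legacy or revision 1) vq info block from the
 * channel data stream and apply it via virtio_ccw_set_vqs().
 */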
static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
                                    bool is_legacy)
{
    int ret;
    VqInfoBlock info;
    VqInfoBlockLegacy linfo;
    size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info);

    if (check_len) {
        if (ccw.count != info_len) {
            return -EINVAL;
        }
    } else if (ccw.count < info_len) {
        /* Can't execute command. */
        return -EINVAL;
    }
    if (!ccw.cda) {
        return -EFAULT;
    }
    if (is_legacy) {
        ccw_dstream_read(&sch->cds, linfo);
        linfo.queue = be64_to_cpu(linfo.queue);
        linfo.align = be32_to_cpu(linfo.align);
        linfo.index = be16_to_cpu(linfo.index);
        linfo.num = be16_to_cpu(linfo.num);
        ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
    } else {
        ccw_dstream_read(&sch->cds, info);
        info.desc = be64_to_cpu(info.desc);
        info.index = be16_to_cpu(info.index);
        info.num = be16_to_cpu(info.num);
        info.avail = be64_to_cpu(info.avail);
        info.used = be64_to_cpu(info.used);
        ret = virtio_ccw_set_vqs(sch, &info, NULL);
    }
    sch->curr_status.scsw.count = 0;
    return ret;
}
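
/*
 * Central CCW interpretation callback for virtio subchannels: dispatch on the
 * virtio-ccw command code and emulate the corresponding transport operation.
 */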
static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    int ret;
    VirtioRevInfo revinfo;
    uint8_t status;
    VirtioFeatDesc features;
    hwaddr indicators;
    VqConfigBlock vq_config;
    VirtioCcwDevice *dev = sch->driver_data;
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    bool check_len;
    int len;
    VirtioThinintInfo thinint;

    if (!dev) {
        return -EINVAL;
    }

    trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
                                   ccw.cmd_code);
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    if (dev->revision < 0 && ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) {
        if (dev->force_revision_1) {
            /*
             * virtio-1 drivers must start with negotiating to a revision >= 1,
             * so post a command reject for all other commands
             */
            return -ENOSYS;
        } else {
            /*
             * If the driver issues any command that is not SET_VIRTIO_REV,
             * we'll have to operate the device in legacy mode.
             */
            dev->revision = 0;
        }
    }

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_SET_VQ:
        ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1);
        break;
    case CCW_CMD_VDEV_RESET:
        virtio_ccw_reset_virtio(dev, vdev);
        ret = 0;
        break;
    case CCW_CMD_READ_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            ccw_dstream_advance(&sch->cds, sizeof(features.features));
            ccw_dstream_read(&sch->cds, features.index);
            if (features.index == 0) {
                if (dev->revision >= 1) {
                    /* Don't offer legacy features for modern devices. */
                    features.features = (uint32_t)
                        (vdev->host_features & ~vdc->legacy_features);
                } else {
                    features.features = (uint32_t)vdev->host_features;
                }
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * Only offer feature bits beyond 31 if the guest has
                 * negotiated at least revision 1.
                 */
                features.features = (uint32_t)(vdev->host_features >> 32);
            } else {
                /* Return zeroes if the guest supports more feature bits. */
                features.features = 0;
            }
            ccw_dstream_rewind(&sch->cds);
            features.features = cpu_to_le32(features.features);
            ccw_dstream_write(&sch->cds, features.features);
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, features);
            features.features = le32_to_cpu(features.features);
            if (features.index == 0) {
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0xffffffff00000000ULL) |
                                    features.features);
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * If the guest did not negotiate at least revision 1,
                 * we did not offer it any feature bits beyond 31. Such a
                 * guest passing us any bit here is therefore buggy.
                 */
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0x00000000ffffffffULL) |
                                    ((uint64_t)features.features << 32));
            } else {
                /*
                 * If the guest supports more feature bits, assert that it
                 * passes us zeroes for those we don't support.
                 */
                if (features.features) {
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "Guest bug: features[%i]=%x (expected 0)",
                                  features.index, features.features);
                    /* XXX: do a unit check here? */
                }
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            virtio_bus_get_vdev_config(&dev->bus, vdev->config);
            ccw_dstream_write_buf(&sch->cds, vdev->config, len);
            sch->curr_status.scsw.count = ccw.count - len;
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len);
            if (!ret) {
                virtio_bus_set_vdev_config(&dev->bus, vdev->config);
                sch->curr_status.scsw.count = ccw.count - len;
            }
        }
        break;
    case CCW_CMD_READ_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            address_space_stb(&address_space_memory, ccw.cda, vdev->status,
                              MEMTXATTRS_UNSPECIFIED, NULL);
            sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);
            ret = 0;
        }
        break;
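    /*
     * CCW_CMD_WRITE_STATUS drives the virtio status byte: a write of zero
     * resets the device, and DRIVER_OK (re)arms ioeventfd processing.
     */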
    case CCW_CMD_WRITE_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, status);
            if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                virtio_ccw_stop_ioeventfd(dev);
            }
            if (virtio_set_status(vdev, status) == 0) {
                if (vdev->status == 0) {
                    virtio_ccw_reset_virtio(dev, vdev);
                }
                if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                    virtio_ccw_start_ioeventfd(dev);
                }
                sch->curr_status.scsw.count = ccw.count - sizeof(status);
                ret = 0;
            } else {
                /* Trigger a command reject. */
                ret = -ENOSYS;
            }
        }
        break;
    case CCW_CMD_SET_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (sch->thinint_active) {
            /* Trigger a command reject. */
            ret = -ENOSYS;
            break;
        }
        if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) {
            /* More queues than indicator bits --> trigger a reject */
            ret = -ENOSYS;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, indicators);
            indicators = be64_to_cpu(indicators);
            dev->indicators = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_CONF_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, indicators);
            indicators = be64_to_cpu(indicators);
            dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_VQ_CONF:
        if (check_len) {
            if (ccw.count != sizeof(vq_config)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(vq_config)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, vq_config.index);
            vq_config.index = be16_to_cpu(vq_config.index);
            if (vq_config.index >= VIRTIO_QUEUE_MAX) {
                ret = -EINVAL;
                break;
            }
            vq_config.num_max = virtio_queue_get_num(vdev,
                                                     vq_config.index);
            vq_config.num_max = cpu_to_be16(vq_config.num_max);
            ccw_dstream_write(&sch->cds, vq_config.num_max);
            sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
            ret = 0;
        }
        break;
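    /*
     * CCW_CMD_SET_IND_ADAPTER switches the subchannel to adapter (thin)
     * interrupts: the guest supplies a summary indicator, a device indicator
     * area, the starting bit offset, and the ISC to be used.
     */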
    case CCW_CMD_SET_IND_ADAPTER:
        if (check_len) {
            if (ccw.count != sizeof(thinint)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(thinint)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else if (dev->indicators && !sch->thinint_active) {
            /* Trigger a command reject. */
            ret = -ENOSYS;
        } else {
            if (ccw_dstream_read(&sch->cds, thinint)) {
                ret = -EFAULT;
            } else {
                thinint.ind_bit = be64_to_cpu(thinint.ind_bit);
                thinint.summary_indicator =
                    be64_to_cpu(thinint.summary_indicator);
                thinint.device_indicator =
                    be64_to_cpu(thinint.device_indicator);

                dev->summary_indicator =
                    get_indicator(thinint.summary_indicator, sizeof(uint8_t));
                dev->indicators =
                    get_indicator(thinint.device_indicator,
                                  thinint.ind_bit / 8 + 1);
                dev->thinint_isc = thinint.isc;
                dev->routes.adapter.ind_offset = thinint.ind_bit;
                dev->routes.adapter.summary_offset = 7;
                dev->routes.adapter.adapter_id = css_get_adapter_id(
                                                 CSS_IO_ADAPTER_VIRTIO,
                                                 dev->thinint_isc);
                sch->thinint_active = ((dev->indicators != NULL) &&
                                       (dev->summary_indicator != NULL));
                sch->curr_status.scsw.count = ccw.count - sizeof(thinint);
                ret = 0;
            }
        }
        break;
    case CCW_CMD_SET_VIRTIO_REV:
        len = sizeof(revinfo);
        if (ccw.count < len) {
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
            break;
        }
        ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
        revinfo.revision = be16_to_cpu(revinfo.revision);
        revinfo.length = be16_to_cpu(revinfo.length);
        if (ccw.count < len + revinfo.length ||
            (check_len && ccw.count > len + revinfo.length)) {
            ret = -EINVAL;
            break;
        }
        /*
         * Once we start to support revisions with additional data, we'll
         * need to fetch it here. Nothing to do for now, though.
         */
        if (dev->revision >= 0 ||
            revinfo.revision > virtio_ccw_rev_max(dev) ||
            (dev->force_revision_1 && !revinfo.revision)) {
            ret = -ENOSYS;
            break;
        }
        ret = 0;
        dev->revision = revinfo.revision;
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    return ret;
}

static void virtio_sch_disable_cb(SubchDev *sch)
{
    VirtioCcwDevice *dev = sch->driver_data;

    dev->revision = -1;
}
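
/*
 * Realize the virtio-ccw proxy: create the virtual subchannel, hook up the
 * CCW callbacks, and hand off to the device-specific and CCW parent realize
 * routines.
 */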
static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
{
    VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
    SubchDev *sch;
    Error *err = NULL;
    int i;

    sch = css_create_sch(ccw_dev->devno, errp);
    if (!sch) {
        return;
    }
    if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) {
        error_setg(&err, "Invalid value of property max_rev "
                   "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
        goto out_err;
    }

    sch->driver_data = dev;
    sch->ccw_cb = virtio_ccw_cb;
    sch->disable_cb = virtio_sch_disable_cb;
    sch->id.reserved = 0xff;
    sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
    sch->do_subchannel_work = do_subchannel_work_virtual;
    ccw_dev->sch = sch;
    dev->indicators = NULL;
    dev->revision = -1;
    for (i = 0; i < ADAPTER_ROUTES_MAX_GSI; i++) {
        dev->routes.gsi[i] = -1;
    }
    css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);

    trace_virtio_ccw_new_device(
        sch->cssid, sch->ssid, sch->schid, sch->devno,
        ccw_dev->devno.valid ? "user-configured" : "auto-configured");

    if (kvm_enabled() && !kvm_eventfds_enabled()) {
        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
    }

    if (k->realize) {
        k->realize(dev, &err);
        if (err) {
            goto out_err;
        }
    }

    ck->realize(ccw_dev, &err);
    if (err) {
        goto out_err;
    }

    return;

out_err:
    error_propagate(errp, err);
    css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
    ccw_dev->sch = NULL;
    g_free(sch);
}

static void virtio_ccw_device_unrealize(VirtioCcwDevice *dev)
{
    VirtIOCCWDeviceClass *dc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;

    if (dc->unrealize) {
        dc->unrealize(dev);
    }

    if (sch) {
        css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
        g_free(sch);
        ccw_dev->sch = NULL;
    }
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
}

/*
 * DeviceState to VirtioCcwDevice. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d)
{
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);

    return container_of(ccw_dev, VirtioCcwDevice, parent_obj);
}
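
/*
 * Atomically OR @to_be_set into the guest's indicator byte at @ind_loc using
 * a compare-and-swap loop, so that bits set concurrently for other queues are
 * not lost. Returns the previous value of the byte, or -1 if the indicator
 * location cannot be mapped.
 */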
static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc,
                                     uint8_t to_be_set)
{
    uint8_t expected, actual;
    hwaddr len = 1;
    /* avoid multiple fetches */
    uint8_t volatile *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
    if (!ind_addr) {
        error_report("%s(%x.%x.%04x): unable to access indicator",
                     __func__, sch->cssid, sch->ssid, sch->schid);
        return -1;
    }
    actual = *ind_addr;
    do {
        expected = actual;
        actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
    } while (actual != expected);
    trace_virtio_ccw_set_ind(ind_loc, actual, actual | to_be_set);
    cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);

    return actual;
}
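
/*
 * Guest notification hook for the virtio-ccw transport: depending on how the
 * guest set up its indicators, either raise an adapter (thin) interrupt or
 * set the classic per-device indicator bits and post an I/O interrupt.
 */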
static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
{
    VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);
    SubchDev *sch = ccw_dev->sch;
    uint64_t indicators;

    if (vector == VIRTIO_NO_VECTOR) {
        return;
    }
    /*
     * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue
     * vector == VIRTIO_QUEUE_MAX: configuration change notification
     * bits beyond that are unused and should never be notified for
     */
    assert(vector <= VIRTIO_QUEUE_MAX);

    if (vector < VIRTIO_QUEUE_MAX) {
        if (!dev->indicators) {
            return;
        }
        if (sch->thinint_active) {
            /*
             * In the adapter interrupt case, indicators points to a
             * memory area that may be (way) larger than 64 bit and
             * ind_bit indicates the start of the indicators in a big
             * endian notation.
             */
            uint64_t ind_bit = dev->routes.adapter.ind_offset;

            virtio_set_ind_atomic(sch, dev->indicators->addr +
                                  (ind_bit + vector) / 8,
                                  0x80 >> ((ind_bit + vector) % 8));
            if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr,
                                       0x01)) {
                css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc);
            }
        } else {
            assert(vector < NR_CLASSIC_INDICATOR_BITS);
            indicators = address_space_ldq(&address_space_memory,
                                           dev->indicators->addr,
                                           MEMTXATTRS_UNSPECIFIED,
                                           NULL);
            indicators |= 1ULL << vector;
            address_space_stq(&address_space_memory, dev->indicators->addr,
                              indicators, MEMTXATTRS_UNSPECIFIED, NULL);
            css_conditional_io_interrupt(sch);
        }
    } else {
        if (!dev->indicators2) {
            return;
        }
        indicators = address_space_ldq(&address_space_memory,
                                       dev->indicators2->addr,
                                       MEMTXATTRS_UNSPECIFIED,
                                       NULL);
        indicators |= 1ULL;
        address_space_stq(&address_space_memory, dev->indicators2->addr,
                          indicators, MEMTXATTRS_UNSPECIFIED, NULL);
        css_conditional_io_interrupt(sch);
    }
}

static void virtio_ccw_reset(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);

    virtio_ccw_reset_virtio(dev, vdev);
    if (vdc->parent_reset) {
        vdc->parent_reset(d);
    }
}

static void virtio_ccw_vmstate_change(DeviceState *d, bool running)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    if (running) {
        virtio_ccw_start_ioeventfd(dev);
    } else {
        virtio_ccw_stop_ioeventfd(dev);
    }
}

static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
{
    CcwDevice *dev = CCW_DEVICE(d);

    return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
}

static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
{
    int r;
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    if (!ccw_dev->sch->thinint_active) {
        return -EINVAL;
    }

    r = map_indicator(&dev->routes.adapter, dev->summary_indicator);
    if (r) {
        return r;
    }
    r = map_indicator(&dev->routes.adapter, dev->indicators);
    if (r) {
        return r;
    }
    dev->routes.adapter.summary_addr = dev->summary_indicator->map;
    dev->routes.adapter.ind_addr = dev->indicators->map;

    return 0;
}

static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    int i;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    int ret;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    ret = virtio_ccw_get_mappings(dev);
    if (ret) {
        return ret;
    }
    for (i = 0; i < nvqs; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }
    dev->routes.num_routes = i;
    return fsc->add_adapter_routes(fs, &dev->routes);
}

static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    fsc->release_adapter_routes(fs, &dev->routes);
}

static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL,
                                              dev->routes.gsi[n]);
}

static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier,
                                                dev->routes.gsi[n]);
    assert(ret == 0);
}
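
/*
 * (Un)assign the guest notifier for virtqueue @n, optionally wiring it up to
 * an irqfd so the adapter interrupt can be injected by the kernel without a
 * round trip through QEMU.
 */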
static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
                                         bool assign, bool with_irqfd)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (assign) {
        int r = event_notifier_init(notifier, 0);

        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
        if (with_irqfd) {
            r = virtio_ccw_add_irqfd(dev, n);
            if (r) {
                virtio_queue_set_guest_notifier_fd_handler(vq, false,
                                                           with_irqfd);
                return r;
            }
        }
        /*
         * We do not support individual masking for channel devices, so we
         * need to manually trigger any guest masking callbacks here.
         */
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, false);
        }
        /* get lost events and re-inject */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, n)) {
            event_notifier_set(notifier);
        }
    } else {
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, true);
        }
        if (with_irqfd) {
            virtio_ccw_remove_irqfd(dev, n);
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }
    return 0;
}

static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs,
                                          bool assigned)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled();
    int r, n;

    if (with_irqfd && assigned) {
        /* irq routes need to be set up before assigning irqfds */
        r = virtio_ccw_setup_irqroutes(dev, nvqs);
        if (r < 0) {
            goto irqroute_error;
        }
    }
    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }
        r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    if (with_irqfd && !assigned) {
        /* release irq routes after irqfds have been released */
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return 0;

assign_error:
    while (--n >= 0) {
        virtio_ccw_set_guest_notifier(dev, n, !assigned, false);
    }
irqroute_error:
    if (with_irqfd && assigned) {
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return r;
}
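
/*
 * Migration hooks invoked via the virtio bus: save/load the per-queue vector
 * and the transport state of the proxy device.
 */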
static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    qemu_put_be16(f, virtio_queue_vector(vdev, n));
}

static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    uint16_t vector;

    qemu_get_be16s(f, &vector);
    virtio_queue_set_vector(vdev, n, vector);

    return 0;
}

static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL);
}

static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1);
}

static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);

    if (dev->max_rev >= 1) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    SubchDev *sch = ccw_dev->sch;
    int n = virtio_get_num_queues(vdev);
    S390FLICState *flic = s390_get_flic();

    if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        dev->max_rev = 0;
    }

    if (!virtio_ccw_rev_max(dev) && !virtio_legacy_allowed(vdev)) {
        /*
         * To avoid migration issues, we allow legacy mode when legacy
         * check is disabled in the old machine types (< 5.1).
         */
        if (virtio_legacy_check_disabled(vdev)) {
            warn_report("device requires revision >= 1, but for backward "
                        "compatibility max_revision=0 is allowed");
        } else {
            error_setg(errp, "Invalid value of property max_rev "
                       "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
            return;
        }
    }

    if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "The number of virtqueues %d "
                   "exceeds virtio limit %d", n,
                   VIRTIO_QUEUE_MAX);
        return;
    }
    if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) {
        error_setg(errp, "The number of virtqueues %d "
                   "exceeds flic adapter route limit %d", n,
                   flic->adapter_routes_max_batch);
        return;
    }

    sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);

    css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
                          d->hotplugged, 1);
}

static void virtio_ccw_device_unplugged(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    virtio_ccw_stop_ioeventfd(dev);
}

/**************** Virtio-ccw Bus Device Descriptions *******************/

static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;

    virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev);
    virtio_ccw_device_realize(_dev, errp);
}

static void virtio_ccw_busdev_unrealize(DeviceState *dev)
{
    VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;

    virtio_ccw_device_unrealize(_dev);
}

static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev,
                                     DeviceState *dev, Error **errp)
{
    VirtioCcwDevice *_dev = to_virtio_ccw_dev_fast(dev);

    virtio_ccw_stop_ioeventfd(_dev);
}

static void virtio_ccw_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CCWDeviceClass *k = CCW_DEVICE_CLASS(dc);
    VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass);

    k->unplug = virtio_ccw_busdev_unplug;
    dc->realize = virtio_ccw_busdev_realize;
    dc->unrealize = virtio_ccw_busdev_unrealize;
    dc->bus_type = TYPE_VIRTUAL_CSS_BUS;
    device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset);
}

static const TypeInfo virtio_ccw_device_info = {
    .name = TYPE_VIRTIO_CCW_DEVICE,
    .parent = TYPE_CCW_DEVICE,
    .instance_size = sizeof(VirtioCcwDevice),
    .class_init = virtio_ccw_device_class_init,
    .class_size = sizeof(VirtIOCCWDeviceClass),
    .abstract = true,
};

/* virtio-ccw-bus */

static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtioCcwDevice *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_CCW_BUS,
                        qdev, virtio_bus_name);
}

static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
{
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    BusClass *bus_class = BUS_CLASS(klass);

    bus_class->max_dev = 1;
    k->notify = virtio_ccw_notify;
    k->vmstate_change = virtio_ccw_vmstate_change;
    k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
    k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
    k->save_queue = virtio_ccw_save_queue;
    k->load_queue = virtio_ccw_load_queue;
    k->save_config = virtio_ccw_save_config;
    k->load_config = virtio_ccw_load_config;
    k->pre_plugged = virtio_ccw_pre_plugged;
    k->device_plugged = virtio_ccw_device_plugged;
    k->device_unplugged = virtio_ccw_device_unplugged;
    k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
}

static const TypeInfo virtio_ccw_bus_info = {
    .name = TYPE_VIRTIO_CCW_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioCcwBusState),
    .class_size = sizeof(VirtioCcwBusClass),
    .class_init = virtio_ccw_bus_class_init,
};

static void virtio_ccw_register(void)
{
    type_register_static(&virtio_ccw_bus_info);
    type_register_static(&virtio_ccw_device_info);
}

type_init(virtio_ccw_register)