1 /* 2 * virtio ccw target implementation 3 * 4 * Copyright 2012,2015 IBM Corp. 5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> 6 * Pierre Morel <pmorel@linux.vnet.ibm.com> 7 * 8 * This work is licensed under the terms of the GNU GPL, version 2 or (at 9 * your option) any later version. See the COPYING file in the top-level 10 * directory. 11 */ 12 13 #include "qemu/osdep.h" 14 #include "qapi/error.h" 15 #include "hw/hw.h" 16 #include "sysemu/sysemu.h" 17 #include "sysemu/kvm.h" 18 #include "net/net.h" 19 #include "hw/virtio/virtio.h" 20 #include "migration/qemu-file-types.h" 21 #include "hw/virtio/virtio-net.h" 22 #include "hw/sysbus.h" 23 #include "qemu/bitops.h" 24 #include "qemu/error-report.h" 25 #include "qemu/module.h" 26 #include "hw/virtio/virtio-access.h" 27 #include "hw/virtio/virtio-bus.h" 28 #include "hw/s390x/adapter.h" 29 #include "hw/s390x/s390_flic.h" 30 31 #include "hw/s390x/ioinst.h" 32 #include "hw/s390x/css.h" 33 #include "virtio-ccw.h" 34 #include "trace.h" 35 #include "hw/s390x/css-bridge.h" 36 #include "hw/s390x/s390-virtio-ccw.h" 37 38 #define NR_CLASSIC_INDICATOR_BITS 64 39 40 static int virtio_ccw_dev_post_load(void *opaque, int version_id) 41 { 42 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(opaque); 43 CcwDevice *ccw_dev = CCW_DEVICE(dev); 44 CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev); 45 46 ccw_dev->sch->driver_data = dev; 47 if (ccw_dev->sch->thinint_active) { 48 dev->routes.adapter.adapter_id = css_get_adapter_id( 49 CSS_IO_ADAPTER_VIRTIO, 50 dev->thinint_isc); 51 } 52 /* Re-fill subch_id after loading the subchannel states.*/ 53 if (ck->refill_ids) { 54 ck->refill_ids(ccw_dev); 55 } 56 return 0; 57 } 58 59 typedef struct VirtioCcwDeviceTmp { 60 VirtioCcwDevice *parent; 61 uint16_t config_vector; 62 } VirtioCcwDeviceTmp; 63 64 static int virtio_ccw_dev_tmp_pre_save(void *opaque) 65 { 66 VirtioCcwDeviceTmp *tmp = opaque; 67 VirtioCcwDevice *dev = tmp->parent; 68 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); 69 70 tmp->config_vector = vdev->config_vector; 71 72 return 0; 73 } 74 75 static int virtio_ccw_dev_tmp_post_load(void *opaque, int version_id) 76 { 77 VirtioCcwDeviceTmp *tmp = opaque; 78 VirtioCcwDevice *dev = tmp->parent; 79 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); 80 81 vdev->config_vector = tmp->config_vector; 82 return 0; 83 } 84 85 const VMStateDescription vmstate_virtio_ccw_dev_tmp = { 86 .name = "s390_virtio_ccw_dev_tmp", 87 .pre_save = virtio_ccw_dev_tmp_pre_save, 88 .post_load = virtio_ccw_dev_tmp_post_load, 89 .fields = (VMStateField[]) { 90 VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp), 91 VMSTATE_END_OF_LIST() 92 } 93 }; 94 95 const VMStateDescription vmstate_virtio_ccw_dev = { 96 .name = "s390_virtio_ccw_dev", 97 .version_id = 1, 98 .minimum_version_id = 1, 99 .post_load = virtio_ccw_dev_post_load, 100 .fields = (VMStateField[]) { 101 VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice), 102 VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice), 103 VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice), 104 VMSTATE_PTR_TO_IND_ADDR(summary_indicator, VirtioCcwDevice), 105 /* 106 * Ugly hack because VirtIODevice does not migrate itself. 107 * This also makes legacy via vmstate_save_state possible. 108 */ 109 VMSTATE_WITH_TMP(VirtioCcwDevice, VirtioCcwDeviceTmp, 110 vmstate_virtio_ccw_dev_tmp), 111 VMSTATE_STRUCT(routes, VirtioCcwDevice, 1, vmstate_adapter_routes, 112 AdapterRoutes), 113 VMSTATE_UINT8(thinint_isc, VirtioCcwDevice), 114 VMSTATE_INT32(revision, VirtioCcwDevice), 115 VMSTATE_END_OF_LIST() 116 } 117 }; 118 119 static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size, 120 VirtioCcwDevice *dev); 121 122 VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch) 123 { 124 VirtIODevice *vdev = NULL; 125 VirtioCcwDevice *dev = sch->driver_data; 126 127 if (dev) { 128 vdev = virtio_bus_get_device(&dev->bus); 129 } 130 return vdev; 131 } 132 133 static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev) 134 { 135 virtio_bus_start_ioeventfd(&dev->bus); 136 } 137 138 static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev) 139 { 140 virtio_bus_stop_ioeventfd(&dev->bus); 141 } 142 143 static bool virtio_ccw_ioeventfd_enabled(DeviceState *d) 144 { 145 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); 146 147 return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0; 148 } 149 150 static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier, 151 int n, bool assign) 152 { 153 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); 154 CcwDevice *ccw_dev = CCW_DEVICE(dev); 155 SubchDev *sch = ccw_dev->sch; 156 uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid; 157 158 return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign); 159 } 160 161 /* Communication blocks used by several channel commands. */ 162 typedef struct VqInfoBlockLegacy { 163 uint64_t queue; 164 uint32_t align; 165 uint16_t index; 166 uint16_t num; 167 } QEMU_PACKED VqInfoBlockLegacy; 168 169 typedef struct VqInfoBlock { 170 uint64_t desc; 171 uint32_t res0; 172 uint16_t index; 173 uint16_t num; 174 uint64_t avail; 175 uint64_t used; 176 } QEMU_PACKED VqInfoBlock; 177 178 typedef struct VqConfigBlock { 179 uint16_t index; 180 uint16_t num_max; 181 } QEMU_PACKED VqConfigBlock; 182 183 typedef struct VirtioFeatDesc { 184 uint32_t features; 185 uint8_t index; 186 } QEMU_PACKED VirtioFeatDesc; 187 188 typedef struct VirtioThinintInfo { 189 hwaddr summary_indicator; 190 hwaddr device_indicator; 191 uint64_t ind_bit; 192 uint8_t isc; 193 } QEMU_PACKED VirtioThinintInfo; 194 195 typedef struct VirtioRevInfo { 196 uint16_t revision; 197 uint16_t length; 198 uint8_t data[0]; 199 } QEMU_PACKED VirtioRevInfo; 200 201 /* Specify where the virtqueues for the subchannel are in guest memory. */ 202 static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info, 203 VqInfoBlockLegacy *linfo) 204 { 205 VirtIODevice *vdev = virtio_ccw_get_vdev(sch); 206 uint16_t index = info ? info->index : linfo->index; 207 uint16_t num = info ? info->num : linfo->num; 208 uint64_t desc = info ? info->desc : linfo->queue; 209 210 if (index >= VIRTIO_QUEUE_MAX) { 211 return -EINVAL; 212 } 213 214 /* Current code in virtio.c relies on 4K alignment. */ 215 if (linfo && desc && (linfo->align != 4096)) { 216 return -EINVAL; 217 } 218 219 if (!vdev) { 220 return -EINVAL; 221 } 222 223 if (info) { 224 virtio_queue_set_rings(vdev, index, desc, info->avail, info->used); 225 } else { 226 virtio_queue_set_addr(vdev, index, desc); 227 } 228 if (!desc) { 229 virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR); 230 } else { 231 if (info) { 232 /* virtio-1 allows changing the ring size. */ 233 if (virtio_queue_get_max_num(vdev, index) < num) { 234 /* Fail if we exceed the maximum number. */ 235 return -EINVAL; 236 } 237 virtio_queue_set_num(vdev, index, num); 238 } else if (virtio_queue_get_num(vdev, index) > num) { 239 /* Fail if we don't have a big enough queue. */ 240 return -EINVAL; 241 } 242 /* We ignore possible increased num for legacy for compatibility. */ 243 virtio_queue_set_vector(vdev, index, index); 244 } 245 /* tell notify handler in case of config change */ 246 vdev->config_vector = VIRTIO_QUEUE_MAX; 247 return 0; 248 } 249 250 static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev) 251 { 252 CcwDevice *ccw_dev = CCW_DEVICE(dev); 253 254 virtio_ccw_stop_ioeventfd(dev); 255 virtio_reset(vdev); 256 if (dev->indicators) { 257 release_indicator(&dev->routes.adapter, dev->indicators); 258 dev->indicators = NULL; 259 } 260 if (dev->indicators2) { 261 release_indicator(&dev->routes.adapter, dev->indicators2); 262 dev->indicators2 = NULL; 263 } 264 if (dev->summary_indicator) { 265 release_indicator(&dev->routes.adapter, dev->summary_indicator); 266 dev->summary_indicator = NULL; 267 } 268 ccw_dev->sch->thinint_active = false; 269 } 270 271 static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len, 272 bool is_legacy) 273 { 274 int ret; 275 VqInfoBlock info; 276 VqInfoBlockLegacy linfo; 277 size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info); 278 279 if (check_len) { 280 if (ccw.count != info_len) { 281 return -EINVAL; 282 } 283 } else if (ccw.count < info_len) { 284 /* Can't execute command. */ 285 return -EINVAL; 286 } 287 if (!ccw.cda) { 288 return -EFAULT; 289 } 290 if (is_legacy) { 291 ccw_dstream_read(&sch->cds, linfo); 292 linfo.queue = be64_to_cpu(linfo.queue); 293 linfo.align = be32_to_cpu(linfo.align); 294 linfo.index = be16_to_cpu(linfo.index); 295 linfo.num = be16_to_cpu(linfo.num); 296 ret = virtio_ccw_set_vqs(sch, NULL, &linfo); 297 } else { 298 ccw_dstream_read(&sch->cds, info); 299 info.desc = be64_to_cpu(info.desc); 300 info.index = be16_to_cpu(info.index); 301 info.num = be16_to_cpu(info.num); 302 info.avail = be64_to_cpu(info.avail); 303 info.used = be64_to_cpu(info.used); 304 ret = virtio_ccw_set_vqs(sch, &info, NULL); 305 } 306 sch->curr_status.scsw.count = 0; 307 return ret; 308 } 309 310 static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw) 311 { 312 int ret; 313 VirtioRevInfo revinfo; 314 uint8_t status; 315 VirtioFeatDesc features; 316 hwaddr indicators; 317 VqConfigBlock vq_config; 318 VirtioCcwDevice *dev = sch->driver_data; 319 VirtIODevice *vdev = virtio_ccw_get_vdev(sch); 320 bool check_len; 321 int len; 322 VirtioThinintInfo thinint; 323 324 if (!dev) { 325 return -EINVAL; 326 } 327 328 trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid, 329 ccw.cmd_code); 330 check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC)); 331 332 if (dev->force_revision_1 && dev->revision < 0 && 333 ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) { 334 /* 335 * virtio-1 drivers must start with negotiating to a revision >= 1, 336 * so post a command reject for all other commands 337 */ 338 return -ENOSYS; 339 } 340 341 /* Look at the command. */ 342 switch (ccw.cmd_code) { 343 case CCW_CMD_SET_VQ: 344 ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1); 345 break; 346 case CCW_CMD_VDEV_RESET: 347 virtio_ccw_reset_virtio(dev, vdev); 348 ret = 0; 349 break; 350 case CCW_CMD_READ_FEAT: 351 if (check_len) { 352 if (ccw.count != sizeof(features)) { 353 ret = -EINVAL; 354 break; 355 } 356 } else if (ccw.count < sizeof(features)) { 357 /* Can't execute command. */ 358 ret = -EINVAL; 359 break; 360 } 361 if (!ccw.cda) { 362 ret = -EFAULT; 363 } else { 364 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); 365 366 ccw_dstream_advance(&sch->cds, sizeof(features.features)); 367 ccw_dstream_read(&sch->cds, features.index); 368 if (features.index == 0) { 369 if (dev->revision >= 1) { 370 /* Don't offer legacy features for modern devices. */ 371 features.features = (uint32_t) 372 (vdev->host_features & ~vdc->legacy_features); 373 } else { 374 features.features = (uint32_t)vdev->host_features; 375 } 376 } else if ((features.index == 1) && (dev->revision >= 1)) { 377 /* 378 * Only offer feature bits beyond 31 if the guest has 379 * negotiated at least revision 1. 380 */ 381 features.features = (uint32_t)(vdev->host_features >> 32); 382 } else { 383 /* Return zeroes if the guest supports more feature bits. */ 384 features.features = 0; 385 } 386 ccw_dstream_rewind(&sch->cds); 387 features.features = cpu_to_le32(features.features); 388 ccw_dstream_write(&sch->cds, features.features); 389 sch->curr_status.scsw.count = ccw.count - sizeof(features); 390 ret = 0; 391 } 392 break; 393 case CCW_CMD_WRITE_FEAT: 394 if (check_len) { 395 if (ccw.count != sizeof(features)) { 396 ret = -EINVAL; 397 break; 398 } 399 } else if (ccw.count < sizeof(features)) { 400 /* Can't execute command. */ 401 ret = -EINVAL; 402 break; 403 } 404 if (!ccw.cda) { 405 ret = -EFAULT; 406 } else { 407 ccw_dstream_read(&sch->cds, features); 408 features.features = le32_to_cpu(features.features); 409 if (features.index == 0) { 410 virtio_set_features(vdev, 411 (vdev->guest_features & 0xffffffff00000000ULL) | 412 features.features); 413 } else if ((features.index == 1) && (dev->revision >= 1)) { 414 /* 415 * If the guest did not negotiate at least revision 1, 416 * we did not offer it any feature bits beyond 31. Such a 417 * guest passing us any bit here is therefore buggy. 418 */ 419 virtio_set_features(vdev, 420 (vdev->guest_features & 0x00000000ffffffffULL) | 421 ((uint64_t)features.features << 32)); 422 } else { 423 /* 424 * If the guest supports more feature bits, assert that it 425 * passes us zeroes for those we don't support. 426 */ 427 if (features.features) { 428 qemu_log_mask(LOG_GUEST_ERROR, 429 "Guest bug: features[%i]=%x (expected 0)", 430 features.index, features.features); 431 /* XXX: do a unit check here? */ 432 } 433 } 434 sch->curr_status.scsw.count = ccw.count - sizeof(features); 435 ret = 0; 436 } 437 break; 438 case CCW_CMD_READ_CONF: 439 if (check_len) { 440 if (ccw.count > vdev->config_len) { 441 ret = -EINVAL; 442 break; 443 } 444 } 445 len = MIN(ccw.count, vdev->config_len); 446 if (!ccw.cda) { 447 ret = -EFAULT; 448 } else { 449 virtio_bus_get_vdev_config(&dev->bus, vdev->config); 450 ccw_dstream_write_buf(&sch->cds, vdev->config, len); 451 sch->curr_status.scsw.count = ccw.count - len; 452 ret = 0; 453 } 454 break; 455 case CCW_CMD_WRITE_CONF: 456 if (check_len) { 457 if (ccw.count > vdev->config_len) { 458 ret = -EINVAL; 459 break; 460 } 461 } 462 len = MIN(ccw.count, vdev->config_len); 463 if (!ccw.cda) { 464 ret = -EFAULT; 465 } else { 466 ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len); 467 if (!ret) { 468 virtio_bus_set_vdev_config(&dev->bus, vdev->config); 469 sch->curr_status.scsw.count = ccw.count - len; 470 } 471 } 472 break; 473 case CCW_CMD_READ_STATUS: 474 if (check_len) { 475 if (ccw.count != sizeof(status)) { 476 ret = -EINVAL; 477 break; 478 } 479 } else if (ccw.count < sizeof(status)) { 480 /* Can't execute command. */ 481 ret = -EINVAL; 482 break; 483 } 484 if (!ccw.cda) { 485 ret = -EFAULT; 486 } else { 487 address_space_stb(&address_space_memory, ccw.cda, vdev->status, 488 MEMTXATTRS_UNSPECIFIED, NULL); 489 sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status); 490 ret = 0; 491 } 492 break; 493 case CCW_CMD_WRITE_STATUS: 494 if (check_len) { 495 if (ccw.count != sizeof(status)) { 496 ret = -EINVAL; 497 break; 498 } 499 } else if (ccw.count < sizeof(status)) { 500 /* Can't execute command. */ 501 ret = -EINVAL; 502 break; 503 } 504 if (!ccw.cda) { 505 ret = -EFAULT; 506 } else { 507 ccw_dstream_read(&sch->cds, status); 508 if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) { 509 virtio_ccw_stop_ioeventfd(dev); 510 } 511 if (virtio_set_status(vdev, status) == 0) { 512 if (vdev->status == 0) { 513 virtio_ccw_reset_virtio(dev, vdev); 514 } 515 if (status & VIRTIO_CONFIG_S_DRIVER_OK) { 516 virtio_ccw_start_ioeventfd(dev); 517 } 518 sch->curr_status.scsw.count = ccw.count - sizeof(status); 519 ret = 0; 520 } else { 521 /* Trigger a command reject. */ 522 ret = -ENOSYS; 523 } 524 } 525 break; 526 case CCW_CMD_SET_IND: 527 if (check_len) { 528 if (ccw.count != sizeof(indicators)) { 529 ret = -EINVAL; 530 break; 531 } 532 } else if (ccw.count < sizeof(indicators)) { 533 /* Can't execute command. */ 534 ret = -EINVAL; 535 break; 536 } 537 if (sch->thinint_active) { 538 /* Trigger a command reject. */ 539 ret = -ENOSYS; 540 break; 541 } 542 if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) { 543 /* More queues than indicator bits --> trigger a reject */ 544 ret = -ENOSYS; 545 break; 546 } 547 if (!ccw.cda) { 548 ret = -EFAULT; 549 } else { 550 ccw_dstream_read(&sch->cds, indicators); 551 indicators = be64_to_cpu(indicators); 552 dev->indicators = get_indicator(indicators, sizeof(uint64_t)); 553 sch->curr_status.scsw.count = ccw.count - sizeof(indicators); 554 ret = 0; 555 } 556 break; 557 case CCW_CMD_SET_CONF_IND: 558 if (check_len) { 559 if (ccw.count != sizeof(indicators)) { 560 ret = -EINVAL; 561 break; 562 } 563 } else if (ccw.count < sizeof(indicators)) { 564 /* Can't execute command. */ 565 ret = -EINVAL; 566 break; 567 } 568 if (!ccw.cda) { 569 ret = -EFAULT; 570 } else { 571 ccw_dstream_read(&sch->cds, indicators); 572 indicators = be64_to_cpu(indicators); 573 dev->indicators2 = get_indicator(indicators, sizeof(uint64_t)); 574 sch->curr_status.scsw.count = ccw.count - sizeof(indicators); 575 ret = 0; 576 } 577 break; 578 case CCW_CMD_READ_VQ_CONF: 579 if (check_len) { 580 if (ccw.count != sizeof(vq_config)) { 581 ret = -EINVAL; 582 break; 583 } 584 } else if (ccw.count < sizeof(vq_config)) { 585 /* Can't execute command. */ 586 ret = -EINVAL; 587 break; 588 } 589 if (!ccw.cda) { 590 ret = -EFAULT; 591 } else { 592 ccw_dstream_read(&sch->cds, vq_config.index); 593 vq_config.index = be16_to_cpu(vq_config.index); 594 if (vq_config.index >= VIRTIO_QUEUE_MAX) { 595 ret = -EINVAL; 596 break; 597 } 598 vq_config.num_max = virtio_queue_get_num(vdev, 599 vq_config.index); 600 vq_config.num_max = cpu_to_be16(vq_config.num_max); 601 ccw_dstream_write(&sch->cds, vq_config.num_max); 602 sch->curr_status.scsw.count = ccw.count - sizeof(vq_config); 603 ret = 0; 604 } 605 break; 606 case CCW_CMD_SET_IND_ADAPTER: 607 if (check_len) { 608 if (ccw.count != sizeof(thinint)) { 609 ret = -EINVAL; 610 break; 611 } 612 } else if (ccw.count < sizeof(thinint)) { 613 /* Can't execute command. */ 614 ret = -EINVAL; 615 break; 616 } 617 if (!ccw.cda) { 618 ret = -EFAULT; 619 } else if (dev->indicators && !sch->thinint_active) { 620 /* Trigger a command reject. */ 621 ret = -ENOSYS; 622 } else { 623 if (ccw_dstream_read(&sch->cds, thinint)) { 624 ret = -EFAULT; 625 } else { 626 thinint.ind_bit = be64_to_cpu(thinint.ind_bit); 627 thinint.summary_indicator = 628 be64_to_cpu(thinint.summary_indicator); 629 thinint.device_indicator = 630 be64_to_cpu(thinint.device_indicator); 631 632 dev->summary_indicator = 633 get_indicator(thinint.summary_indicator, sizeof(uint8_t)); 634 dev->indicators = 635 get_indicator(thinint.device_indicator, 636 thinint.ind_bit / 8 + 1); 637 dev->thinint_isc = thinint.isc; 638 dev->routes.adapter.ind_offset = thinint.ind_bit; 639 dev->routes.adapter.summary_offset = 7; 640 dev->routes.adapter.adapter_id = css_get_adapter_id( 641 CSS_IO_ADAPTER_VIRTIO, 642 dev->thinint_isc); 643 sch->thinint_active = ((dev->indicators != NULL) && 644 (dev->summary_indicator != NULL)); 645 sch->curr_status.scsw.count = ccw.count - sizeof(thinint); 646 ret = 0; 647 } 648 } 649 break; 650 case CCW_CMD_SET_VIRTIO_REV: 651 len = sizeof(revinfo); 652 if (ccw.count < len) { 653 ret = -EINVAL; 654 break; 655 } 656 if (!ccw.cda) { 657 ret = -EFAULT; 658 break; 659 } 660 ccw_dstream_read_buf(&sch->cds, &revinfo, 4); 661 revinfo.revision = be16_to_cpu(revinfo.revision); 662 revinfo.length = be16_to_cpu(revinfo.length); 663 if (ccw.count < len + revinfo.length || 664 (check_len && ccw.count > len + revinfo.length)) { 665 ret = -EINVAL; 666 break; 667 } 668 /* 669 * Once we start to support revisions with additional data, we'll 670 * need to fetch it here. Nothing to do for now, though. 671 */ 672 if (dev->revision >= 0 || 673 revinfo.revision > virtio_ccw_rev_max(dev) || 674 (dev->force_revision_1 && !revinfo.revision)) { 675 ret = -ENOSYS; 676 break; 677 } 678 ret = 0; 679 dev->revision = revinfo.revision; 680 break; 681 default: 682 ret = -ENOSYS; 683 break; 684 } 685 return ret; 686 } 687 688 static void virtio_sch_disable_cb(SubchDev *sch) 689 { 690 VirtioCcwDevice *dev = sch->driver_data; 691 692 dev->revision = -1; 693 } 694 695 static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp) 696 { 697 VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev); 698 CcwDevice *ccw_dev = CCW_DEVICE(dev); 699 CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev); 700 SubchDev *sch; 701 Error *err = NULL; 702 703 sch = css_create_sch(ccw_dev->devno, errp); 704 if (!sch) { 705 return; 706 } 707 if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) { 708 error_setg(&err, "Invalid value of property max_rev " 709 "(is %d expected >= 1)", virtio_ccw_rev_max(dev)); 710 goto out_err; 711 } 712 713 sch->driver_data = dev; 714 sch->ccw_cb = virtio_ccw_cb; 715 sch->disable_cb = virtio_sch_disable_cb; 716 sch->id.reserved = 0xff; 717 sch->id.cu_type = VIRTIO_CCW_CU_TYPE; 718 sch->do_subchannel_work = do_subchannel_work_virtual; 719 ccw_dev->sch = sch; 720 dev->indicators = NULL; 721 dev->revision = -1; 722 css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE); 723 724 trace_virtio_ccw_new_device( 725 sch->cssid, sch->ssid, sch->schid, sch->devno, 726 ccw_dev->devno.valid ? "user-configured" : "auto-configured"); 727 728 if (kvm_enabled() && !kvm_eventfds_enabled()) { 729 dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD; 730 } 731 732 if (k->realize) { 733 k->realize(dev, &err); 734 if (err) { 735 goto out_err; 736 } 737 } 738 739 ck->realize(ccw_dev, &err); 740 if (err) { 741 goto out_err; 742 } 743 744 return; 745 746 out_err: 747 error_propagate(errp, err); 748 css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL); 749 ccw_dev->sch = NULL; 750 g_free(sch); 751 } 752 753 static void virtio_ccw_device_unrealize(VirtioCcwDevice *dev, Error **errp) 754 { 755 VirtIOCCWDeviceClass *dc = VIRTIO_CCW_DEVICE_GET_CLASS(dev); 756 CcwDevice *ccw_dev = CCW_DEVICE(dev); 757 SubchDev *sch = ccw_dev->sch; 758 759 if (dc->unrealize) { 760 dc->unrealize(dev, errp); 761 } 762 763 if (sch) { 764 css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL); 765 g_free(sch); 766 ccw_dev->sch = NULL; 767 } 768 if (dev->indicators) { 769 release_indicator(&dev->routes.adapter, dev->indicators); 770 dev->indicators = NULL; 771 } 772 } 773 774 /* DeviceState to VirtioCcwDevice. Note: used on datapath, 775 * be careful and test performance if you change this. 776 */ 777 static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d) 778 { 779 CcwDevice *ccw_dev = to_ccw_dev_fast(d); 780 781 return container_of(ccw_dev, VirtioCcwDevice, parent_obj); 782 } 783 784 static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc, 785 uint8_t to_be_set) 786 { 787 uint8_t ind_old, ind_new; 788 hwaddr len = 1; 789 uint8_t *ind_addr; 790 791 ind_addr = cpu_physical_memory_map(ind_loc, &len, 1); 792 if (!ind_addr) { 793 error_report("%s(%x.%x.%04x): unable to access indicator", 794 __func__, sch->cssid, sch->ssid, sch->schid); 795 return -1; 796 } 797 do { 798 ind_old = *ind_addr; 799 ind_new = ind_old | to_be_set; 800 } while (atomic_cmpxchg(ind_addr, ind_old, ind_new) != ind_old); 801 trace_virtio_ccw_set_ind(ind_loc, ind_old, ind_new); 802 cpu_physical_memory_unmap(ind_addr, len, 1, len); 803 804 return ind_old; 805 } 806 807 static void virtio_ccw_notify(DeviceState *d, uint16_t vector) 808 { 809 VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d); 810 CcwDevice *ccw_dev = to_ccw_dev_fast(d); 811 SubchDev *sch = ccw_dev->sch; 812 uint64_t indicators; 813 814 if (vector == VIRTIO_NO_VECTOR) { 815 return; 816 } 817 /* 818 * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue 819 * vector == VIRTIO_QUEUE_MAX: configuration change notification 820 * bits beyond that are unused and should never be notified for 821 */ 822 assert(vector <= VIRTIO_QUEUE_MAX); 823 824 if (vector < VIRTIO_QUEUE_MAX) { 825 if (!dev->indicators) { 826 return; 827 } 828 if (sch->thinint_active) { 829 /* 830 * In the adapter interrupt case, indicators points to a 831 * memory area that may be (way) larger than 64 bit and 832 * ind_bit indicates the start of the indicators in a big 833 * endian notation. 834 */ 835 uint64_t ind_bit = dev->routes.adapter.ind_offset; 836 837 virtio_set_ind_atomic(sch, dev->indicators->addr + 838 (ind_bit + vector) / 8, 839 0x80 >> ((ind_bit + vector) % 8)); 840 if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr, 841 0x01)) { 842 css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc); 843 } 844 } else { 845 assert(vector < NR_CLASSIC_INDICATOR_BITS); 846 indicators = address_space_ldq(&address_space_memory, 847 dev->indicators->addr, 848 MEMTXATTRS_UNSPECIFIED, 849 NULL); 850 indicators |= 1ULL << vector; 851 address_space_stq(&address_space_memory, dev->indicators->addr, 852 indicators, MEMTXATTRS_UNSPECIFIED, NULL); 853 css_conditional_io_interrupt(sch); 854 } 855 } else { 856 if (!dev->indicators2) { 857 return; 858 } 859 indicators = address_space_ldq(&address_space_memory, 860 dev->indicators2->addr, 861 MEMTXATTRS_UNSPECIFIED, 862 NULL); 863 indicators |= 1ULL; 864 address_space_stq(&address_space_memory, dev->indicators2->addr, 865 indicators, MEMTXATTRS_UNSPECIFIED, NULL); 866 css_conditional_io_interrupt(sch); 867 } 868 } 869 870 static void virtio_ccw_reset(DeviceState *d) 871 { 872 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); 873 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); 874 VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev); 875 876 virtio_ccw_reset_virtio(dev, vdev); 877 if (vdc->parent_reset) { 878 vdc->parent_reset(d); 879 } 880 } 881 882 static void virtio_ccw_vmstate_change(DeviceState *d, bool running) 883 { 884 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); 885 886 if (running) { 887 virtio_ccw_start_ioeventfd(dev); 888 } else { 889 virtio_ccw_stop_ioeventfd(dev); 890 } 891 } 892 893 static bool virtio_ccw_query_guest_notifiers(DeviceState *d) 894 { 895 CcwDevice *dev = CCW_DEVICE(d); 896 897 return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA); 898 } 899 900 static int virtio_ccw_get_mappings(VirtioCcwDevice *dev) 901 { 902 int r; 903 CcwDevice *ccw_dev = CCW_DEVICE(dev); 904 905 if (!ccw_dev->sch->thinint_active) { 906 return -EINVAL; 907 } 908 909 r = map_indicator(&dev->routes.adapter, dev->summary_indicator); 910 if (r) { 911 return r; 912 } 913 r = map_indicator(&dev->routes.adapter, dev->indicators); 914 if (r) { 915 return r; 916 } 917 dev->routes.adapter.summary_addr = dev->summary_indicator->map; 918 dev->routes.adapter.ind_addr = dev->indicators->map; 919 920 return 0; 921 } 922 923 static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs) 924 { 925 int i; 926 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); 927 int ret; 928 S390FLICState *fs = s390_get_flic(); 929 S390FLICStateClass *fsc = s390_get_flic_class(fs); 930 931 ret = virtio_ccw_get_mappings(dev); 932 if (ret) { 933 return ret; 934 } 935 for (i = 0; i < nvqs; i++) { 936 if (!virtio_queue_get_num(vdev, i)) { 937 break; 938 } 939 } 940 dev->routes.num_routes = i; 941 return fsc->add_adapter_routes(fs, &dev->routes); 942 } 943 944 static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs) 945 { 946 S390FLICState *fs = s390_get_flic(); 947 S390FLICStateClass *fsc = s390_get_flic_class(fs); 948 949 fsc->release_adapter_routes(fs, &dev->routes); 950 } 951 952 static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n) 953 { 954 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); 955 VirtQueue *vq = virtio_get_queue(vdev, n); 956 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq); 957 958 return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL, 959 dev->routes.gsi[n]); 960 } 961 962 static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n) 963 { 964 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); 965 VirtQueue *vq = virtio_get_queue(vdev, n); 966 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq); 967 int ret; 968 969 ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier, 970 dev->routes.gsi[n]); 971 assert(ret == 0); 972 } 973 974 static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n, 975 bool assign, bool with_irqfd) 976 { 977 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); 978 VirtQueue *vq = virtio_get_queue(vdev, n); 979 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq); 980 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 981 982 if (assign) { 983 int r = event_notifier_init(notifier, 0); 984 985 if (r < 0) { 986 return r; 987 } 988 virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd); 989 if (with_irqfd) { 990 r = virtio_ccw_add_irqfd(dev, n); 991 if (r) { 992 virtio_queue_set_guest_notifier_fd_handler(vq, false, 993 with_irqfd); 994 return r; 995 } 996 } 997 /* 998 * We do not support individual masking for channel devices, so we 999 * need to manually trigger any guest masking callbacks here. 1000 */ 1001 if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) { 1002 k->guest_notifier_mask(vdev, n, false); 1003 } 1004 /* get lost events and re-inject */ 1005 if (k->guest_notifier_pending && 1006 k->guest_notifier_pending(vdev, n)) { 1007 event_notifier_set(notifier); 1008 } 1009 } else { 1010 if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) { 1011 k->guest_notifier_mask(vdev, n, true); 1012 } 1013 if (with_irqfd) { 1014 virtio_ccw_remove_irqfd(dev, n); 1015 } 1016 virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd); 1017 event_notifier_cleanup(notifier); 1018 } 1019 return 0; 1020 } 1021 1022 static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs, 1023 bool assigned) 1024 { 1025 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); 1026 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); 1027 CcwDevice *ccw_dev = CCW_DEVICE(d); 1028 bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled(); 1029 int r, n; 1030 1031 if (with_irqfd && assigned) { 1032 /* irq routes need to be set up before assigning irqfds */ 1033 r = virtio_ccw_setup_irqroutes(dev, nvqs); 1034 if (r < 0) { 1035 goto irqroute_error; 1036 } 1037 } 1038 for (n = 0; n < nvqs; n++) { 1039 if (!virtio_queue_get_num(vdev, n)) { 1040 break; 1041 } 1042 r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd); 1043 if (r < 0) { 1044 goto assign_error; 1045 } 1046 } 1047 if (with_irqfd && !assigned) { 1048 /* release irq routes after irqfds have been released */ 1049 virtio_ccw_release_irqroutes(dev, nvqs); 1050 } 1051 return 0; 1052 1053 assign_error: 1054 while (--n >= 0) { 1055 virtio_ccw_set_guest_notifier(dev, n, !assigned, false); 1056 } 1057 irqroute_error: 1058 if (with_irqfd && assigned) { 1059 virtio_ccw_release_irqroutes(dev, nvqs); 1060 } 1061 return r; 1062 } 1063 1064 static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f) 1065 { 1066 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); 1067 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); 1068 1069 qemu_put_be16(f, virtio_queue_vector(vdev, n)); 1070 } 1071 1072 static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f) 1073 { 1074 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); 1075 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); 1076 uint16_t vector; 1077 1078 qemu_get_be16s(f, &vector); 1079 virtio_queue_set_vector(vdev, n , vector); 1080 1081 return 0; 1082 } 1083 1084 static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f) 1085 { 1086 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); 1087 vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL); 1088 } 1089 1090 static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f) 1091 { 1092 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); 1093 return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1); 1094 } 1095 1096 static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp) 1097 { 1098 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); 1099 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); 1100 1101 if (dev->max_rev >= 1) { 1102 virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1); 1103 } 1104 } 1105 1106 /* This is called by virtio-bus just after the device is plugged. */ 1107 static void virtio_ccw_device_plugged(DeviceState *d, Error **errp) 1108 { 1109 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); 1110 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); 1111 CcwDevice *ccw_dev = CCW_DEVICE(d); 1112 SubchDev *sch = ccw_dev->sch; 1113 int n = virtio_get_num_queues(vdev); 1114 S390FLICState *flic = s390_get_flic(); 1115 1116 if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) { 1117 dev->max_rev = 0; 1118 } 1119 1120 if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) { 1121 error_setg(errp, "The number of virtqueues %d " 1122 "exceeds virtio limit %d", n, 1123 VIRTIO_QUEUE_MAX); 1124 return; 1125 } 1126 if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) { 1127 error_setg(errp, "The number of virtqueues %d " 1128 "exceeds flic adapter route limit %d", n, 1129 flic->adapter_routes_max_batch); 1130 return; 1131 } 1132 1133 sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus); 1134 1135 1136 css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid, 1137 d->hotplugged, 1); 1138 } 1139 1140 static void virtio_ccw_device_unplugged(DeviceState *d) 1141 { 1142 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); 1143 1144 virtio_ccw_stop_ioeventfd(dev); 1145 } 1146 /**************** Virtio-ccw Bus Device Descriptions *******************/ 1147 1148 static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp) 1149 { 1150 VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev; 1151 1152 virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev); 1153 virtio_ccw_device_realize(_dev, errp); 1154 } 1155 1156 static void virtio_ccw_busdev_unrealize(DeviceState *dev, Error **errp) 1157 { 1158 VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev; 1159 1160 virtio_ccw_device_unrealize(_dev, errp); 1161 } 1162 1163 static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev, 1164 DeviceState *dev, Error **errp) 1165 { 1166 VirtioCcwDevice *_dev = to_virtio_ccw_dev_fast(dev); 1167 1168 virtio_ccw_stop_ioeventfd(_dev); 1169 } 1170 1171 static void virtio_ccw_device_class_init(ObjectClass *klass, void *data) 1172 { 1173 DeviceClass *dc = DEVICE_CLASS(klass); 1174 CCWDeviceClass *k = CCW_DEVICE_CLASS(dc); 1175 VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass); 1176 1177 k->unplug = virtio_ccw_busdev_unplug; 1178 dc->realize = virtio_ccw_busdev_realize; 1179 dc->unrealize = virtio_ccw_busdev_unrealize; 1180 dc->bus_type = TYPE_VIRTUAL_CSS_BUS; 1181 device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset); 1182 } 1183 1184 static const TypeInfo virtio_ccw_device_info = { 1185 .name = TYPE_VIRTIO_CCW_DEVICE, 1186 .parent = TYPE_CCW_DEVICE, 1187 .instance_size = sizeof(VirtioCcwDevice), 1188 .class_init = virtio_ccw_device_class_init, 1189 .class_size = sizeof(VirtIOCCWDeviceClass), 1190 .abstract = true, 1191 }; 1192 1193 /* virtio-ccw-bus */ 1194 1195 static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size, 1196 VirtioCcwDevice *dev) 1197 { 1198 DeviceState *qdev = DEVICE(dev); 1199 char virtio_bus_name[] = "virtio-bus"; 1200 1201 qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_CCW_BUS, 1202 qdev, virtio_bus_name); 1203 } 1204 1205 static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data) 1206 { 1207 VirtioBusClass *k = VIRTIO_BUS_CLASS(klass); 1208 BusClass *bus_class = BUS_CLASS(klass); 1209 1210 bus_class->max_dev = 1; 1211 k->notify = virtio_ccw_notify; 1212 k->vmstate_change = virtio_ccw_vmstate_change; 1213 k->query_guest_notifiers = virtio_ccw_query_guest_notifiers; 1214 k->set_guest_notifiers = virtio_ccw_set_guest_notifiers; 1215 k->save_queue = virtio_ccw_save_queue; 1216 k->load_queue = virtio_ccw_load_queue; 1217 k->save_config = virtio_ccw_save_config; 1218 k->load_config = virtio_ccw_load_config; 1219 k->pre_plugged = virtio_ccw_pre_plugged; 1220 k->device_plugged = virtio_ccw_device_plugged; 1221 k->device_unplugged = virtio_ccw_device_unplugged; 1222 k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled; 1223 k->ioeventfd_assign = virtio_ccw_ioeventfd_assign; 1224 } 1225 1226 static const TypeInfo virtio_ccw_bus_info = { 1227 .name = TYPE_VIRTIO_CCW_BUS, 1228 .parent = TYPE_VIRTIO_BUS, 1229 .instance_size = sizeof(VirtioCcwBusState), 1230 .class_init = virtio_ccw_bus_class_init, 1231 }; 1232 1233 static void virtio_ccw_register(void) 1234 { 1235 type_register_static(&virtio_ccw_bus_info); 1236 type_register_static(&virtio_ccw_device_info); 1237 } 1238 1239 type_init(virtio_ccw_register) 1240