/*
 * Virtio MMIO bindings
 *
 * Copyright (c) 2011 Linaro Limited
 *
 * Author:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "standard-headers/linux/virtio_mmio.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "sysemu/kvm.h"
#include "sysemu/replay.h"
#include "hw/virtio/virtio-mmio.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "trace.h"

static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    return (proxy->flags & VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD) != 0;
}

static int virtio_mmio_ioeventfd_assign(DeviceState *d,
                                        EventNotifier *notifier,
                                        int n, bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    if (assign) {
        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    }
    return 0;
}

static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}

static void virtio_mmio_soft_reset(VirtIOMMIOProxy *proxy)
{
    int i;

    virtio_bus_reset(&proxy->bus);

    if (!proxy->legacy) {
        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            proxy->vqs[i].enabled = 0;
        }
    }
}
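
/*
 * MMIO register read dispatch. Offsets at or above VIRTIO_MMIO_CONFIG
 * fall through to the backend's config space (byte-accessible);
 * everything below that is a transport register and must be read
 * with 32-bit accesses.
 */
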
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_read(offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC_VALUE:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            if (proxy->legacy) {
                return VIRT_VERSION_LEGACY;
            } else {
                return VIRT_VERSION;
            }
        case VIRTIO_MMIO_VENDOR_ID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        if (proxy->legacy) {
            switch (size) {
            case 1:
                return virtio_config_readb(vdev, offset);
            case 2:
                return virtio_config_readw(vdev, offset);
            case 4:
                return virtio_config_readl(vdev, offset);
            default:
                abort();
            }
        } else {
            switch (size) {
            case 1:
                return virtio_config_modern_readb(vdev, offset);
            case 2:
                return virtio_config_modern_readw(vdev, offset);
            case 4:
                return virtio_config_modern_readl(vdev, offset);
            default:
                abort();
            }
        }
    }
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return 0;
    }
    switch (offset) {
    case VIRTIO_MMIO_MAGIC_VALUE:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        if (proxy->legacy) {
            return VIRT_VERSION_LEGACY;
        } else {
            return VIRT_VERSION;
        }
    case VIRTIO_MMIO_DEVICE_ID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDOR_ID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_DEVICE_FEATURES:
        if (proxy->legacy) {
            if (proxy->host_features_sel) {
                return 0;
            } else {
                return vdev->host_features;
            }
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
            return (vdev->host_features & ~vdc->legacy_features)
                   >> (32 * proxy->host_features_sel);
        }
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
               >> proxy->guest_page_shift;
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return proxy->vqs[vdev->queue_sel].enabled;
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        return qatomic_read(&vdev->isr);
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_CONFIG_GENERATION:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return vdev->generation;
    case VIRTIO_MMIO_SHM_LEN_LOW:
    case VIRTIO_MMIO_SHM_LEN_HIGH:
        /*
         * VIRTIO_MMIO_SHM_SEL is unimplemented
         * according to the linux driver, if region length is -1
         * the shared memory doesn't exist
         */
        return -1;
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
    case VIRTIO_MMIO_DRIVER_FEATURES:
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
    case VIRTIO_MMIO_QUEUE_SEL:
    case VIRTIO_MMIO_QUEUE_NUM:
    case VIRTIO_MMIO_QUEUE_ALIGN:
    case VIRTIO_MMIO_QUEUE_NOTIFY:
    case VIRTIO_MMIO_INTERRUPT_ACK:
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
    case VIRTIO_MMIO_QUEUE_USED_LOW:
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: read of write-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    }
    return 0;
}
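
/*
 * MMIO register write dispatch. In legacy (virtio 0.9.5) mode the
 * guest places a whole queue with a single page frame number: it
 * first writes its page size to GUEST_PAGE_SIZE (stored below as a
 * shift, e.g. 4096 -> 12), then writes the PFN to QUEUE_PFN, so a
 * PFN of 0x12345 with 4K pages puts the queue at guest physical
 * address 0x12345000. Modern (virtio 1.0) mode instead passes the
 * descriptor, avail and used ring addresses individually as 64-bit
 * values split across LOW/HIGH register pairs.
 */
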
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vq_idx;

    trace_virtio_mmio_write_offset(offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        if (proxy->legacy) {
            switch (size) {
            case 1:
                virtio_config_writeb(vdev, offset, value);
                break;
            case 2:
                virtio_config_writew(vdev, offset, value);
                break;
            case 4:
                virtio_config_writel(vdev, offset, value);
                break;
            default:
                abort();
            }
            return;
        } else {
            switch (size) {
            case 1:
                virtio_config_modern_writeb(vdev, offset, value);
                break;
            case 2:
                virtio_config_modern_writew(vdev, offset, value);
                break;
            case 4:
                virtio_config_modern_writel(vdev, offset, value);
                break;
            default:
                abort();
            }
            return;
        }
    }
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
        if (value) {
            proxy->host_features_sel = 1;
        } else {
            proxy->host_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES:
        if (proxy->legacy) {
            if (proxy->guest_features_sel) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: attempt to write guest features with "
                              "guest_features_sel > 0 in legacy mode\n",
                              __func__);
            } else {
                virtio_set_features(vdev, value);
            }
        } else {
            proxy->guest_features[proxy->guest_features_sel] = value;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
        if (value) {
            proxy->guest_features_sel = 1;
        } else {
            proxy->guest_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        trace_virtio_mmio_guest_page(value, proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUE_SEL:
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NUM:
        trace_virtio_mmio_queue_write(value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);

        if (proxy->legacy) {
            virtio_queue_update_rings(vdev, vdev->queue_sel);
        } else {
            virtio_init_region_cache(vdev, vdev->queue_sel);
            proxy->vqs[vdev->queue_sel].num = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_ALIGN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value == 0) {
            virtio_mmio_soft_reset(proxy);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
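    /*
     * Modern (virtio 1.0) queue setup: the driver programs the queue
     * size and the three ring addresses below, then commits them with
     * a write of 1 to QUEUE_READY, at which point the 64-bit addresses
     * are assembled from their LOW/HIGH halves as
     * ((uint64_t)hi << 32) | lo. For QUEUE_NOTIFY, the low 16 bits of
     * the value select the queue; if VIRTIO_F_NOTIFICATION_DATA was
     * negotiated, the next 16 bits carry the driver's avail index.
     */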
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value) {
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                proxy->vqs[vdev->queue_sel].desc[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                proxy->vqs[vdev->queue_sel].avail[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
        } else {
            proxy->vqs[vdev->queue_sel].enabled = 0;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NOTIFY:
        vq_idx = value;
        if (vq_idx < VIRTIO_QUEUE_MAX && virtio_queue_get_num(vdev, vq_idx)) {
            if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFICATION_DATA)) {
                VirtQueue *vq = virtio_get_queue(vdev, vq_idx);

                virtio_queue_set_shadow_avail_idx(vq, (value >> 16) & 0xFFFF);
            }
            virtio_queue_notify(vdev, vq_idx);
        }
        break;
    case VIRTIO_MMIO_INTERRUPT_ACK:
        qatomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
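    /*
     * Status writes order ioeventfd transitions around the status
     * change: ioeventfd is stopped before a write that clears
     * DRIVER_OK takes effect and started only after one that sets it.
     * FEATURES_OK is also where the two 32-bit feature words
     * accumulated in DRIVER_FEATURES are committed to the backend,
     * and a write of 0 resets the device.
     */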
    case VIRTIO_MMIO_STATUS:
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        if (!proxy->legacy && (value & VIRTIO_CONFIG_S_FEATURES_OK)) {
            virtio_set_features(vdev,
                                ((uint64_t)proxy->guest_features[1]) << 32 |
                                proxy->guest_features[0]);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_mmio_soft_reset(proxy);
        }
        break;
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[1] = value;
        break;
    case VIRTIO_MMIO_MAGIC_VALUE:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICE_ID:
    case VIRTIO_MMIO_VENDOR_ID:
    case VIRTIO_MMIO_DEVICE_FEATURES:
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
    case VIRTIO_MMIO_INTERRUPT_STATUS:
    case VIRTIO_MMIO_CONFIG_GENERATION:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: write to read-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
    }
}
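
/*
 * Legacy virtio-mmio registers use the guest's native endianness,
 * whereas virtio 1.0 fixes the transport to little-endian; the two
 * MemoryRegionOps below differ only in that attribute.
 */
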
static const MemoryRegionOps virtio_legacy_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int level;

    if (!vdev) {
        return;
    }
    level = (qatomic_read(&vdev->isr) != 0);
    trace_virtio_mmio_setting_irq(level);
    qemu_set_irq(proxy->irq, level);
}

static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}

static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}

static const VMStateDescription vmstate_virtio_mmio_queue_state = {
    .name = "virtio_mmio/queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOMMIOQueue),
        VMSTATE_BOOL(enabled, VirtIOMMIOQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOMMIOQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_mmio_state_sub = {
    .name = "virtio_mmio/state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOMMIOProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOMMIOProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_mmio_queue_state,
                             VirtIOMMIOQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_mmio = {
    .name = "virtio_mmio",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_virtio_mmio_state_sub,
        NULL
    }
};
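
/*
 * The modern-mode transport state above travels as a vmstate
 * subsection, keeping legacy-mode migration streams unchanged; it
 * is only saved or loaded at all when has_extra_state below reports
 * true, i.e. for non-legacy proxies.
 */
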
static void virtio_mmio_save_extra_state(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    vmstate_save_state(f, &vmstate_virtio_mmio, proxy, NULL);
}

static int virtio_mmio_load_extra_state(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    return vmstate_load_state(f, &vmstate_virtio_mmio, proxy, 1);
}

static bool virtio_mmio_has_extra_state(DeviceState *opaque)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    return !proxy->legacy;
}

static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    int i;

    virtio_mmio_soft_reset(proxy);

    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;

    if (!proxy->legacy) {
        proxy->guest_features[0] = proxy->guest_features[1] = 0;

        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            proxy->vqs[i].num = 0;
            proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
            proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
            proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
        }
    }
}

static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static int virtio_mmio_set_config_guest_notifier(DeviceState *d, bool assign,
                                                 bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    EventNotifier *notifier = virtio_config_get_guest_notifier(vdev);
    int r = 0;

    if (assign) {
        r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_config_set_guest_notifier_fd_handler(vdev, assign, with_irqfd);
    } else {
        virtio_config_set_guest_notifier_fd_handler(vdev, assign, with_irqfd);
        event_notifier_cleanup(notifier);
    }
    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, VIRTIO_CONFIG_IRQ_IDX, !assign);
    }
    return r;
}
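
/*
 * Wire up guest notifiers for the first nvqs queues plus the config
 * interrupt. virtio-mmio has a single level-triggered IRQ line and
 * (so far) no irqfd path, so with_irqfd stays false and notifier
 * events are delivered through the fd handlers installed above.
 */
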
static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    r = virtio_mmio_set_config_guest_notifier(d, assign, with_irqfd);
    if (r < 0) {
        goto assign_error;
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}

static void virtio_mmio_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (!proxy->legacy) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}

/* virtio-mmio device */

static Property virtio_mmio_properties[] = {
    DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
                     format_transport_address, true),
    DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy, legacy, true),
    DEFINE_PROP_BIT("ioeventfd", VirtIOMMIOProxy, flags,
                    VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_init(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS, d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);

    /* fd-based ioevents can't be synchronized in record/replay */
    if (replay_mode != REPLAY_MODE_NONE) {
        proxy->flags &= ~VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD;
    }

    if (proxy->legacy) {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_legacy_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    } else {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    }
    sysbus_init_mmio(sbd, &proxy->iomem);
}

static void virtio_mmio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_mmio_realizefn;
    dc->reset = virtio_mmio_reset;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    device_class_set_props(dc, virtio_mmio_properties);
}

static const TypeInfo virtio_mmio_info = {
    .name = TYPE_VIRTIO_MMIO,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init = virtio_mmio_class_init,
};

/* virtio-mmio-bus. */

static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
{
    BusState *virtio_mmio_bus;
    VirtIOMMIOProxy *virtio_mmio_proxy;
    char *proxy_path;
    char *path;
    MemoryRegionSection section;

    virtio_mmio_bus = qdev_get_parent_bus(dev);
    virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent);
    proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy));

    /*
     * If @format_transport_address is false, then we just perform the same as
     * virtio_bus_get_dev_path(): we delegate the address formatting for the
     * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
     * (i.e., the device that implements the virtio-mmio bus) resides on. In
     * this case the base address of the virtio-mmio transport will be
     * invisible.
     */
    if (!virtio_mmio_proxy->format_transport_address) {
        return proxy_path;
    }

    /* Otherwise, we append the base address of the transport. */
    section = memory_region_find(&virtio_mmio_proxy->iomem, 0, 0x200);
    assert(section.mr);

    if (proxy_path) {
        path = g_strdup_printf("%s/virtio-mmio@" HWADDR_FMT_plx, proxy_path,
                               section.offset_within_address_space);
    } else {
        path = g_strdup_printf("virtio-mmio@" HWADDR_FMT_plx,
                               section.offset_within_address_space);
    }
    memory_region_unref(section.mr);

    g_free(proxy_path);
    return path;
}

static void virtio_mmio_vmstate_change(DeviceState *d, bool running)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    if (running) {
        virtio_mmio_start_ioeventfd(proxy);
    } else {
        virtio_mmio_stop_ioeventfd(proxy);
    }
}

static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    k->notify = virtio_mmio_update_irq;
    k->save_config = virtio_mmio_save_config;
    k->load_config = virtio_mmio_load_config;
    k->save_extra_state = virtio_mmio_save_extra_state;
    k->load_extra_state = virtio_mmio_load_extra_state;
    k->has_extra_state = virtio_mmio_has_extra_state;
    k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
    k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
    k->pre_plugged = virtio_mmio_pre_plugged;
    k->vmstate_change = virtio_mmio_vmstate_change;
    k->has_variable_vring_alignment = true;
    bus_class->max_dev = 1;
    bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
}

static const TypeInfo virtio_mmio_bus_info = {
    .name = TYPE_VIRTIO_MMIO_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init = virtio_mmio_bus_class_init,
};

static void virtio_mmio_register_types(void)
{
    type_register_static(&virtio_mmio_bus_info);
    type_register_static(&virtio_mmio_info);
}

type_init(virtio_mmio_register_types)
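
/*
 * Usage sketch (illustrative, not part of this file): a board model
 * creates the transport, e.g. with
 *     sysbus_create_simple(TYPE_VIRTIO_MMIO, base, irq);
 * and a virtio backend such as virtio-blk-device then plugs into the
 * proxy's single-slot bus, e.g. "-device virtio-blk-device,drive=hd0"
 * on a machine with a free virtio-mmio transport. Exact wiring is
 * board specific.
 */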