/*
 * Virtio MMIO bindings
 *
 * Copyright (c) 2011 Linaro Limited
 *
 * Author:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "standard-headers/linux/virtio_mmio.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio-mmio.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "trace.h"

static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
{
    return kvm_eventfds_enabled();
}

static int virtio_mmio_ioeventfd_assign(DeviceState *d,
                                        EventNotifier *notifier,
                                        int n, bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    if (assign) {
        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    }
    return 0;
}

static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}

static void virtio_mmio_soft_reset(VirtIOMMIOProxy *proxy)
{
    int i;

    if (proxy->legacy) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
    }
}

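/*
 * Register dispatch for the transport window: offsets at or above
 * VIRTIO_MMIO_CONFIG are forwarded to the backend's config space and
 * may be accessed at byte/word/long granularity; everything below is
 * a 32-bit transport register and must be accessed with size 4.
 */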
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_read(offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about a bad magic number, and the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC_VALUE:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            if (proxy->legacy) {
                return VIRT_VERSION_LEGACY;
            } else {
                return VIRT_VERSION;
            }
        case VIRTIO_MMIO_VENDOR_ID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            return virtio_config_readb(vdev, offset);
        case 2:
            return virtio_config_readw(vdev, offset);
        case 4:
            return virtio_config_readl(vdev, offset);
        default:
            abort();
        }
    }
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return 0;
    }
    switch (offset) {
    case VIRTIO_MMIO_MAGIC_VALUE:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        if (proxy->legacy) {
            return VIRT_VERSION_LEGACY;
        } else {
            return VIRT_VERSION;
        }
    case VIRTIO_MMIO_DEVICE_ID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDOR_ID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_DEVICE_FEATURES:
        if (proxy->legacy) {
            if (proxy->host_features_sel) {
                return 0;
            } else {
                return vdev->host_features;
            }
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
            return (vdev->host_features & ~vdc->legacy_features)
                >> (32 * proxy->host_features_sel);
        }
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return proxy->vqs[vdev->queue_sel].enabled;
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        return qatomic_read(&vdev->isr);
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_CONFIG_GENERATION:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return vdev->generation;
    case VIRTIO_MMIO_SHM_LEN_LOW:
    case VIRTIO_MMIO_SHM_LEN_HIGH:
        /*
         * VIRTIO_MMIO_SHM_SEL is unimplemented. According to the
         * Linux driver, a region length of -1 means the shared
         * memory region doesn't exist.
         */
        return -1;
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
    case VIRTIO_MMIO_DRIVER_FEATURES:
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
    case VIRTIO_MMIO_QUEUE_SEL:
    case VIRTIO_MMIO_QUEUE_NUM:
    case VIRTIO_MMIO_QUEUE_ALIGN:
    case VIRTIO_MMIO_QUEUE_NOTIFY:
    case VIRTIO_MMIO_INTERRUPT_ACK:
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
    case VIRTIO_MMIO_QUEUE_USED_LOW:
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: read of write-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    }
    return 0;
}

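/*
 * For reference, a non-legacy guest drives the write handler below
 * roughly in this order when bringing up a queue (sequence per the
 * virtio spec; register names as in virtio_mmio.h):
 *
 *   QUEUE_SEL                select the queue to configure
 *   QUEUE_NUM                set the ring size (<= QUEUE_NUM_MAX)
 *   QUEUE_DESC_LOW/HIGH      \
 *   QUEUE_AVAIL_LOW/HIGH      } program the 64-bit ring addresses
 *   QUEUE_USED_LOW/HIGH      /
 *   QUEUE_READY = 1          latch the addresses and enable the queue
 */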
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_write_offset(offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            virtio_config_writeb(vdev, offset, value);
            break;
        case 2:
            virtio_config_writew(vdev, offset, value);
            break;
        case 4:
            virtio_config_writel(vdev, offset, value);
            break;
        default:
            abort();
        }
        return;
    }
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
        if (value) {
            proxy->host_features_sel = 1;
        } else {
            proxy->host_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES:
        if (proxy->legacy) {
            if (proxy->guest_features_sel) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: attempt to write guest features with "
                              "guest_features_sel > 0 in legacy mode\n",
                              __func__);
            } else {
                virtio_set_features(vdev, value);
            }
        } else {
            proxy->guest_features[proxy->guest_features_sel] = value;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
        if (value) {
            proxy->guest_features_sel = 1;
        } else {
            proxy->guest_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        trace_virtio_mmio_guest_page(value, proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUE_SEL:
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NUM:
        trace_virtio_mmio_queue_write(value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);

        if (proxy->legacy) {
            virtio_queue_update_rings(vdev, vdev->queue_sel);
        } else {
            proxy->vqs[vdev->queue_sel].num = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_ALIGN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
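    /*
     * Writing 1 to QUEUE_READY commits the queue configuration: the
     * 64-bit ring addresses are assembled from the {LOW,HIGH} register
     * pairs cached in proxy->vqs[] and handed to the core virtio code.
     */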
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value) {
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                proxy->vqs[vdev->queue_sel].desc[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                proxy->vqs[vdev->queue_sel].avail[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
        } else {
            proxy->vqs[vdev->queue_sel].enabled = 0;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPT_ACK:
        qatomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        if (!proxy->legacy && (value & VIRTIO_CONFIG_S_FEATURES_OK)) {
            virtio_set_features(vdev,
                                ((uint64_t)proxy->guest_features[1]) << 32 |
                                proxy->guest_features[0]);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(vdev);
            virtio_mmio_soft_reset(proxy);
        }
        break;
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[1] = value;
        break;
    case VIRTIO_MMIO_MAGIC_VALUE:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICE_ID:
    case VIRTIO_MMIO_VENDOR_ID:
    case VIRTIO_MMIO_DEVICE_FEATURES:
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
    case VIRTIO_MMIO_INTERRUPT_STATUS:
    case VIRTIO_MMIO_CONFIG_GENERATION:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: write to read-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
    }
}

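/*
 * Legacy (version 1) virtio-mmio uses the guest's native endianness
 * for the register block, while virtio 1.0 mandates little-endian
 * registers; hence the two MemoryRegionOps variants below.
 */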
static const MemoryRegionOps virtio_legacy_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int level;

    if (!vdev) {
        return;
    }
    level = (qatomic_read(&vdev->isr) != 0);
    trace_virtio_mmio_setting_irq(level);
    qemu_set_irq(proxy->irq, level);
}

static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}

static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}

static const VMStateDescription vmstate_virtio_mmio_queue_state = {
    .name = "virtio_mmio/queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOMMIOQueue),
        VMSTATE_BOOL(enabled, VirtIOMMIOQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOMMIOQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_mmio_state_sub = {
    .name = "virtio_mmio/state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOMMIOProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOMMIOProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_mmio_queue_state,
                             VirtIOMMIOQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_mmio = {
    .name = "virtio_mmio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_virtio_mmio_state_sub,
        NULL
    }
};

static void virtio_mmio_save_extra_state(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    vmstate_save_state(f, &vmstate_virtio_mmio, proxy, NULL);
}

static int virtio_mmio_load_extra_state(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    return vmstate_load_state(f, &vmstate_virtio_mmio, proxy, 1);
}

static bool virtio_mmio_has_extra_state(DeviceState *opaque)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    return !proxy->legacy;
}

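/*
 * Full transport reset. Unlike virtio_mmio_soft_reset(), which only
 * clears the per-queue enabled flags, this also resets the feature
 * and page-size latches and, for non-legacy devices, the cached ring
 * geometry of every queue.
 */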
static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    int i;

    virtio_mmio_stop_ioeventfd(proxy);
    virtio_bus_reset(&proxy->bus);
    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;

    if (!proxy->legacy) {
        proxy->guest_features[0] = proxy->guest_features[1] = 0;

        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            proxy->vqs[i].enabled = 0;
            proxy->vqs[i].num = 0;
            proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
            proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
            proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
        }
    }
}

static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

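/*
 * Set up guest notifiers for all in-use queues. virtio-mmio signals
 * the guest through a single level-triggered IRQ line (see
 * virtio_mmio_update_irq()), so irqfd is not used here.
 */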
static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}

static void virtio_mmio_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (!proxy->legacy) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}

/* virtio-mmio device */

static Property virtio_mmio_properties[] = {
    DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
                     format_transport_address, true),
    DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy, legacy, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
                        d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);
    if (proxy->legacy) {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_legacy_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    } else {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    }
    sysbus_init_mmio(sbd, &proxy->iomem);
}

static void virtio_mmio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_mmio_realizefn;
    dc->reset = virtio_mmio_reset;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    device_class_set_props(dc, virtio_mmio_properties);
}

static const TypeInfo virtio_mmio_info = {
    .name = TYPE_VIRTIO_MMIO,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init = virtio_mmio_class_init,
};

/* virtio-mmio-bus. */

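/*
 * Build the migration device path for a backend on this bus. With
 * format_transport_address enabled the path ends in the transport's
 * base address, e.g. "virtio-mmio@000000000a003e00" (address purely
 * illustrative), so multiple transports can be told apart regardless
 * of creation order.
 */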
static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
{
    BusState *virtio_mmio_bus;
    VirtIOMMIOProxy *virtio_mmio_proxy;
    char *proxy_path;
    SysBusDevice *proxy_sbd;
    char *path;

    virtio_mmio_bus = qdev_get_parent_bus(dev);
    virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent);
    proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy));

    /*
     * If @format_transport_address is false, then we just perform the same as
     * virtio_bus_get_dev_path(): we delegate the address formatting for the
     * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
     * (i.e., the device that implements the virtio-mmio bus) resides on. In
     * this case the base address of the virtio-mmio transport will be
     * invisible.
     */
    if (!virtio_mmio_proxy->format_transport_address) {
        return proxy_path;
    }

    /* Otherwise, we append the base address of the transport. */
    proxy_sbd = SYS_BUS_DEVICE(virtio_mmio_proxy);
    assert(proxy_sbd->num_mmio == 1);
    assert(proxy_sbd->mmio[0].memory == &virtio_mmio_proxy->iomem);

    if (proxy_path) {
        path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path,
                               proxy_sbd->mmio[0].addr);
    } else {
        path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx,
                               proxy_sbd->mmio[0].addr);
    }
    g_free(proxy_path);
    return path;
}

static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    k->notify = virtio_mmio_update_irq;
    k->save_config = virtio_mmio_save_config;
    k->load_config = virtio_mmio_load_config;
    k->save_extra_state = virtio_mmio_save_extra_state;
    k->load_extra_state = virtio_mmio_load_extra_state;
    k->has_extra_state = virtio_mmio_has_extra_state;
    k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
    k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
    k->pre_plugged = virtio_mmio_pre_plugged;
    k->has_variable_vring_alignment = true;
    bus_class->max_dev = 1;
    bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
}

static const TypeInfo virtio_mmio_bus_info = {
    .name = TYPE_VIRTIO_MMIO_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init = virtio_mmio_bus_class_init,
};

static void virtio_mmio_register_types(void)
{
    type_register_static(&virtio_mmio_bus_info);
    type_register_static(&virtio_mmio_info);
}

type_init(virtio_mmio_register_types)