/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN 4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;

struct VirtQueue
{
    VRing vring;
    uint16_t last_avail_idx;
    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    int inuse;

    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};

/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->desc) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
}

static inline uint64_t vring_desc_addr(VirtIODevice *vdev, hwaddr desc_pa,
                                       int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return virtio_ldq_phys(vdev, pa);
}

static inline uint32_t vring_desc_len(VirtIODevice *vdev, hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return virtio_ldl_phys(vdev, pa);
}

static inline uint16_t vring_desc_flags(VirtIODevice *vdev, hwaddr desc_pa,
                                        int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return virtio_lduw_phys(vdev, pa);
}

static inline uint16_t vring_desc_next(VirtIODevice *vdev, hwaddr desc_pa,
                                       int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return virtio_lduw_phys(vdev, pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    virtio_stl_phys(vq->vdev, pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    virtio_stl_phys(vq->vdev, pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    virtio_stw_phys(vq->vdev, pa, val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
}

static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys(vq->vdev, pa, val);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    trace_virtqueue_fill(vq, elem, len, idx);

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

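/*
 * virtqueue_fill() and virtqueue_flush() are kept separate so that a
 * device can fill several used ring entries and then make them all
 * visible to the guest with a single used index update; virtqueue_push()
 * below is the single-element shortcut that does both.
 */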
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;
    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vring_used_idx(vq);
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vring_avail_idx(vq));
        exit(1);
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(VirtIODevice *vdev, hwaddr desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_NEXT)) {
        return max;
    }

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(vdev, desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    return next;
}

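/*
 * Walk the descriptor chains currently queued in the avail ring (following
 * indirect tables where VRING_DESC_F_INDIRECT is set) and add up how many
 * bytes the device could read (out_bytes) and write (in_bytes).  The walk
 * stops early once both totals reach the supplied caps.
 */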
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        VirtIODevice *vdev = vq->vdev;
        unsigned int max, num_bufs, indirect = 0;
        hwaddr desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) {
                error_report("Invalid size for indirect buffer table");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                error_report("Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(vdev, desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                error_report("Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
                in_total += vring_desc_len(vdev, desc_pa, i);
            } else {
                out_total += vring_desc_len(vdev, desc_pa, i);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
        } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
                      size_t num_sg, int is_write)
{
    unsigned int i;
    hwaddr len;

    if (num_sg > VIRTQUEUE_MAX_SIZE) {
        error_report("virtio: map attempt out of bounds: %zd > %d",
                     num_sg, VIRTQUEUE_MAX_SIZE);
        exit(1);
    }

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
    }
}

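/*
 * Pop the next available element off the queue: read the head index from
 * the avail ring, walk its descriptor chain (switching to the indirect
 * table if VRING_DESC_F_INDIRECT is set), record guest addresses and
 * lengths in elem->in_addr/out_addr, and map them into elem->in_sg/out_sg.
 * Returns the total number of scatter-gather entries, or 0 if the queue
 * is empty.
 */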
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;
    VirtIODevice *vdev = vq->vdev;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are none of either input nor output. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) {
            error_report("Invalid size for indirect buffer table");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(vdev, desc_pa, i);
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        struct iovec *sg;

        if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
            if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
                error_report("Too many write descriptors in indirect table");
                exit(1);
            }
            elem->in_addr[elem->in_num] = vring_desc_addr(vdev, desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
                error_report("Too many read descriptors in indirect table");
                exit(1);
            }
            elem->out_addr[elem->out_num] = vring_desc_addr(vdev, desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }

        sg->iov_len = vring_desc_len(vdev, desc_pa, i);

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            error_report("Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);

    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);

    elem->index = head;

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem->in_num + elem->out_num;
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }
    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
    return 0;
}

bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

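/*
 * For legacy (pre-1.0) devices the ring layout follows the guest's
 * endianness.  When the guest itself resets the device we snapshot the
 * endianness of the CPU that issued the reset; on a system reset (no CPU
 * context) virtio_reset() falls back to the target default above.
 */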
static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for(i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

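/*
 * The "modern" (virtio 1.0) config accessors below differ from the legacy
 * ones above only in byte order: multi-byte fields are always little
 * endian (lduw_le_p/stl_le_p and friends), whereas the legacy accessors
 * use the target's native byte order.
 */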
uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
}

void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}

int virtio_queue_get_id(VirtQueue *vq)
{
    VirtIODevice *vdev = vq->vdev;
    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_QUEUE_MAX]);
    return vq - &vdev->vq[0];
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtio_queue_update_rings(vdev, n);
}

void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
}

void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}

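/*
 * Decide whether the guest needs an interrupt for this queue.  Without
 * VIRTIO_RING_F_EVENT_IDX the guest can only suppress interrupts via the
 * VRING_AVAIL_F_NO_INTERRUPT flag.  With EVENT_IDX, the guest publishes a
 * used_event index and vring_need_event() checks whether the new used
 * index has crossed it since the last notification we signalled.
 */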
static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (if the feature was negotiated) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vring_used_idx(vq);
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!vring_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}

static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static void put_virtqueue_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        qemu_put_be64(f, vdev->vq[i].vring.avail);
        qemu_put_be64(f, vdev->vq[i].vring.used);
    }
}

static int get_virtqueue_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.avail = qemu_get_be64(f);
        vdev->vq[i].vring.used = qemu_get_be64(f);
    }
    return 0;
}

static VMStateInfo vmstate_info_virtqueue = {
    .name = "virtqueue_state",
    .get = get_virtqueue_state,
    .put = put_virtqueue_state,
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        {
            .name = "virtqueues",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0,
            .info = &vmstate_info_virtqueue,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        NULL
    }
};

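/*
 * Save the common virtio state: transport config, status/ISR/queue_sel,
 * the low 32 feature bits, the config space, then per-queue state for
 * every configured queue.  Device-specific state (vdc->save) and the
 * vmstate subsections go last, so virtio_load() must consume fields in
 * exactly this order.
 */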
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /* XXX virtio-1 devices */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    /* Subsections */
    vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}

static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    return virtio_set_features_nocheck(vdev, val);
}

int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of PCI queues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].vring.desc) {
            /* XXX virtio-1 devices */
            virtio_queue_update_rings(vdev, i);
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
        }
    }

    return 0;
}

void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
    g_free(vdev->vector_queues);
}

static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize(vdev, vdev_size, vdev_name);
    object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
    object_unref(OBJECT(vdev));
    qdev_alias_all_properties(vdev, proxy_obj);
}

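/*
 * Common initialisation shared by all virtio devices: clear the device
 * registers, allocate the fixed-size VirtQueue array (and the per-vector
 * queue lists when the transport reports notification vectors), allocate
 * the config space, and register a vm state change handler so the backend
 * can be started and stopped with the VM.
 */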
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
}

hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}

static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{
    if (assign && set_handler) {
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    } else {
        event_notifier_set_handler(&vq->host_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}

static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
}

static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}

static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)