/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN 4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;

struct VirtQueue
{
    VRing vring;
    uint16_t last_avail_idx;
    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    int inuse;

    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};

/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->desc) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
}

static inline uint64_t vring_desc_addr(VirtIODevice *vdev, hwaddr desc_pa,
                                       int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return virtio_ldq_phys(vdev, pa);
}

static inline uint32_t vring_desc_len(VirtIODevice *vdev, hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return virtio_ldl_phys(vdev, pa);
}

static inline uint16_t vring_desc_flags(VirtIODevice *vdev, hwaddr desc_pa,
                                        int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return virtio_lduw_phys(vdev, pa);
}

static inline uint16_t vring_desc_next(VirtIODevice *vdev, hwaddr desc_pa,
                                       int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return virtio_lduw_phys(vdev, pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return virtio_lduw_phys(vq->vdev, pa);
}
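
/*
 * These accessors read and write the split-ring structures directly in
 * guest physical memory, honouring the device's endianness.  As an
 * illustration of the layout computed by virtio_queue_update_rings()
 * above, take a hypothetical queue with num = 8 and align = 4096, with
 * the descriptor table at guest physical address 0x10000:
 *
 *     desc  = 0x10000   (8 * sizeof(VRingDesc) = 128 bytes)
 *     avail = 0x10080   (flags + idx + ring[8] = 20 bytes, plus a 2-byte
 *                        used_event slot when VIRTIO_RING_F_EVENT_IDX)
 *     used  = 0x11000   (end of avail rounded up to the 4096 alignment)
 */
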
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    virtio_stl_phys(vq->vdev, pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    virtio_stl_phys(vq->vdev, pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    virtio_stw_phys(vq->vdev, pa, val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
}

static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys(vq->vdev, pa, val);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    trace_virtqueue_fill(vq, elem, len, idx);

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;
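
    /*
     * Note that idx is relative to the current used->idx: virtqueue_fill()
     * only stages this element in the used ring without publishing it; the
     * guest cannot see it until a later virtqueue_flush() advances the used
     * index.  A device can therefore complete several elements and expose
     * them all with a single index update.
     */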
    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;
    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vring_used_idx(vq);
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vring_avail_idx(vq));
        exit(1);
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(VirtIODevice *vdev, hwaddr desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_NEXT)) {
        return max;
    }

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(vdev, desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    return next;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        VirtIODevice *vdev = vq->vdev;
        unsigned int max, num_bufs, indirect = 0;
        hwaddr desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) {
                error_report("Invalid size for indirect buffer table");
                exit(1);
            }
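
            /*
             * With VIRTIO_RING_F_INDIRECT_DESC, one slot of the main ring
             * points at a separate table of descriptors in guest memory:
             * addr is the table's address and len its size in bytes, which
             * is why len must be a multiple of sizeof(VRingDesc).  The walk
             * below restarts at index 0 inside that table.
             */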
            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                error_report("Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(vdev, desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                error_report("Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
                in_total += vring_desc_len(vdev, desc_pa, i);
            } else {
                out_total += vring_desc_len(vdev, desc_pa, i);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
        } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
                      size_t num_sg, int is_write)
{
    unsigned int i;
    hwaddr len;

    if (num_sg > VIRTQUEUE_MAX_SIZE) {
        error_report("virtio: map attempt out of bounds: %zd > %d",
                     num_sg, VIRTQUEUE_MAX_SIZE);
        exit(1);
    }

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
    }
}
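
/*
 * A hypothetical device-side consumer of the functions below (sketch only;
 * the handler name and bytes_written are illustrative, and a real handler
 * also needs error handling and device state):
 *
 *     static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         VirtQueueElement elem;
 *
 *         while (virtqueue_pop(vq, &elem)) {
 *             // read the request from elem.out_sg,
 *             // write the reply into elem.in_sg
 *             virtqueue_push(vq, &elem, bytes_written);
 *             virtio_notify(vdev, vq);
 *         }
 *     }
 */
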
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;
    VirtIODevice *vdev = vq->vdev;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are neither input nor output buffers. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) {
            error_report("Invalid size for indirect buffer table");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(vdev, desc_pa, i);
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        struct iovec *sg;

        if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
            if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
                error_report("Too many write descriptors in indirect table");
                exit(1);
            }
            elem->in_addr[elem->in_num] = vring_desc_addr(vdev, desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
                error_report("Too many read descriptors in indirect table");
                exit(1);
            }
            elem->out_addr[elem->out_num] = vring_desc_addr(vdev, desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }

        sg->iov_len = vring_desc_len(vdev, desc_pa, i);

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            error_report("Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);

    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);

    elem->index = head;

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem->in_num + elem->out_num;
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }
    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
    return 0;
}

bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}
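
/*
 * Legacy (pre-1.0) virtio devices use the guest's native endianness for
 * the rings and config space, so device_endian has to be (re)derived on
 * every reset: from the current vCPU on a guest-initiated reset, or from
 * the target default on a system reset.  VIRTIO 1.0 devices are always
 * little-endian regardless of the guest.
 */
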
static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
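
/*
 * The *_modern variants below differ from the legacy accessors above only
 * in byte order: legacy config space uses the guest's native endianness
 * (lduw_p/stw_p and friends), while VIRTIO 1.0 config space is defined to
 * be little-endian (lduw_le_p/stw_le_p and friends).
 */
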
uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
}
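
/*
 * Legacy transports program only the descriptor table address and rely on
 * virtio_queue_set_addr() deriving avail/used from the fixed ring layout;
 * VIRTIO 1.0 transports place the three rings independently, which is why
 * virtio_queue_set_rings() takes all three addresses and does no layout
 * computation of its own.
 */
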
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}

int virtio_queue_get_id(VirtQueue *vq)
{
    VirtIODevice *vdev = vq->vdev;
    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_QUEUE_MAX]);
    return vq - &vdev->vq[0];
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtio_queue_update_rings(vdev, n);
}

void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}
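
/*
 * Several queues may share one MSI-X vector.  vector_queues[vector] is a
 * list threaded through VirtQueue.node, maintained by
 * virtio_queue_set_vector() below and walked with
 * virtio_vector_first_queue()/virtio_vector_next_queue() above.
 */
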
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
    vdev->vq[n].vring.num_default = 0;
}

void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}

static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (if the feature was acknowledged) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vring_used_idx(vq);
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!vring_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}
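
/*
 * Migration: the virtio core streams most of its state by hand in
 * virtio_save()/virtio_load() below, for compatibility with old stream
 * formats.  Newer pieces of state (device endianness, the high 32 feature
 * bits, per-queue ring addresses and sizes) travel as vmstate subsections,
 * each guarded by a *_needed() predicate so that streams sent to older
 * QEMUs are unchanged when the extra state is not required.
 */
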
static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

static void put_virtqueue_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        qemu_put_be64(f, vdev->vq[i].vring.avail);
        qemu_put_be64(f, vdev->vq[i].vring.used);
    }
}

static int get_virtqueue_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.avail = qemu_get_be64(f);
        vdev->vq[i].vring.used = qemu_get_be64(f);
    }
    return 0;
}

static VMStateInfo vmstate_info_virtqueue = {
    .name = "virtqueue_state",
    .get = get_virtqueue_state,
    .put = put_virtqueue_state,
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        {
            .name = "virtqueues",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0,
            .info = &vmstate_info_virtqueue,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static void put_ringsize_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        qemu_put_be32(f, vdev->vq[i].vring.num_default);
    }
}

static int get_ringsize_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.num_default = qemu_get_be32(f);
    }
    return 0;
}

static VMStateInfo vmstate_info_ringsize = {
    .name = "ringsize_state",
    .get = get_ringsize_state,
    .put = put_ringsize_state,
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        {
            .name = "ringsize",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0,
            .info = &vmstate_info_ringsize,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};
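
/*
 * The legacy stream format only carries the low 32 feature bits (see the
 * qemu_put_be32s() of guest_features_lo in virtio_save() below), so the
 * full 64-bit guest_features value is re-sent in this subsection whenever
 * the device actually offers features above bit 31.
 */
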
static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        NULL
    }
};

void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /* XXX virtio-1 devices */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    /* Subsections */
    vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}

static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    return virtio_set_features_nocheck(vdev, val);
}
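
/*
 * virtio_load() must consume the stream in exactly the order virtio_save()
 * produced it: transport config, core fields, queue count, per-queue
 * records, the device-specific payload, and finally the vmstate
 * subsections.
 */
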
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].vring.desc) {
            /* XXX virtio-1 devices */
            virtio_queue_update_rings(vdev, i);
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
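            /*
             * nheads is computed modulo 2^16, matching how the guest's
             * avail index wraps, so any legitimate delta lands in
             * [0, vring.num]; a larger value can only come from a corrupt
             * or hostile migration stream.
             */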
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
        }
    }

    return 0;
}

void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
    g_free(vdev->vector_queues);
}

static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize(vdev, vdev_size, vdev_name);
    object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
    object_unref(OBJECT(vdev));
    qdev_alias_all_properties(vdev, proxy_obj);
}
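
/*
 * Typical realize-time setup in a device model (illustrative sketch; the
 * device name, ID, queue size, and handler are hypothetical):
 *
 *     virtio_init(vdev, "my-virtio-dev", VIRTIO_ID_BLOCK,
 *                 sizeof(struct my_config));
 *     vq = virtio_add_queue(vdev, 128, my_handle_output);
 */
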
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
}

hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}

static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}
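
/*
 * Two eventfd-style notifiers per queue: the guest notifier carries
 * device-to-guest interrupts (when irqfd is in use, KVM injects the
 * interrupt directly and no QEMU-side handler is installed), while the
 * host notifier carries guest-to-device kicks (ioeventfd), dispatched to
 * virtio_queue_notify_vq() by the handler below.
 */
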
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{
    if (assign && set_handler) {
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    } else {
        event_notifier_set_handler(&vq->host_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}

static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
}

static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}

static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)