/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;

struct VirtQueue
{
    VRing vring;
    uint16_t last_avail_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether the signalled_used value above is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    int inuse;

    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};

/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->desc) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
}

static inline uint64_t vring_desc_addr(VirtIODevice *vdev, hwaddr desc_pa,
                                       int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return virtio_ldq_phys(vdev, pa);
}

static inline uint32_t vring_desc_len(VirtIODevice *vdev, hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return virtio_ldl_phys(vdev, pa);
}

static inline uint16_t vring_desc_flags(VirtIODevice *vdev, hwaddr desc_pa,
                                        int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return virtio_lduw_phys(vdev, pa);
}

static inline uint16_t vring_desc_next(VirtIODevice *vdev, hwaddr desc_pa,
                                       int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return virtio_lduw_phys(vdev, pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return virtio_lduw_phys(vq->vdev, pa);
}
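/*
 * Note: when VIRTIO_RING_F_EVENT_IDX has been negotiated, the spare slot
 * after the avail ring (avail->ring[num]) holds the guest's "used_event"
 * index, and the spare slot after the used ring (used->ring[num]) holds the
 * device's "avail_event" index.  vring_get_used_event() and
 * vring_set_avail_event() below read and write those slots.
 */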
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    virtio_stl_phys(vq->vdev, pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    virtio_stl_phys(vq->vdev, pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    virtio_stw_phys(vq->vdev, pa, val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
}

static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys(vq->vdev, pa, val);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (virtio_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    trace_virtqueue_fill(vq, elem, len, idx);

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}
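/*
 * virtqueue_fill() and virtqueue_flush() are split so that a device can
 * complete several buffers and then publish them to the guest with a single
 * used->idx update, e.g. (illustrative sketch only):
 *
 *     virtqueue_fill(vq, &elem0, len0, 0);
 *     virtqueue_fill(vq, &elem1, len1, 1);
 *     virtqueue_flush(vq, 2);
 *
 * virtqueue_push() is the common single-buffer shortcut for the above.
 */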
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;
    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vring_used_idx(vq);
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vring_avail_idx(vq));
        exit(1);
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(VirtIODevice *vdev, hwaddr desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_NEXT)) {
        return max;
    }

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(vdev, desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    return next;
}
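/*
 * A descriptor with VRING_DESC_F_INDIRECT set does not reference data
 * directly; its addr/len instead describe a separate table of VRingDesc
 * entries (len must be a multiple of sizeof(VRingDesc)).  The ring walkers
 * below switch over to that table when they meet such a descriptor.
 */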
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        VirtIODevice *vdev = vq->vdev;
        unsigned int max, num_bufs, indirect = 0;
        hwaddr desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) {
                error_report("Invalid size for indirect buffer table");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                error_report("Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(vdev, desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                error_report("Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
                in_total += vring_desc_len(vdev, desc_pa, i);
            } else {
                out_total += vring_desc_len(vdev, desc_pa, i);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
        } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
                      size_t num_sg, int is_write)
{
    unsigned int i;
    hwaddr len;

    if (num_sg > VIRTQUEUE_MAX_SIZE) {
        error_report("virtio: map attempt out of bounds: %zd > %d",
                     num_sg, VIRTQUEUE_MAX_SIZE);
        exit(1);
    }

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
    }
}
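/*
 * virtqueue_pop() below hands one available buffer chain to the device as a
 * VirtQueueElement.  A typical handle_output callback drains the queue
 * roughly like this (illustrative sketch, not code from this file):
 *
 *     VirtQueueElement elem;
 *
 *     while (virtqueue_pop(vq, &elem)) {
 *         ... read elem.out_sg[], write results into elem.in_sg[] ...
 *         virtqueue_push(vq, &elem, bytes_written);
 *     }
 *     virtio_notify(vdev, vq);
 */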
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;
    VirtIODevice *vdev = vq->vdev;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are none of either input nor output. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) {
            error_report("Invalid size for indirect buffer table");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(vdev, desc_pa, i);
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        struct iovec *sg;

        if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
            if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
                error_report("Too many write descriptors in indirect table");
                exit(1);
            }
            elem->in_addr[elem->in_num] = vring_desc_addr(vdev, desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
                error_report("Too many read descriptors in indirect table");
                exit(1);
            }
            elem->out_addr[elem->out_num] = vring_desc_addr(vdev, desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }

        sg->iov_len = vring_desc_len(vdev, desc_pa, i);

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            error_report("Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);

    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);

    elem->index = head;

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem->in_num + elem->out_num;
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }
    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
    return 0;
}

bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}
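/*
 * Legacy (pre-1.0) virtio devices use the guest's natural endianness for
 * the vring and config space, so on bi-endian targets the device endianness
 * must be tracked per device; virtio 1.0 devices are always little-endian.
 * virtio_default_endian() above gives the target default, while the helper
 * below asks the currently running CPU (used on guest-initiated reset).
 */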
static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}
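/*
 * The *_modern_* config accessors below serve virtio 1.0 transports:
 * multi-byte config fields are always little-endian there, whereas the
 * legacy accessors above use the target's natural byte order.
 */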
uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
}
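/*
 * Legacy transports program only the descriptor table base via
 * virtio_queue_set_addr() above; the avail and used addresses are then
 * derived by virtio_queue_update_rings() from the queue size and alignment.
 * virtio 1.0 transports pass all three addresses explicitly through
 * virtio_queue_set_rings().
 */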
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}

int virtio_queue_get_id(VirtQueue *vq)
{
    VirtIODevice *vdev = vq->vdev;
    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_QUEUE_MAX]);
    return vq - &vdev->vq[0];
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtio_queue_update_rings(vdev, n);
}

void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}
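/*
 * When the transport supplies MSI-X style vectors, vdev->vector_queues[]
 * keeps a list of the queues currently bound to each vector, so transports
 * can walk them with virtio_vector_first_queue()/virtio_vector_next_queue()
 * above.  virtio_queue_set_vector() below maintains that list.
 */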
void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
}

void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}

static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx) {
        return true;
    }

    if (!virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vring_used_idx(vq);
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!vring_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}
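/*
 * Migration: the "needed" callbacks below decide whether the optional
 * vmstate subsections are sent.  The device endianness subsection is only
 * needed when the endianness differs from the target default (or, for
 * VIRTIO 1.0 devices, from little-endian), and the 64-bit features
 * subsection is only needed when feature bits above bit 31 are in use.
 */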
static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_virtio_device_endian,
            .needed = &virtio_device_endian_needed
        },
        {
            .vmsd = &vmstate_virtio_64bit_features,
            .needed = &virtio_64bit_features_needed
        },
        { 0 }
    }
};

void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /* XXX virtio-1 devices */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    /* Subsections */
    vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}

static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}
int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    return virtio_set_features_nocheck(vdev, val);
}

int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of PCI queues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].vring.desc) {
            /* XXX virtio-1 devices */
            virtio_queue_update_rings(vdev, i);
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
        }
    }

    return 0;
}

void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
    g_free(vdev->vector_queues);
}

static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize(vdev, vdev_size, vdev_name);
    object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
    object_unref(OBJECT(vdev));
    qdev_alias_all_properties(vdev, proxy_obj);
}
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
}

hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    /* The avail ring is an array of 16-bit descriptor indexes. */
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}

static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{
    if (assign && set_handler) {
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    } else {
        event_notifier_set_handler(&vq->host_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}

static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
}

static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}

static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)