1 /* 2 * Virtio Support 3 * 4 * Copyright IBM, Corp. 2007 5 * 6 * Authors: 7 * Anthony Liguori <aliguori@us.ibm.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 */ 13 14 #include <inttypes.h> 15 16 #include "trace.h" 17 #include "exec/address-spaces.h" 18 #include "qemu/error-report.h" 19 #include "hw/virtio/virtio.h" 20 #include "qemu/atomic.h" 21 #include "hw/virtio/virtio-bus.h" 22 #include "migration/migration.h" 23 #include "hw/virtio/virtio-access.h" 24 25 /* 26 * The alignment to use between consumer and producer parts of vring. 27 * x86 pagesize again. This is the default, used by transports like PCI 28 * which don't provide a means for the guest to tell the host the alignment. 29 */ 30 #define VIRTIO_PCI_VRING_ALIGN 4096 31 32 typedef struct VRingDesc 33 { 34 uint64_t addr; 35 uint32_t len; 36 uint16_t flags; 37 uint16_t next; 38 } VRingDesc; 39 40 typedef struct VRingAvail 41 { 42 uint16_t flags; 43 uint16_t idx; 44 uint16_t ring[0]; 45 } VRingAvail; 46 47 typedef struct VRingUsedElem 48 { 49 uint32_t id; 50 uint32_t len; 51 } VRingUsedElem; 52 53 typedef struct VRingUsed 54 { 55 uint16_t flags; 56 uint16_t idx; 57 VRingUsedElem ring[0]; 58 } VRingUsed; 59 60 typedef struct VRing 61 { 62 unsigned int num; 63 unsigned int align; 64 hwaddr desc; 65 hwaddr avail; 66 hwaddr used; 67 } VRing; 68 69 struct VirtQueue 70 { 71 VRing vring; 72 uint16_t last_avail_idx; 73 /* Last used index value we have signalled on */ 74 uint16_t signalled_used; 75 76 /* Last used index value we have signalled on */ 77 bool signalled_used_valid; 78 79 /* Notification enabled? */ 80 bool notification; 81 82 uint16_t queue_index; 83 84 int inuse; 85 86 uint16_t vector; 87 void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq); 88 VirtIODevice *vdev; 89 EventNotifier guest_notifier; 90 EventNotifier host_notifier; 91 QLIST_ENTRY(VirtQueue) node; 92 }; 93 94 /* virt queue functions */ 95 void virtio_queue_update_rings(VirtIODevice *vdev, int n) 96 { 97 VRing *vring = &vdev->vq[n].vring; 98 99 if (!vring->desc) { 100 /* not yet setup -> nothing to do */ 101 return; 102 } 103 vring->avail = vring->desc + vring->num * sizeof(VRingDesc); 104 vring->used = vring_align(vring->avail + 105 offsetof(VRingAvail, ring[vring->num]), 106 vring->align); 107 } 108 109 static inline uint64_t vring_desc_addr(VirtIODevice *vdev, hwaddr desc_pa, 110 int i) 111 { 112 hwaddr pa; 113 pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr); 114 return virtio_ldq_phys(vdev, pa); 115 } 116 117 static inline uint32_t vring_desc_len(VirtIODevice *vdev, hwaddr desc_pa, int i) 118 { 119 hwaddr pa; 120 pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len); 121 return virtio_ldl_phys(vdev, pa); 122 } 123 124 static inline uint16_t vring_desc_flags(VirtIODevice *vdev, hwaddr desc_pa, 125 int i) 126 { 127 hwaddr pa; 128 pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags); 129 return virtio_lduw_phys(vdev, pa); 130 } 131 132 static inline uint16_t vring_desc_next(VirtIODevice *vdev, hwaddr desc_pa, 133 int i) 134 { 135 hwaddr pa; 136 pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next); 137 return virtio_lduw_phys(vdev, pa); 138 } 139 140 static inline uint16_t vring_avail_flags(VirtQueue *vq) 141 { 142 hwaddr pa; 143 pa = vq->vring.avail + offsetof(VRingAvail, flags); 144 return virtio_lduw_phys(vq->vdev, pa); 145 } 146 147 static inline uint16_t vring_avail_idx(VirtQueue *vq) 148 { 149 hwaddr pa; 150 pa = vq->vring.avail + offsetof(VRingAvail, idx); 151 return virtio_lduw_phys(vq->vdev, pa); 152 } 153 154 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i) 155 { 156 hwaddr pa; 157 pa = vq->vring.avail + offsetof(VRingAvail, ring[i]); 158 return virtio_lduw_phys(vq->vdev, pa); 159 } 160 161 static inline uint16_t vring_get_used_event(VirtQueue *vq) 162 { 163 return vring_avail_ring(vq, vq->vring.num); 164 } 165 166 static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val) 167 { 168 hwaddr pa; 169 pa = vq->vring.used + offsetof(VRingUsed, ring[i].id); 170 virtio_stl_phys(vq->vdev, pa, val); 171 } 172 173 static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val) 174 { 175 hwaddr pa; 176 pa = vq->vring.used + offsetof(VRingUsed, ring[i].len); 177 virtio_stl_phys(vq->vdev, pa, val); 178 } 179 180 static uint16_t vring_used_idx(VirtQueue *vq) 181 { 182 hwaddr pa; 183 pa = vq->vring.used + offsetof(VRingUsed, idx); 184 return virtio_lduw_phys(vq->vdev, pa); 185 } 186 187 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val) 188 { 189 hwaddr pa; 190 pa = vq->vring.used + offsetof(VRingUsed, idx); 191 virtio_stw_phys(vq->vdev, pa, val); 192 } 193 194 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask) 195 { 196 VirtIODevice *vdev = vq->vdev; 197 hwaddr pa; 198 pa = vq->vring.used + offsetof(VRingUsed, flags); 199 virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask); 200 } 201 202 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask) 203 { 204 VirtIODevice *vdev = vq->vdev; 205 hwaddr pa; 206 pa = vq->vring.used + offsetof(VRingUsed, flags); 207 virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask); 208 } 209 210 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val) 211 { 212 hwaddr pa; 213 if (!vq->notification) { 214 return; 215 } 216 pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]); 217 virtio_stw_phys(vq->vdev, pa, val); 218 } 219 220 void virtio_queue_set_notification(VirtQueue *vq, int enable) 221 { 222 vq->notification = enable; 223 if (virtio_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) { 224 vring_set_avail_event(vq, vring_avail_idx(vq)); 225 } else if (enable) { 226 vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY); 227 } else { 228 vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY); 229 } 230 if (enable) { 231 /* Expose avail event/used flags before caller checks the avail idx. */ 232 smp_mb(); 233 } 234 } 235 236 int virtio_queue_ready(VirtQueue *vq) 237 { 238 return vq->vring.avail != 0; 239 } 240 241 int virtio_queue_empty(VirtQueue *vq) 242 { 243 return vring_avail_idx(vq) == vq->last_avail_idx; 244 } 245 246 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem, 247 unsigned int len, unsigned int idx) 248 { 249 unsigned int offset; 250 int i; 251 252 trace_virtqueue_fill(vq, elem, len, idx); 253 254 offset = 0; 255 for (i = 0; i < elem->in_num; i++) { 256 size_t size = MIN(len - offset, elem->in_sg[i].iov_len); 257 258 cpu_physical_memory_unmap(elem->in_sg[i].iov_base, 259 elem->in_sg[i].iov_len, 260 1, size); 261 262 offset += size; 263 } 264 265 for (i = 0; i < elem->out_num; i++) 266 cpu_physical_memory_unmap(elem->out_sg[i].iov_base, 267 elem->out_sg[i].iov_len, 268 0, elem->out_sg[i].iov_len); 269 270 idx = (idx + vring_used_idx(vq)) % vq->vring.num; 271 272 /* Get a pointer to the next entry in the used ring. */ 273 vring_used_ring_id(vq, idx, elem->index); 274 vring_used_ring_len(vq, idx, len); 275 } 276 277 void virtqueue_flush(VirtQueue *vq, unsigned int count) 278 { 279 uint16_t old, new; 280 /* Make sure buffer is written before we update index. */ 281 smp_wmb(); 282 trace_virtqueue_flush(vq, count); 283 old = vring_used_idx(vq); 284 new = old + count; 285 vring_used_idx_set(vq, new); 286 vq->inuse -= count; 287 if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) 288 vq->signalled_used_valid = false; 289 } 290 291 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem, 292 unsigned int len) 293 { 294 virtqueue_fill(vq, elem, len, 0); 295 virtqueue_flush(vq, 1); 296 } 297 298 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx) 299 { 300 uint16_t num_heads = vring_avail_idx(vq) - idx; 301 302 /* Check it isn't doing very strange things with descriptor numbers. */ 303 if (num_heads > vq->vring.num) { 304 error_report("Guest moved used index from %u to %u", 305 idx, vring_avail_idx(vq)); 306 exit(1); 307 } 308 /* On success, callers read a descriptor at vq->last_avail_idx. 309 * Make sure descriptor read does not bypass avail index read. */ 310 if (num_heads) { 311 smp_rmb(); 312 } 313 314 return num_heads; 315 } 316 317 static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx) 318 { 319 unsigned int head; 320 321 /* Grab the next descriptor number they're advertising, and increment 322 * the index we've seen. */ 323 head = vring_avail_ring(vq, idx % vq->vring.num); 324 325 /* If their number is silly, that's a fatal mistake. */ 326 if (head >= vq->vring.num) { 327 error_report("Guest says index %u is available", head); 328 exit(1); 329 } 330 331 return head; 332 } 333 334 static unsigned virtqueue_next_desc(VirtIODevice *vdev, hwaddr desc_pa, 335 unsigned int i, unsigned int max) 336 { 337 unsigned int next; 338 339 /* If this descriptor says it doesn't chain, we're done. */ 340 if (!(vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_NEXT)) { 341 return max; 342 } 343 344 /* Check they're not leading us off end of descriptors. */ 345 next = vring_desc_next(vdev, desc_pa, i); 346 /* Make sure compiler knows to grab that: we don't want it changing! */ 347 smp_wmb(); 348 349 if (next >= max) { 350 error_report("Desc next is %u", next); 351 exit(1); 352 } 353 354 return next; 355 } 356 357 void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, 358 unsigned int *out_bytes, 359 unsigned max_in_bytes, unsigned max_out_bytes) 360 { 361 unsigned int idx; 362 unsigned int total_bufs, in_total, out_total; 363 364 idx = vq->last_avail_idx; 365 366 total_bufs = in_total = out_total = 0; 367 while (virtqueue_num_heads(vq, idx)) { 368 VirtIODevice *vdev = vq->vdev; 369 unsigned int max, num_bufs, indirect = 0; 370 hwaddr desc_pa; 371 int i; 372 373 max = vq->vring.num; 374 num_bufs = total_bufs; 375 i = virtqueue_get_head(vq, idx++); 376 desc_pa = vq->vring.desc; 377 378 if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) { 379 if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) { 380 error_report("Invalid size for indirect buffer table"); 381 exit(1); 382 } 383 384 /* If we've got too many, that implies a descriptor loop. */ 385 if (num_bufs >= max) { 386 error_report("Looped descriptor"); 387 exit(1); 388 } 389 390 /* loop over the indirect descriptor table */ 391 indirect = 1; 392 max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc); 393 desc_pa = vring_desc_addr(vdev, desc_pa, i); 394 num_bufs = i = 0; 395 } 396 397 do { 398 /* If we've got too many, that implies a descriptor loop. */ 399 if (++num_bufs > max) { 400 error_report("Looped descriptor"); 401 exit(1); 402 } 403 404 if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) { 405 in_total += vring_desc_len(vdev, desc_pa, i); 406 } else { 407 out_total += vring_desc_len(vdev, desc_pa, i); 408 } 409 if (in_total >= max_in_bytes && out_total >= max_out_bytes) { 410 goto done; 411 } 412 } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max); 413 414 if (!indirect) 415 total_bufs = num_bufs; 416 else 417 total_bufs++; 418 } 419 done: 420 if (in_bytes) { 421 *in_bytes = in_total; 422 } 423 if (out_bytes) { 424 *out_bytes = out_total; 425 } 426 } 427 428 int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes, 429 unsigned int out_bytes) 430 { 431 unsigned int in_total, out_total; 432 433 virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes); 434 return in_bytes <= in_total && out_bytes <= out_total; 435 } 436 437 void virtqueue_map_sg(struct iovec *sg, hwaddr *addr, 438 size_t num_sg, int is_write) 439 { 440 unsigned int i; 441 hwaddr len; 442 443 if (num_sg > VIRTQUEUE_MAX_SIZE) { 444 error_report("virtio: map attempt out of bounds: %zd > %d", 445 num_sg, VIRTQUEUE_MAX_SIZE); 446 exit(1); 447 } 448 449 for (i = 0; i < num_sg; i++) { 450 len = sg[i].iov_len; 451 sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write); 452 if (sg[i].iov_base == NULL || len != sg[i].iov_len) { 453 error_report("virtio: error trying to map MMIO memory"); 454 exit(1); 455 } 456 } 457 } 458 459 int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem) 460 { 461 unsigned int i, head, max; 462 hwaddr desc_pa = vq->vring.desc; 463 VirtIODevice *vdev = vq->vdev; 464 465 if (!virtqueue_num_heads(vq, vq->last_avail_idx)) 466 return 0; 467 468 /* When we start there are none of either input nor output. */ 469 elem->out_num = elem->in_num = 0; 470 471 max = vq->vring.num; 472 473 i = head = virtqueue_get_head(vq, vq->last_avail_idx++); 474 if (virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { 475 vring_set_avail_event(vq, vq->last_avail_idx); 476 } 477 478 if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) { 479 if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) { 480 error_report("Invalid size for indirect buffer table"); 481 exit(1); 482 } 483 484 /* loop over the indirect descriptor table */ 485 max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc); 486 desc_pa = vring_desc_addr(vdev, desc_pa, i); 487 i = 0; 488 } 489 490 /* Collect all the descriptors */ 491 do { 492 struct iovec *sg; 493 494 if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) { 495 if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) { 496 error_report("Too many write descriptors in indirect table"); 497 exit(1); 498 } 499 elem->in_addr[elem->in_num] = vring_desc_addr(vdev, desc_pa, i); 500 sg = &elem->in_sg[elem->in_num++]; 501 } else { 502 if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) { 503 error_report("Too many read descriptors in indirect table"); 504 exit(1); 505 } 506 elem->out_addr[elem->out_num] = vring_desc_addr(vdev, desc_pa, i); 507 sg = &elem->out_sg[elem->out_num++]; 508 } 509 510 sg->iov_len = vring_desc_len(vdev, desc_pa, i); 511 512 /* If we've got too many, that implies a descriptor loop. */ 513 if ((elem->in_num + elem->out_num) > max) { 514 error_report("Looped descriptor"); 515 exit(1); 516 } 517 } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max); 518 519 /* Now map what we have collected */ 520 virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1); 521 virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0); 522 523 elem->index = head; 524 525 vq->inuse++; 526 527 trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num); 528 return elem->in_num + elem->out_num; 529 } 530 531 /* virtio device */ 532 static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector) 533 { 534 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 535 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 536 537 if (k->notify) { 538 k->notify(qbus->parent, vector); 539 } 540 } 541 542 void virtio_update_irq(VirtIODevice *vdev) 543 { 544 virtio_notify_vector(vdev, VIRTIO_NO_VECTOR); 545 } 546 547 static int virtio_validate_features(VirtIODevice *vdev) 548 { 549 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 550 551 if (k->validate_features) { 552 return k->validate_features(vdev); 553 } else { 554 return 0; 555 } 556 } 557 558 int virtio_set_status(VirtIODevice *vdev, uint8_t val) 559 { 560 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 561 trace_virtio_set_status(vdev, val); 562 563 if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { 564 if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) && 565 val & VIRTIO_CONFIG_S_FEATURES_OK) { 566 int ret = virtio_validate_features(vdev); 567 568 if (ret) { 569 return ret; 570 } 571 } 572 } 573 if (k->set_status) { 574 k->set_status(vdev, val); 575 } 576 vdev->status = val; 577 return 0; 578 } 579 580 bool target_words_bigendian(void); 581 static enum virtio_device_endian virtio_default_endian(void) 582 { 583 if (target_words_bigendian()) { 584 return VIRTIO_DEVICE_ENDIAN_BIG; 585 } else { 586 return VIRTIO_DEVICE_ENDIAN_LITTLE; 587 } 588 } 589 590 static enum virtio_device_endian virtio_current_cpu_endian(void) 591 { 592 CPUClass *cc = CPU_GET_CLASS(current_cpu); 593 594 if (cc->virtio_is_big_endian(current_cpu)) { 595 return VIRTIO_DEVICE_ENDIAN_BIG; 596 } else { 597 return VIRTIO_DEVICE_ENDIAN_LITTLE; 598 } 599 } 600 601 void virtio_reset(void *opaque) 602 { 603 VirtIODevice *vdev = opaque; 604 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 605 int i; 606 607 virtio_set_status(vdev, 0); 608 if (current_cpu) { 609 /* Guest initiated reset */ 610 vdev->device_endian = virtio_current_cpu_endian(); 611 } else { 612 /* System reset */ 613 vdev->device_endian = virtio_default_endian(); 614 } 615 616 if (k->reset) { 617 k->reset(vdev); 618 } 619 620 vdev->guest_features = 0; 621 vdev->queue_sel = 0; 622 vdev->status = 0; 623 vdev->isr = 0; 624 vdev->config_vector = VIRTIO_NO_VECTOR; 625 virtio_notify_vector(vdev, vdev->config_vector); 626 627 for(i = 0; i < VIRTIO_QUEUE_MAX; i++) { 628 vdev->vq[i].vring.desc = 0; 629 vdev->vq[i].vring.avail = 0; 630 vdev->vq[i].vring.used = 0; 631 vdev->vq[i].last_avail_idx = 0; 632 virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR); 633 vdev->vq[i].signalled_used = 0; 634 vdev->vq[i].signalled_used_valid = false; 635 vdev->vq[i].notification = true; 636 } 637 } 638 639 uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr) 640 { 641 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 642 uint8_t val; 643 644 if (addr + sizeof(val) > vdev->config_len) { 645 return (uint32_t)-1; 646 } 647 648 k->get_config(vdev, vdev->config); 649 650 val = ldub_p(vdev->config + addr); 651 return val; 652 } 653 654 uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr) 655 { 656 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 657 uint16_t val; 658 659 if (addr + sizeof(val) > vdev->config_len) { 660 return (uint32_t)-1; 661 } 662 663 k->get_config(vdev, vdev->config); 664 665 val = lduw_p(vdev->config + addr); 666 return val; 667 } 668 669 uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr) 670 { 671 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 672 uint32_t val; 673 674 if (addr + sizeof(val) > vdev->config_len) { 675 return (uint32_t)-1; 676 } 677 678 k->get_config(vdev, vdev->config); 679 680 val = ldl_p(vdev->config + addr); 681 return val; 682 } 683 684 void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data) 685 { 686 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 687 uint8_t val = data; 688 689 if (addr + sizeof(val) > vdev->config_len) { 690 return; 691 } 692 693 stb_p(vdev->config + addr, val); 694 695 if (k->set_config) { 696 k->set_config(vdev, vdev->config); 697 } 698 } 699 700 void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data) 701 { 702 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 703 uint16_t val = data; 704 705 if (addr + sizeof(val) > vdev->config_len) { 706 return; 707 } 708 709 stw_p(vdev->config + addr, val); 710 711 if (k->set_config) { 712 k->set_config(vdev, vdev->config); 713 } 714 } 715 716 void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data) 717 { 718 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 719 uint32_t val = data; 720 721 if (addr + sizeof(val) > vdev->config_len) { 722 return; 723 } 724 725 stl_p(vdev->config + addr, val); 726 727 if (k->set_config) { 728 k->set_config(vdev, vdev->config); 729 } 730 } 731 732 uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr) 733 { 734 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 735 uint8_t val; 736 737 if (addr + sizeof(val) > vdev->config_len) { 738 return (uint32_t)-1; 739 } 740 741 k->get_config(vdev, vdev->config); 742 743 val = ldub_p(vdev->config + addr); 744 return val; 745 } 746 747 uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr) 748 { 749 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 750 uint16_t val; 751 752 if (addr + sizeof(val) > vdev->config_len) { 753 return (uint32_t)-1; 754 } 755 756 k->get_config(vdev, vdev->config); 757 758 val = lduw_le_p(vdev->config + addr); 759 return val; 760 } 761 762 uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr) 763 { 764 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 765 uint32_t val; 766 767 if (addr + sizeof(val) > vdev->config_len) { 768 return (uint32_t)-1; 769 } 770 771 k->get_config(vdev, vdev->config); 772 773 val = ldl_le_p(vdev->config + addr); 774 return val; 775 } 776 777 void virtio_config_modern_writeb(VirtIODevice *vdev, 778 uint32_t addr, uint32_t data) 779 { 780 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 781 uint8_t val = data; 782 783 if (addr + sizeof(val) > vdev->config_len) { 784 return; 785 } 786 787 stb_p(vdev->config + addr, val); 788 789 if (k->set_config) { 790 k->set_config(vdev, vdev->config); 791 } 792 } 793 794 void virtio_config_modern_writew(VirtIODevice *vdev, 795 uint32_t addr, uint32_t data) 796 { 797 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 798 uint16_t val = data; 799 800 if (addr + sizeof(val) > vdev->config_len) { 801 return; 802 } 803 804 stw_le_p(vdev->config + addr, val); 805 806 if (k->set_config) { 807 k->set_config(vdev, vdev->config); 808 } 809 } 810 811 void virtio_config_modern_writel(VirtIODevice *vdev, 812 uint32_t addr, uint32_t data) 813 { 814 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 815 uint32_t val = data; 816 817 if (addr + sizeof(val) > vdev->config_len) { 818 return; 819 } 820 821 stl_le_p(vdev->config + addr, val); 822 823 if (k->set_config) { 824 k->set_config(vdev, vdev->config); 825 } 826 } 827 828 void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr) 829 { 830 vdev->vq[n].vring.desc = addr; 831 virtio_queue_update_rings(vdev, n); 832 } 833 834 hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n) 835 { 836 return vdev->vq[n].vring.desc; 837 } 838 839 void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc, 840 hwaddr avail, hwaddr used) 841 { 842 vdev->vq[n].vring.desc = desc; 843 vdev->vq[n].vring.avail = avail; 844 vdev->vq[n].vring.used = used; 845 } 846 847 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num) 848 { 849 /* Don't allow guest to flip queue between existent and 850 * nonexistent states, or to set it to an invalid size. 851 */ 852 if (!!num != !!vdev->vq[n].vring.num || 853 num > VIRTQUEUE_MAX_SIZE || 854 num < 0) { 855 return; 856 } 857 vdev->vq[n].vring.num = num; 858 } 859 860 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector) 861 { 862 return QLIST_FIRST(&vdev->vector_queues[vector]); 863 } 864 865 VirtQueue *virtio_vector_next_queue(VirtQueue *vq) 866 { 867 return QLIST_NEXT(vq, node); 868 } 869 870 int virtio_queue_get_num(VirtIODevice *vdev, int n) 871 { 872 return vdev->vq[n].vring.num; 873 } 874 875 int virtio_get_num_queues(VirtIODevice *vdev) 876 { 877 int i; 878 879 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 880 if (!virtio_queue_get_num(vdev, i)) { 881 break; 882 } 883 } 884 885 return i; 886 } 887 888 int virtio_queue_get_id(VirtQueue *vq) 889 { 890 VirtIODevice *vdev = vq->vdev; 891 assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_QUEUE_MAX]); 892 return vq - &vdev->vq[0]; 893 } 894 895 void virtio_queue_set_align(VirtIODevice *vdev, int n, int align) 896 { 897 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 898 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 899 900 /* virtio-1 compliant devices cannot change the alignment */ 901 if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { 902 error_report("tried to modify queue alignment for virtio-1 device"); 903 return; 904 } 905 /* Check that the transport told us it was going to do this 906 * (so a buggy transport will immediately assert rather than 907 * silently failing to migrate this state) 908 */ 909 assert(k->has_variable_vring_alignment); 910 911 vdev->vq[n].vring.align = align; 912 virtio_queue_update_rings(vdev, n); 913 } 914 915 void virtio_queue_notify_vq(VirtQueue *vq) 916 { 917 if (vq->vring.desc && vq->handle_output) { 918 VirtIODevice *vdev = vq->vdev; 919 920 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); 921 vq->handle_output(vdev, vq); 922 } 923 } 924 925 void virtio_queue_notify(VirtIODevice *vdev, int n) 926 { 927 virtio_queue_notify_vq(&vdev->vq[n]); 928 } 929 930 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n) 931 { 932 return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector : 933 VIRTIO_NO_VECTOR; 934 } 935 936 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector) 937 { 938 VirtQueue *vq = &vdev->vq[n]; 939 940 if (n < VIRTIO_QUEUE_MAX) { 941 if (vdev->vector_queues && 942 vdev->vq[n].vector != VIRTIO_NO_VECTOR) { 943 QLIST_REMOVE(vq, node); 944 } 945 vdev->vq[n].vector = vector; 946 if (vdev->vector_queues && 947 vector != VIRTIO_NO_VECTOR) { 948 QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node); 949 } 950 } 951 } 952 953 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size, 954 void (*handle_output)(VirtIODevice *, VirtQueue *)) 955 { 956 int i; 957 958 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 959 if (vdev->vq[i].vring.num == 0) 960 break; 961 } 962 963 if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) 964 abort(); 965 966 vdev->vq[i].vring.num = queue_size; 967 vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN; 968 vdev->vq[i].handle_output = handle_output; 969 970 return &vdev->vq[i]; 971 } 972 973 void virtio_del_queue(VirtIODevice *vdev, int n) 974 { 975 if (n < 0 || n >= VIRTIO_QUEUE_MAX) { 976 abort(); 977 } 978 979 vdev->vq[n].vring.num = 0; 980 } 981 982 void virtio_irq(VirtQueue *vq) 983 { 984 trace_virtio_irq(vq); 985 vq->vdev->isr |= 0x01; 986 virtio_notify_vector(vq->vdev, vq->vector); 987 } 988 989 static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq) 990 { 991 uint16_t old, new; 992 bool v; 993 /* We need to expose used array entries before checking used event. */ 994 smp_mb(); 995 /* Always notify when queue is empty (when feature acknowledge) */ 996 if (virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) && 997 !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx) { 998 return true; 999 } 1000 1001 if (!virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { 1002 return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT); 1003 } 1004 1005 v = vq->signalled_used_valid; 1006 vq->signalled_used_valid = true; 1007 old = vq->signalled_used; 1008 new = vq->signalled_used = vring_used_idx(vq); 1009 return !v || vring_need_event(vring_get_used_event(vq), new, old); 1010 } 1011 1012 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq) 1013 { 1014 if (!vring_notify(vdev, vq)) { 1015 return; 1016 } 1017 1018 trace_virtio_notify(vdev, vq); 1019 vdev->isr |= 0x01; 1020 virtio_notify_vector(vdev, vq->vector); 1021 } 1022 1023 void virtio_notify_config(VirtIODevice *vdev) 1024 { 1025 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) 1026 return; 1027 1028 vdev->isr |= 0x03; 1029 vdev->generation++; 1030 virtio_notify_vector(vdev, vdev->config_vector); 1031 } 1032 1033 static bool virtio_device_endian_needed(void *opaque) 1034 { 1035 VirtIODevice *vdev = opaque; 1036 1037 assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN); 1038 if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { 1039 return vdev->device_endian != virtio_default_endian(); 1040 } 1041 /* Devices conforming to VIRTIO 1.0 or later are always LE. */ 1042 return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE; 1043 } 1044 1045 static bool virtio_64bit_features_needed(void *opaque) 1046 { 1047 VirtIODevice *vdev = opaque; 1048 1049 return (vdev->host_features >> 32) != 0; 1050 } 1051 1052 static const VMStateDescription vmstate_virtio_device_endian = { 1053 .name = "virtio/device_endian", 1054 .version_id = 1, 1055 .minimum_version_id = 1, 1056 .needed = &virtio_device_endian_needed, 1057 .fields = (VMStateField[]) { 1058 VMSTATE_UINT8(device_endian, VirtIODevice), 1059 VMSTATE_END_OF_LIST() 1060 } 1061 }; 1062 1063 static const VMStateDescription vmstate_virtio_64bit_features = { 1064 .name = "virtio/64bit_features", 1065 .version_id = 1, 1066 .minimum_version_id = 1, 1067 .needed = &virtio_64bit_features_needed, 1068 .fields = (VMStateField[]) { 1069 VMSTATE_UINT64(guest_features, VirtIODevice), 1070 VMSTATE_END_OF_LIST() 1071 } 1072 }; 1073 1074 static const VMStateDescription vmstate_virtio = { 1075 .name = "virtio", 1076 .version_id = 1, 1077 .minimum_version_id = 1, 1078 .minimum_version_id_old = 1, 1079 .fields = (VMStateField[]) { 1080 VMSTATE_END_OF_LIST() 1081 }, 1082 .subsections = (const VMStateDescription*[]) { 1083 &vmstate_virtio_device_endian, 1084 &vmstate_virtio_64bit_features, 1085 NULL 1086 } 1087 }; 1088 1089 void virtio_save(VirtIODevice *vdev, QEMUFile *f) 1090 { 1091 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 1092 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 1093 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); 1094 uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff); 1095 int i; 1096 1097 if (k->save_config) { 1098 k->save_config(qbus->parent, f); 1099 } 1100 1101 qemu_put_8s(f, &vdev->status); 1102 qemu_put_8s(f, &vdev->isr); 1103 qemu_put_be16s(f, &vdev->queue_sel); 1104 qemu_put_be32s(f, &guest_features_lo); 1105 qemu_put_be32(f, vdev->config_len); 1106 qemu_put_buffer(f, vdev->config, vdev->config_len); 1107 1108 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 1109 if (vdev->vq[i].vring.num == 0) 1110 break; 1111 } 1112 1113 qemu_put_be32(f, i); 1114 1115 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 1116 if (vdev->vq[i].vring.num == 0) 1117 break; 1118 1119 qemu_put_be32(f, vdev->vq[i].vring.num); 1120 if (k->has_variable_vring_alignment) { 1121 qemu_put_be32(f, vdev->vq[i].vring.align); 1122 } 1123 /* XXX virtio-1 devices */ 1124 qemu_put_be64(f, vdev->vq[i].vring.desc); 1125 qemu_put_be16s(f, &vdev->vq[i].last_avail_idx); 1126 if (k->save_queue) { 1127 k->save_queue(qbus->parent, i, f); 1128 } 1129 } 1130 1131 if (vdc->save != NULL) { 1132 vdc->save(vdev, f); 1133 } 1134 1135 /* Subsections */ 1136 vmstate_save_state(f, &vmstate_virtio, vdev, NULL); 1137 } 1138 1139 static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val) 1140 { 1141 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); 1142 bool bad = (val & ~(vdev->host_features)) != 0; 1143 1144 val &= vdev->host_features; 1145 if (k->set_features) { 1146 k->set_features(vdev, val); 1147 } 1148 vdev->guest_features = val; 1149 return bad ? -1 : 0; 1150 } 1151 1152 int virtio_set_features(VirtIODevice *vdev, uint64_t val) 1153 { 1154 /* 1155 * The driver must not attempt to set features after feature negotiation 1156 * has finished. 1157 */ 1158 if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) { 1159 return -EINVAL; 1160 } 1161 return virtio_set_features_nocheck(vdev, val); 1162 } 1163 1164 int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) 1165 { 1166 int i, ret; 1167 int32_t config_len; 1168 uint32_t num; 1169 uint32_t features; 1170 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 1171 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 1172 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); 1173 1174 /* 1175 * We poison the endianness to ensure it does not get used before 1176 * subsections have been loaded. 1177 */ 1178 vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN; 1179 1180 if (k->load_config) { 1181 ret = k->load_config(qbus->parent, f); 1182 if (ret) 1183 return ret; 1184 } 1185 1186 qemu_get_8s(f, &vdev->status); 1187 qemu_get_8s(f, &vdev->isr); 1188 qemu_get_be16s(f, &vdev->queue_sel); 1189 if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) { 1190 return -1; 1191 } 1192 qemu_get_be32s(f, &features); 1193 1194 config_len = qemu_get_be32(f); 1195 1196 /* 1197 * There are cases where the incoming config can be bigger or smaller 1198 * than what we have; so load what we have space for, and skip 1199 * any excess that's in the stream. 1200 */ 1201 qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len)); 1202 1203 while (config_len > vdev->config_len) { 1204 qemu_get_byte(f); 1205 config_len--; 1206 } 1207 1208 num = qemu_get_be32(f); 1209 1210 if (num > VIRTIO_QUEUE_MAX) { 1211 error_report("Invalid number of PCI queues: 0x%x", num); 1212 return -1; 1213 } 1214 1215 for (i = 0; i < num; i++) { 1216 vdev->vq[i].vring.num = qemu_get_be32(f); 1217 if (k->has_variable_vring_alignment) { 1218 vdev->vq[i].vring.align = qemu_get_be32(f); 1219 } 1220 vdev->vq[i].vring.desc = qemu_get_be64(f); 1221 qemu_get_be16s(f, &vdev->vq[i].last_avail_idx); 1222 vdev->vq[i].signalled_used_valid = false; 1223 vdev->vq[i].notification = true; 1224 1225 if (vdev->vq[i].vring.desc) { 1226 /* XXX virtio-1 devices */ 1227 virtio_queue_update_rings(vdev, i); 1228 } else if (vdev->vq[i].last_avail_idx) { 1229 error_report("VQ %d address 0x0 " 1230 "inconsistent with Host index 0x%x", 1231 i, vdev->vq[i].last_avail_idx); 1232 return -1; 1233 } 1234 if (k->load_queue) { 1235 ret = k->load_queue(qbus->parent, i, f); 1236 if (ret) 1237 return ret; 1238 } 1239 } 1240 1241 virtio_notify_vector(vdev, VIRTIO_NO_VECTOR); 1242 1243 if (vdc->load != NULL) { 1244 ret = vdc->load(vdev, f, version_id); 1245 if (ret) { 1246 return ret; 1247 } 1248 } 1249 1250 /* Subsections */ 1251 ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1); 1252 if (ret) { 1253 return ret; 1254 } 1255 1256 if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) { 1257 vdev->device_endian = virtio_default_endian(); 1258 } 1259 1260 if (virtio_64bit_features_needed(vdev)) { 1261 /* 1262 * Subsection load filled vdev->guest_features. Run them 1263 * through virtio_set_features to sanity-check them against 1264 * host_features. 1265 */ 1266 uint64_t features64 = vdev->guest_features; 1267 if (virtio_set_features_nocheck(vdev, features64) < 0) { 1268 error_report("Features 0x%" PRIx64 " unsupported. " 1269 "Allowed features: 0x%" PRIx64, 1270 features64, vdev->host_features); 1271 return -1; 1272 } 1273 } else { 1274 if (virtio_set_features_nocheck(vdev, features) < 0) { 1275 error_report("Features 0x%x unsupported. " 1276 "Allowed features: 0x%" PRIx64, 1277 features, vdev->host_features); 1278 return -1; 1279 } 1280 } 1281 1282 for (i = 0; i < num; i++) { 1283 if (vdev->vq[i].vring.desc) { 1284 uint16_t nheads; 1285 nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx; 1286 /* Check it isn't doing strange things with descriptor numbers. */ 1287 if (nheads > vdev->vq[i].vring.num) { 1288 error_report("VQ %d size 0x%x Guest index 0x%x " 1289 "inconsistent with Host index 0x%x: delta 0x%x", 1290 i, vdev->vq[i].vring.num, 1291 vring_avail_idx(&vdev->vq[i]), 1292 vdev->vq[i].last_avail_idx, nheads); 1293 return -1; 1294 } 1295 } 1296 } 1297 1298 return 0; 1299 } 1300 1301 void virtio_cleanup(VirtIODevice *vdev) 1302 { 1303 qemu_del_vm_change_state_handler(vdev->vmstate); 1304 g_free(vdev->config); 1305 g_free(vdev->vq); 1306 g_free(vdev->vector_queues); 1307 } 1308 1309 static void virtio_vmstate_change(void *opaque, int running, RunState state) 1310 { 1311 VirtIODevice *vdev = opaque; 1312 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 1313 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 1314 bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK); 1315 vdev->vm_running = running; 1316 1317 if (backend_run) { 1318 virtio_set_status(vdev, vdev->status); 1319 } 1320 1321 if (k->vmstate_change) { 1322 k->vmstate_change(qbus->parent, backend_run); 1323 } 1324 1325 if (!backend_run) { 1326 virtio_set_status(vdev, vdev->status); 1327 } 1328 } 1329 1330 void virtio_instance_init_common(Object *proxy_obj, void *data, 1331 size_t vdev_size, const char *vdev_name) 1332 { 1333 DeviceState *vdev = data; 1334 1335 object_initialize(vdev, vdev_size, vdev_name); 1336 object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL); 1337 object_unref(OBJECT(vdev)); 1338 qdev_alias_all_properties(vdev, proxy_obj); 1339 } 1340 1341 void virtio_init(VirtIODevice *vdev, const char *name, 1342 uint16_t device_id, size_t config_size) 1343 { 1344 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); 1345 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); 1346 int i; 1347 int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0; 1348 1349 if (nvectors) { 1350 vdev->vector_queues = 1351 g_malloc0(sizeof(*vdev->vector_queues) * nvectors); 1352 } 1353 1354 vdev->device_id = device_id; 1355 vdev->status = 0; 1356 vdev->isr = 0; 1357 vdev->queue_sel = 0; 1358 vdev->config_vector = VIRTIO_NO_VECTOR; 1359 vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX); 1360 vdev->vm_running = runstate_is_running(); 1361 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { 1362 vdev->vq[i].vector = VIRTIO_NO_VECTOR; 1363 vdev->vq[i].vdev = vdev; 1364 vdev->vq[i].queue_index = i; 1365 } 1366 1367 vdev->name = name; 1368 vdev->config_len = config_size; 1369 if (vdev->config_len) { 1370 vdev->config = g_malloc0(config_size); 1371 } else { 1372 vdev->config = NULL; 1373 } 1374 vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change, 1375 vdev); 1376 vdev->device_endian = virtio_default_endian(); 1377 } 1378 1379 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n) 1380 { 1381 return vdev->vq[n].vring.desc; 1382 } 1383 1384 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n) 1385 { 1386 return vdev->vq[n].vring.avail; 1387 } 1388 1389 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n) 1390 { 1391 return vdev->vq[n].vring.used; 1392 } 1393 1394 hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n) 1395 { 1396 return vdev->vq[n].vring.desc; 1397 } 1398 1399 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n) 1400 { 1401 return sizeof(VRingDesc) * vdev->vq[n].vring.num; 1402 } 1403 1404 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n) 1405 { 1406 return offsetof(VRingAvail, ring) + 1407 sizeof(uint64_t) * vdev->vq[n].vring.num; 1408 } 1409 1410 hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n) 1411 { 1412 return offsetof(VRingUsed, ring) + 1413 sizeof(VRingUsedElem) * vdev->vq[n].vring.num; 1414 } 1415 1416 hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n) 1417 { 1418 return vdev->vq[n].vring.used - vdev->vq[n].vring.desc + 1419 virtio_queue_get_used_size(vdev, n); 1420 } 1421 1422 uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n) 1423 { 1424 return vdev->vq[n].last_avail_idx; 1425 } 1426 1427 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx) 1428 { 1429 vdev->vq[n].last_avail_idx = idx; 1430 } 1431 1432 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n) 1433 { 1434 vdev->vq[n].signalled_used_valid = false; 1435 } 1436 1437 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n) 1438 { 1439 return vdev->vq + n; 1440 } 1441 1442 uint16_t virtio_get_queue_index(VirtQueue *vq) 1443 { 1444 return vq->queue_index; 1445 } 1446 1447 static void virtio_queue_guest_notifier_read(EventNotifier *n) 1448 { 1449 VirtQueue *vq = container_of(n, VirtQueue, guest_notifier); 1450 if (event_notifier_test_and_clear(n)) { 1451 virtio_irq(vq); 1452 } 1453 } 1454 1455 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign, 1456 bool with_irqfd) 1457 { 1458 if (assign && !with_irqfd) { 1459 event_notifier_set_handler(&vq->guest_notifier, 1460 virtio_queue_guest_notifier_read); 1461 } else { 1462 event_notifier_set_handler(&vq->guest_notifier, NULL); 1463 } 1464 if (!assign) { 1465 /* Test and clear notifier before closing it, 1466 * in case poll callback didn't have time to run. */ 1467 virtio_queue_guest_notifier_read(&vq->guest_notifier); 1468 } 1469 } 1470 1471 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq) 1472 { 1473 return &vq->guest_notifier; 1474 } 1475 1476 static void virtio_queue_host_notifier_read(EventNotifier *n) 1477 { 1478 VirtQueue *vq = container_of(n, VirtQueue, host_notifier); 1479 if (event_notifier_test_and_clear(n)) { 1480 virtio_queue_notify_vq(vq); 1481 } 1482 } 1483 1484 void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign, 1485 bool set_handler) 1486 { 1487 if (assign && set_handler) { 1488 event_notifier_set_handler(&vq->host_notifier, 1489 virtio_queue_host_notifier_read); 1490 } else { 1491 event_notifier_set_handler(&vq->host_notifier, NULL); 1492 } 1493 if (!assign) { 1494 /* Test and clear notifier before after disabling event, 1495 * in case poll callback didn't have time to run. */ 1496 virtio_queue_host_notifier_read(&vq->host_notifier); 1497 } 1498 } 1499 1500 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq) 1501 { 1502 return &vq->host_notifier; 1503 } 1504 1505 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name) 1506 { 1507 g_free(vdev->bus_name); 1508 vdev->bus_name = g_strdup(bus_name); 1509 } 1510 1511 static void virtio_device_realize(DeviceState *dev, Error **errp) 1512 { 1513 VirtIODevice *vdev = VIRTIO_DEVICE(dev); 1514 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev); 1515 Error *err = NULL; 1516 1517 if (vdc->realize != NULL) { 1518 vdc->realize(dev, &err); 1519 if (err != NULL) { 1520 error_propagate(errp, err); 1521 return; 1522 } 1523 } 1524 1525 virtio_bus_device_plugged(vdev, &err); 1526 if (err != NULL) { 1527 error_propagate(errp, err); 1528 return; 1529 } 1530 } 1531 1532 static void virtio_device_unrealize(DeviceState *dev, Error **errp) 1533 { 1534 VirtIODevice *vdev = VIRTIO_DEVICE(dev); 1535 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev); 1536 Error *err = NULL; 1537 1538 virtio_bus_device_unplugged(vdev); 1539 1540 if (vdc->unrealize != NULL) { 1541 vdc->unrealize(dev, &err); 1542 if (err != NULL) { 1543 error_propagate(errp, err); 1544 return; 1545 } 1546 } 1547 1548 g_free(vdev->bus_name); 1549 vdev->bus_name = NULL; 1550 } 1551 1552 static Property virtio_properties[] = { 1553 DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features), 1554 DEFINE_PROP_END_OF_LIST(), 1555 }; 1556 1557 static void virtio_device_class_init(ObjectClass *klass, void *data) 1558 { 1559 /* Set the default value here. */ 1560 DeviceClass *dc = DEVICE_CLASS(klass); 1561 1562 dc->realize = virtio_device_realize; 1563 dc->unrealize = virtio_device_unrealize; 1564 dc->bus_type = TYPE_VIRTIO_BUS; 1565 dc->props = virtio_properties; 1566 } 1567 1568 static const TypeInfo virtio_device_info = { 1569 .name = TYPE_VIRTIO_DEVICE, 1570 .parent = TYPE_DEVICE, 1571 .instance_size = sizeof(VirtIODevice), 1572 .class_init = virtio_device_class_init, 1573 .abstract = true, 1574 .class_size = sizeof(VirtioDeviceClass), 1575 }; 1576 1577 static void virtio_register_types(void) 1578 { 1579 type_register_static(&virtio_device_info); 1580 } 1581 1582 type_init(virtio_register_types) 1583