/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;

struct VirtQueue
{
    VRing vring;
    hwaddr pa;
    uint16_t last_avail_idx;
    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether the signalled_used value above is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    int inuse;

    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};

/* virt queue functions */
static void virtqueue_init(VirtQueue *vq)
{
    hwaddr pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 vq->vring.align);
}

static inline uint64_t vring_desc_addr(VirtIODevice *vdev, hwaddr desc_pa,
                                       int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return virtio_ldq_phys(vdev, pa);
}

static inline uint32_t vring_desc_len(VirtIODevice *vdev, hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return virtio_ldl_phys(vdev, pa);
}

static inline uint16_t vring_desc_flags(VirtIODevice *vdev, hwaddr desc_pa,
                                        int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return virtio_lduw_phys(vdev, pa);
}

static inline uint16_t vring_desc_next(VirtIODevice *vdev, hwaddr desc_pa,
                                       int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return virtio_lduw_phys(vdev, pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return virtio_lduw_phys(vq->vdev, pa);
}
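/*
 * Illustrative note (editorial, not part of the original source): for a
 * queue of vq->vring.num entries, virtqueue_init() above lays the three
 * vring parts out contiguously in guest memory:
 *
 *     desc  at  pa
 *     avail at  pa + num * sizeof(VRingDesc)                (16 bytes/desc)
 *     used  at  vring_align(avail + offsetof(VRingAvail, ring[num]), align)
 *
 * For example, with num = 256 and the default 4096-byte alignment,
 * desc = pa, avail = pa + 4096 and used = pa + 8192.  The accessors above
 * and below read and write individual ring fields at those guest-physical
 * addresses through the virtio_*_phys() helpers, which also take care of
 * the device's endianness.
 */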
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    virtio_stl_phys(vq->vdev, pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    virtio_stl_phys(vq->vdev, pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    virtio_stw_phys(vq->vdev, pa, val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
}

static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys(vq->vdev, pa, val);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (virtio_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    trace_virtqueue_fill(vq, elem, len, idx);

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}
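/*
 * Editorial note: virtqueue_fill() only writes a used-ring entry at an
 * offset relative to the current used index; the guest does not observe
 * it until virtqueue_flush() below publishes the new used index (after a
 * write barrier).  virtqueue_push() is the common single-element case,
 * equivalent to virtqueue_fill(vq, elem, len, 0) followed by
 * virtqueue_flush(vq, 1).  A device that completes several requests at
 * once can fill at offsets 0..n-1 and then flush(n) a single time.
 */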
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;
    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vring_used_idx(vq);
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vring_avail_idx(vq));
        exit(1);
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(VirtIODevice *vdev, hwaddr desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_NEXT)) {
        return max;
    }

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(vdev, desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    return next;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        VirtIODevice *vdev = vq->vdev;
        unsigned int max, num_bufs, indirect = 0;
        hwaddr desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) {
                error_report("Invalid size for indirect buffer table");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                error_report("Looped descriptor");
                exit(1);
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(vdev, desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                error_report("Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
                in_total += vring_desc_len(vdev, desc_pa, i);
            } else {
                out_total += vring_desc_len(vdev, desc_pa, i);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
        } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
                      size_t num_sg, int is_write)
{
    unsigned int i;
    hwaddr len;

    if (num_sg > VIRTQUEUE_MAX_SIZE) {
        error_report("virtio: map attempt out of bounds: %zd > %d",
                     num_sg, VIRTQUEUE_MAX_SIZE);
        exit(1);
    }

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
    }
}
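/*
 * Editorial sketch (not part of the original source): a device's
 * handle_output callback typically drains the queue with virtqueue_pop()
 * and completes each element with virtqueue_push(), roughly:
 *
 *     static void virtio_foo_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         VirtQueueElement elem;
 *
 *         while (virtqueue_pop(vq, &elem)) {
 *             // read request data from elem.out_sg[] (driver -> device),
 *             // write results into elem.in_sg[] (device -> driver)
 *             virtqueue_push(vq, &elem, bytes_written_to_in_sg);
 *             virtio_notify(vdev, vq);
 *         }
 *     }
 *
 * "virtio_foo" and bytes_written_to_in_sg are placeholders; the real
 * handlers live in the individual device models.
 */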
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;
    VirtIODevice *vdev = vq->vdev;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are none of either input nor output. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) {
            error_report("Invalid size for indirect buffer table");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(vdev, desc_pa, i);
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        struct iovec *sg;

        if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
            if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
                error_report("Too many write descriptors in indirect table");
                exit(1);
            }
            elem->in_addr[elem->in_num] = vring_desc_addr(vdev, desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
                error_report("Too many read descriptors in indirect table");
                exit(1);
            }
            elem->out_addr[elem->out_num] = vring_desc_addr(vdev, desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }

        sg->iov_len = vring_desc_len(vdev, desc_pa, i);

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            error_report("Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);

    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);

    elem->index = head;

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem->in_num + elem->out_num;
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

void virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
}

bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}
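/*
 * Editorial note: virtio_queue_set_addr() and virtio_queue_set_num() are
 * driven by the transport when the guest programs a queue.  With the
 * legacy virtio-pci transport, for example, a guest write to the queue
 * PFN register ends up doing roughly:
 *
 *     virtio_queue_set_addr(vdev, vdev->queue_sel,
 *                           (hwaddr)pfn << 12);   // page frame -> address
 *
 * while virtio-mmio and virtio-ccw have equivalent paths.  The exact
 * register handling lives in the transport code, not in this file.
 */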
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
    virtqueue_init(&vdev->vq[n]);
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}

int virtio_queue_get_id(VirtQueue *vq)
{
    VirtIODevice *vdev = vq->vdev;
    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_QUEUE_MAX]);
    return vq - &vdev->vq[0];
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtqueue_init(&vdev->vq[n]);
}

void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
}

void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}
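/*
 * Editorial note: with VIRTIO_RING_F_EVENT_IDX negotiated, vring_notify()
 * below suppresses interrupts using the used_event index that the guest
 * publishes after the avail ring.  vring_need_event(), provided by the
 * virtio ring definitions this file includes, evaluates
 *
 *     (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx)
 *
 * i.e. notify only if the used index has just crossed the guest's
 * used_event value, with 16-bit wrap-around taken into account.  For
 * example, if old = 5, new = 8 and used_event = 6, the device notifies;
 * with used_event = 9 it stays quiet.
 */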
static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (if that feature was acknowledged) */
    if (virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx) {
        return true;
    }

    if (!virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vring_used_idx(vq);
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!vring_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}

static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    return vdev->device_endian != virtio_default_endian();
}

static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_virtio_device_endian,
            .needed = &virtio_device_endian_needed
        },
        { 0 }
    }
};

void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    /* Subsections */
    vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}

int virtio_set_features(VirtIODevice *vdev, uint32_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret)
            return ret;
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    if (virtio_set_features(vdev, features) < 0) {
        error_report("Features 0x%x unsupported. Allowed features: 0x%x",
                     features, vdev->host_features);
        return -1;
    }
    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of PCI queues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].pa) {
            virtqueue_init(&vdev->vq[i]);
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret)
                return ret;
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    for (i = 0; i < num; i++) {
        if (vdev->vq[i].pa) {
            uint16_t nheads;
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
        }
    }

    return 0;
}

void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
    g_free(vdev->vector_queues);
}

static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize(vdev, vdev_size, vdev_name);
    object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
    object_unref(OBJECT(vdev));
    qdev_alias_all_properties(vdev, proxy_obj);
}
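/*
 * Editorial sketch (not from the original source): a concrete device model
 * typically calls virtio_init() and virtio_add_queue() from its realize
 * function, along the lines of:
 *
 *     static void virtio_foo_device_realize(DeviceState *dev, Error **errp)
 *     {
 *         VirtIODevice *vdev = VIRTIO_DEVICE(dev);
 *
 *         virtio_init(vdev, "virtio-foo", VIRTIO_ID_FOO,
 *                     sizeof(struct virtio_foo_config));
 *         virtio_add_queue(vdev, 128, virtio_foo_handle_output);
 *     }
 *
 * "virtio-foo", VIRTIO_ID_FOO and the queue size of 128 are placeholders;
 * see the real devices (virtio-blk, virtio-net, ...) for actual values.
 */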
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
}

hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint64_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}

static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{
    if (assign && set_handler) {
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    } else {
        event_notifier_set_handler(&vq->host_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}

static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
}

static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}

static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)