xref: /openbmc/qemu/hw/virtio/virtio.c (revision be9f8a08727e46c790adb8caa8a4525a1e8e9e73)
1 /*
2  * Virtio Support
3  *
4  * Copyright IBM, Corp. 2007
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  */
13 
14 #include <inttypes.h>
15 
16 #include "trace.h"
17 #include "exec/address-spaces.h"
18 #include "qemu/error-report.h"
19 #include "hw/virtio/virtio.h"
20 #include "qemu/atomic.h"
21 #include "hw/virtio/virtio-bus.h"
22 #include "migration/migration.h"
23 #include "hw/virtio/virtio-access.h"
24 
25 /*
26  * The alignment to use between consumer and producer parts of vring.
27  * This defaults to the x86 page size and is used by transports like PCI
28  * which don't provide a means for the guest to tell the host the alignment.
29  */
30 #define VIRTIO_PCI_VRING_ALIGN         4096
31 
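/*
 * The structures below mirror the split virtqueue layout that the guest
 * places in its own memory.  QEMU never dereferences them directly; the
 * vring_*() helpers only use their sizes and offsets to compute
 * guest-physical addresses, and access the fields through the
 * virtio_{ld,st}*_phys() accessors so the device endianness is honoured.
 */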
32 typedef struct VRingDesc
33 {
34     uint64_t addr;
35     uint32_t len;
36     uint16_t flags;
37     uint16_t next;
38 } VRingDesc;
39 
40 typedef struct VRingAvail
41 {
42     uint16_t flags;
43     uint16_t idx;
44     uint16_t ring[0];
45 } VRingAvail;
46 
47 typedef struct VRingUsedElem
48 {
49     uint32_t id;
50     uint32_t len;
51 } VRingUsedElem;
52 
53 typedef struct VRingUsed
54 {
55     uint16_t flags;
56     uint16_t idx;
57     VRingUsedElem ring[0];
58 } VRingUsed;
59 
60 typedef struct VRing
61 {
62     unsigned int num;
63     unsigned int align;
64     hwaddr desc;
65     hwaddr avail;
66     hwaddr used;
67 } VRing;
68 
69 struct VirtQueue
70 {
71     VRing vring;
72     hwaddr pa;
73     uint16_t last_avail_idx;
74     /* Last used index value we have signalled on */
75     uint16_t signalled_used;
76 
77     /* Whether the signalled_used value above is still valid */
78     bool signalled_used_valid;
79 
80     /* Notification enabled? */
81     bool notification;
82 
83     uint16_t queue_index;
84 
85     int inuse;
86 
87     uint16_t vector;
88     void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
89     VirtIODevice *vdev;
90     EventNotifier guest_notifier;
91     EventNotifier host_notifier;
92     QLIST_ENTRY(VirtQueue) node;
93 };
94 
95 /* virt queue functions */
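/*
 * Lay out the three parts of the ring from the single base address the
 * guest programmed:
 *
 *   desc  = pa
 *   avail = pa + num * sizeof(VRingDesc)              (16 bytes per entry)
 *   used  = align_up(avail + 4 + 2 * num, vring.align)
 *
 * For example, with num = 256 and the default 4096-byte alignment:
 * desc at pa, avail at pa + 0x1000, used at pa + 0x2000.
 */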
96 static void virtqueue_init(VirtQueue *vq)
97 {
98     hwaddr pa = vq->pa;
99 
100     vq->vring.desc = pa;
101     vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
102     vq->vring.used = vring_align(vq->vring.avail +
103                                  offsetof(VRingAvail, ring[vq->vring.num]),
104                                  vq->vring.align);
105 }
106 
107 static inline uint64_t vring_desc_addr(VirtIODevice *vdev, hwaddr desc_pa,
108                                        int i)
109 {
110     hwaddr pa;
111     pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
112     return virtio_ldq_phys(vdev, pa);
113 }
114 
115 static inline uint32_t vring_desc_len(VirtIODevice *vdev, hwaddr desc_pa, int i)
116 {
117     hwaddr pa;
118     pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
119     return virtio_ldl_phys(vdev, pa);
120 }
121 
122 static inline uint16_t vring_desc_flags(VirtIODevice *vdev, hwaddr desc_pa,
123                                         int i)
124 {
125     hwaddr pa;
126     pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
127     return virtio_lduw_phys(vdev, pa);
128 }
129 
130 static inline uint16_t vring_desc_next(VirtIODevice *vdev, hwaddr desc_pa,
131                                        int i)
132 {
133     hwaddr pa;
134     pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
135     return virtio_lduw_phys(vdev, pa);
136 }
137 
138 static inline uint16_t vring_avail_flags(VirtQueue *vq)
139 {
140     hwaddr pa;
141     pa = vq->vring.avail + offsetof(VRingAvail, flags);
142     return virtio_lduw_phys(vq->vdev, pa);
143 }
144 
145 static inline uint16_t vring_avail_idx(VirtQueue *vq)
146 {
147     hwaddr pa;
148     pa = vq->vring.avail + offsetof(VRingAvail, idx);
149     return virtio_lduw_phys(vq->vdev, pa);
150 }
151 
152 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
153 {
154     hwaddr pa;
155     pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
156     return virtio_lduw_phys(vq->vdev, pa);
157 }
158 
159 static inline uint16_t vring_get_used_event(VirtQueue *vq)
160 {
161     return vring_avail_ring(vq, vq->vring.num);
162 }
163 
164 static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
165 {
166     hwaddr pa;
167     pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
168     virtio_stl_phys(vq->vdev, pa, val);
169 }
170 
171 static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
172 {
173     hwaddr pa;
174     pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
175     virtio_stl_phys(vq->vdev, pa, val);
176 }
177 
178 static uint16_t vring_used_idx(VirtQueue *vq)
179 {
180     hwaddr pa;
181     pa = vq->vring.used + offsetof(VRingUsed, idx);
182     return virtio_lduw_phys(vq->vdev, pa);
183 }
184 
185 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
186 {
187     hwaddr pa;
188     pa = vq->vring.used + offsetof(VRingUsed, idx);
189     virtio_stw_phys(vq->vdev, pa, val);
190 }
191 
192 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
193 {
194     VirtIODevice *vdev = vq->vdev;
195     hwaddr pa;
196     pa = vq->vring.used + offsetof(VRingUsed, flags);
197     virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
198 }
199 
200 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
201 {
202     VirtIODevice *vdev = vq->vdev;
203     hwaddr pa;
204     pa = vq->vring.used + offsetof(VRingUsed, flags);
205     virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
206 }
207 
208 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
209 {
210     hwaddr pa;
211     if (!vq->notification) {
212         return;
213     }
214     pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
215     virtio_stw_phys(vq->vdev, pa, val);
216 }
217 
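/*
 * Enable or disable guest->host notifications ("kicks") for this queue.
 * With VIRTIO_RING_F_EVENT_IDX negotiated, suppression is expressed by
 * writing the avail event index; otherwise the VRING_USED_F_NO_NOTIFY
 * flag in the used ring is set or cleared.
 */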
218 void virtio_queue_set_notification(VirtQueue *vq, int enable)
219 {
220     vq->notification = enable;
221     if (virtio_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
222         vring_set_avail_event(vq, vring_avail_idx(vq));
223     } else if (enable) {
224         vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
225     } else {
226         vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
227     }
228     if (enable) {
229         /* Expose avail event/used flags before caller checks the avail idx. */
230         smp_mb();
231     }
232 }
233 
234 int virtio_queue_ready(VirtQueue *vq)
235 {
236     return vq->vring.avail != 0;
237 }
238 
239 int virtio_queue_empty(VirtQueue *vq)
240 {
241     return vring_avail_idx(vq) == vq->last_avail_idx;
242 }
243 
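/*
 * Completing a request is a two-step affair: virtqueue_fill() records a
 * used-ring element (and unmaps the guest buffers), virtqueue_flush()
 * then publishes the new used index.  virtqueue_push() combines both for
 * a single element.  A typical device completion path looks roughly like
 * this (illustrative only, error handling omitted, bytes_written is the
 * number of bytes the device wrote into the in_sg buffers):
 *
 *     VirtQueueElement elem;
 *     if (virtqueue_pop(vq, &elem)) {
 *         ... process elem.out_sg / fill elem.in_sg ...
 *         virtqueue_push(vq, &elem, bytes_written);
 *         virtio_notify(vdev, vq);
 *     }
 */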
244 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
245                     unsigned int len, unsigned int idx)
246 {
247     unsigned int offset;
248     int i;
249 
250     trace_virtqueue_fill(vq, elem, len, idx);
251 
252     offset = 0;
253     for (i = 0; i < elem->in_num; i++) {
254         size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
255 
256         cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
257                                   elem->in_sg[i].iov_len,
258                                   1, size);
259 
260         offset += size;
261     }
262 
263     for (i = 0; i < elem->out_num; i++)
264         cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
265                                   elem->out_sg[i].iov_len,
266                                   0, elem->out_sg[i].iov_len);
267 
268     idx = (idx + vring_used_idx(vq)) % vq->vring.num;
269 
270     /* Record this element in the next entry of the used ring. */
271     vring_used_ring_id(vq, idx, elem->index);
272     vring_used_ring_len(vq, idx, len);
273 }
274 
275 void virtqueue_flush(VirtQueue *vq, unsigned int count)
276 {
277     uint16_t old, new;
278     /* Make sure buffer is written before we update index. */
279     smp_wmb();
280     trace_virtqueue_flush(vq, count);
281     old = vring_used_idx(vq);
282     new = old + count;
283     vring_used_idx_set(vq, new);
284     vq->inuse -= count;
285     if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
286         vq->signalled_used_valid = false;
287 }
288 
289 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
290                     unsigned int len)
291 {
292     virtqueue_fill(vq, elem, len, 0);
293     virtqueue_flush(vq, 1);
294 }
295 
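/*
 * Number of heads the guest has made available past 'idx'.  A value
 * larger than the ring size means the guest is misbehaving and is fatal.
 */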
296 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
297 {
298     uint16_t num_heads = vring_avail_idx(vq) - idx;
299 
300     /* Check it isn't doing very strange things with descriptor numbers. */
301     if (num_heads > vq->vring.num) {
302         error_report("Guest moved used index from %u to %u",
303                      idx, vring_avail_idx(vq));
304         exit(1);
305     }
306     /* On success, callers read a descriptor at vq->last_avail_idx.
307      * Make sure descriptor read does not bypass avail index read. */
308     if (num_heads) {
309         smp_rmb();
310     }
311 
312     return num_heads;
313 }
314 
315 static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
316 {
317     unsigned int head;
318 
319     /* Grab the next descriptor number they're advertising, and increment
320      * the index we've seen. */
321     head = vring_avail_ring(vq, idx % vq->vring.num);
322 
323     /* If their number is silly, that's a fatal mistake. */
324     if (head >= vq->vring.num) {
325         error_report("Guest says index %u is available", head);
326         exit(1);
327     }
328 
329     return head;
330 }
331 
332 static unsigned virtqueue_next_desc(VirtIODevice *vdev, hwaddr desc_pa,
333                                     unsigned int i, unsigned int max)
334 {
335     unsigned int next;
336 
337     /* If this descriptor says it doesn't chain, we're done. */
338     if (!(vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_NEXT)) {
339         return max;
340     }
341 
342     /* Check they're not leading us off the end of the descriptor table. */
343     next = vring_desc_next(vdev, desc_pa, i);
344     /* Make sure compiler knows to grab that: we don't want it changing! */
345     smp_wmb();
346 
347     if (next >= max) {
348         error_report("Desc next is %u", next);
349         exit(1);
350     }
351 
352     return next;
353 }
354 
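/*
 * Walk the pending available buffers without consuming them and report
 * how many device-writable (*in_bytes) and device-readable (*out_bytes)
 * bytes they contain, stopping early once both limits are reached.
 */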
355 void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
356                                unsigned int *out_bytes,
357                                unsigned max_in_bytes, unsigned max_out_bytes)
358 {
359     unsigned int idx;
360     unsigned int total_bufs, in_total, out_total;
361 
362     idx = vq->last_avail_idx;
363 
364     total_bufs = in_total = out_total = 0;
365     while (virtqueue_num_heads(vq, idx)) {
366         VirtIODevice *vdev = vq->vdev;
367         unsigned int max, num_bufs, indirect = 0;
368         hwaddr desc_pa;
369         int i;
370 
371         max = vq->vring.num;
372         num_bufs = total_bufs;
373         i = virtqueue_get_head(vq, idx++);
374         desc_pa = vq->vring.desc;
375 
376         if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) {
377             if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) {
378                 error_report("Invalid size for indirect buffer table");
379                 exit(1);
380             }
381 
382             /* If we've got too many, that implies a descriptor loop. */
383             if (num_bufs >= max) {
384                 error_report("Looped descriptor");
385                 exit(1);
386             }
387 
388             /* loop over the indirect descriptor table */
389             indirect = 1;
390             max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc);
391             desc_pa = vring_desc_addr(vdev, desc_pa, i);
392             num_bufs = i = 0;
393         }
394 
395         do {
396             /* If we've got too many, that implies a descriptor loop. */
397             if (++num_bufs > max) {
398                 error_report("Looped descriptor");
399                 exit(1);
400             }
401 
402             if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
403                 in_total += vring_desc_len(vdev, desc_pa, i);
404             } else {
405                 out_total += vring_desc_len(vdev, desc_pa, i);
406             }
407             if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
408                 goto done;
409             }
410         } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);
411 
412         if (!indirect)
413             total_bufs = num_bufs;
414         else
415             total_bufs++;
416     }
417 done:
418     if (in_bytes) {
419         *in_bytes = in_total;
420     }
421     if (out_bytes) {
422         *out_bytes = out_total;
423     }
424 }
425 
426 int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
427                           unsigned int out_bytes)
428 {
429     unsigned int in_total, out_total;
430 
431     virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
432     return in_bytes <= in_total && out_bytes <= out_total;
433 }
434 
435 void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
436     size_t num_sg, int is_write)
437 {
438     unsigned int i;
439     hwaddr len;
440 
441     if (num_sg > VIRTQUEUE_MAX_SIZE) {
442         error_report("virtio: map attempt out of bounds: %zd > %d",
443                      num_sg, VIRTQUEUE_MAX_SIZE);
444         exit(1);
445     }
446 
447     for (i = 0; i < num_sg; i++) {
448         len = sg[i].iov_len;
449         sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
450         if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
451             error_report("virtio: error trying to map MMIO memory");
452             exit(1);
453         }
454     }
455 }
456 
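/*
 * Take the next available buffer off the ring: read the head index, walk
 * the (possibly indirect) descriptor chain into elem->in_sg / elem->out_sg,
 * and map the guest memory so the device can use it.  Returns the total
 * number of descriptors collected, or 0 if the ring is empty.
 */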
457 int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
458 {
459     unsigned int i, head, max;
460     hwaddr desc_pa = vq->vring.desc;
461     VirtIODevice *vdev = vq->vdev;
462 
463     if (!virtqueue_num_heads(vq, vq->last_avail_idx))
464         return 0;
465 
466     /* When we start there are neither input nor output buffers. */
467     elem->out_num = elem->in_num = 0;
468 
469     max = vq->vring.num;
470 
471     i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
472     if (virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
473         vring_set_avail_event(vq, vq->last_avail_idx);
474     }
475 
476     if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) {
477         if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) {
478             error_report("Invalid size for indirect buffer table");
479             exit(1);
480         }
481 
482         /* loop over the indirect descriptor table */
483         max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc);
484         desc_pa = vring_desc_addr(vdev, desc_pa, i);
485         i = 0;
486     }
487 
488     /* Collect all the descriptors */
489     do {
490         struct iovec *sg;
491 
492         if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
493             if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
494                 error_report("Too many write descriptors in indirect table");
495                 exit(1);
496             }
497             elem->in_addr[elem->in_num] = vring_desc_addr(vdev, desc_pa, i);
498             sg = &elem->in_sg[elem->in_num++];
499         } else {
500             if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
501                 error_report("Too many read descriptors in indirect table");
502                 exit(1);
503             }
504             elem->out_addr[elem->out_num] = vring_desc_addr(vdev, desc_pa, i);
505             sg = &elem->out_sg[elem->out_num++];
506         }
507 
508         sg->iov_len = vring_desc_len(vdev, desc_pa, i);
509 
510         /* If we've got too many, that implies a descriptor loop. */
511         if ((elem->in_num + elem->out_num) > max) {
512             error_report("Looped descriptor");
513             exit(1);
514         }
515     } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);
516 
517     /* Now map what we have collected */
518     virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
519     virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);
520 
521     elem->index = head;
522 
523     vq->inuse++;
524 
525     trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
526     return elem->in_num + elem->out_num;
527 }
528 
529 /* virtio device */
530 static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
531 {
532     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
533     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
534 
535     if (k->notify) {
536         k->notify(qbus->parent, vector);
537     }
538 }
539 
540 void virtio_update_irq(VirtIODevice *vdev)
541 {
542     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
543 }
544 
545 void virtio_set_status(VirtIODevice *vdev, uint8_t val)
546 {
547     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
548     trace_virtio_set_status(vdev, val);
549 
550     if (k->set_status) {
551         k->set_status(vdev, val);
552     }
553     vdev->status = val;
554 }
555 
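/*
 * Legacy virtio devices follow the guest's byte order, which on bi-endian
 * targets is only known once a CPU is running.  The helpers below select
 * the endianness used by the virtio_*_phys() accessors.
 */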
556 bool target_words_bigendian(void);
557 static enum virtio_device_endian virtio_default_endian(void)
558 {
559     if (target_words_bigendian()) {
560         return VIRTIO_DEVICE_ENDIAN_BIG;
561     } else {
562         return VIRTIO_DEVICE_ENDIAN_LITTLE;
563     }
564 }
565 
566 static enum virtio_device_endian virtio_current_cpu_endian(void)
567 {
568     CPUClass *cc = CPU_GET_CLASS(current_cpu);
569 
570     if (cc->virtio_is_big_endian(current_cpu)) {
571         return VIRTIO_DEVICE_ENDIAN_BIG;
572     } else {
573         return VIRTIO_DEVICE_ENDIAN_LITTLE;
574     }
575 }
576 
577 void virtio_reset(void *opaque)
578 {
579     VirtIODevice *vdev = opaque;
580     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
581     int i;
582 
583     virtio_set_status(vdev, 0);
584     if (current_cpu) {
585         /* Guest initiated reset */
586         vdev->device_endian = virtio_current_cpu_endian();
587     } else {
588         /* System reset */
589         vdev->device_endian = virtio_default_endian();
590     }
591 
592     if (k->reset) {
593         k->reset(vdev);
594     }
595 
596     vdev->guest_features = 0;
597     vdev->queue_sel = 0;
598     vdev->status = 0;
599     vdev->isr = 0;
600     vdev->config_vector = VIRTIO_NO_VECTOR;
601     virtio_notify_vector(vdev, vdev->config_vector);
602 
603     for(i = 0; i < VIRTIO_QUEUE_MAX; i++) {
604         vdev->vq[i].vring.desc = 0;
605         vdev->vq[i].vring.avail = 0;
606         vdev->vq[i].vring.used = 0;
607         vdev->vq[i].last_avail_idx = 0;
608         vdev->vq[i].pa = 0;
609         virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
610         vdev->vq[i].signalled_used = 0;
611         vdev->vq[i].signalled_used_valid = false;
612         vdev->vq[i].notification = true;
613     }
614 }
615 
616 uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
617 {
618     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
619     uint8_t val;
620 
621     if (addr + sizeof(val) > vdev->config_len) {
622         return (uint32_t)-1;
623     }
624 
625     k->get_config(vdev, vdev->config);
626 
627     val = ldub_p(vdev->config + addr);
628     return val;
629 }
630 
631 uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
632 {
633     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
634     uint16_t val;
635 
636     if (addr + sizeof(val) > vdev->config_len) {
637         return (uint32_t)-1;
638     }
639 
640     k->get_config(vdev, vdev->config);
641 
642     val = lduw_p(vdev->config + addr);
643     return val;
644 }
645 
646 uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
647 {
648     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
649     uint32_t val;
650 
651     if (addr + sizeof(val) > vdev->config_len) {
652         return (uint32_t)-1;
653     }
654 
655     k->get_config(vdev, vdev->config);
656 
657     val = ldl_p(vdev->config + addr);
658     return val;
659 }
660 
661 void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
662 {
663     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
664     uint8_t val = data;
665 
666     if (addr + sizeof(val) > vdev->config_len) {
667         return;
668     }
669 
670     stb_p(vdev->config + addr, val);
671 
672     if (k->set_config) {
673         k->set_config(vdev, vdev->config);
674     }
675 }
676 
677 void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
678 {
679     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
680     uint16_t val = data;
681 
682     if (addr + sizeof(val) > vdev->config_len) {
683         return;
684     }
685 
686     stw_p(vdev->config + addr, val);
687 
688     if (k->set_config) {
689         k->set_config(vdev, vdev->config);
690     }
691 }
692 
693 void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
694 {
695     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
696     uint32_t val = data;
697 
698     if (addr + sizeof(val) > vdev->config_len) {
699         return;
700     }
701 
702     stl_p(vdev->config + addr, val);
703 
704     if (k->set_config) {
705         k->set_config(vdev, vdev->config);
706     }
707 }
708 
709 void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
710 {
711     vdev->vq[n].pa = addr;
712     virtqueue_init(&vdev->vq[n]);
713 }
714 
715 hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
716 {
717     return vdev->vq[n].pa;
718 }
719 
720 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
721 {
722     /* Don't allow guest to flip queue between existent and
723      * nonexistent states, or to set it to an invalid size.
724      */
725     if (!!num != !!vdev->vq[n].vring.num ||
726         num > VIRTQUEUE_MAX_SIZE ||
727         num < 0) {
728         return;
729     }
730     vdev->vq[n].vring.num = num;
731     virtqueue_init(&vdev->vq[n]);
732 }
733 
734 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
735 {
736     return QLIST_FIRST(&vdev->vector_queues[vector]);
737 }
738 
739 VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
740 {
741     return QLIST_NEXT(vq, node);
742 }
743 
744 int virtio_queue_get_num(VirtIODevice *vdev, int n)
745 {
746     return vdev->vq[n].vring.num;
747 }
748 
749 int virtio_get_num_queues(VirtIODevice *vdev)
750 {
751     int i;
752 
753     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
754         if (!virtio_queue_get_num(vdev, i)) {
755             break;
756         }
757     }
758 
759     return i;
760 }
761 
762 int virtio_queue_get_id(VirtQueue *vq)
763 {
764     VirtIODevice *vdev = vq->vdev;
765     assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_QUEUE_MAX]);
766     return vq - &vdev->vq[0];
767 }
768 
769 void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
770 {
771     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
772     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
773 
774     /* Check that the transport told us it was going to do this
775      * (so a buggy transport will immediately assert rather than
776      * silently failing to migrate this state)
777      */
778     assert(k->has_variable_vring_alignment);
779 
780     vdev->vq[n].vring.align = align;
781     virtqueue_init(&vdev->vq[n]);
782 }
783 
784 void virtio_queue_notify_vq(VirtQueue *vq)
785 {
786     if (vq->vring.desc && vq->handle_output) {
787         VirtIODevice *vdev = vq->vdev;
788 
789         trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
790         vq->handle_output(vdev, vq);
791     }
792 }
793 
794 void virtio_queue_notify(VirtIODevice *vdev, int n)
795 {
796     virtio_queue_notify_vq(&vdev->vq[n]);
797 }
798 
799 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
800 {
801     return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
802         VIRTIO_NO_VECTOR;
803 }
804 
805 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
806 {
807     VirtQueue *vq = &vdev->vq[n];
808 
809     if (n < VIRTIO_QUEUE_MAX) {
810         if (vdev->vector_queues &&
811             vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
812             QLIST_REMOVE(vq, node);
813         }
814         vdev->vq[n].vector = vector;
815         if (vdev->vector_queues &&
816             vector != VIRTIO_NO_VECTOR) {
817             QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
818         }
819     }
820 }
821 
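/*
 * Allocate the first unused queue slot.  A device typically does this from
 * its realize hook, after virtio_init(); for example (names are
 * hypothetical):
 *
 *     virtio_init(vdev, "virtio-foo", VIRTIO_ID_FOO, sizeof(struct foo_config));
 *     s->vq = virtio_add_queue(vdev, 128, foo_handle_output);
 */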
822 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
823                             void (*handle_output)(VirtIODevice *, VirtQueue *))
824 {
825     int i;
826 
827     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
828         if (vdev->vq[i].vring.num == 0)
829             break;
830     }
831 
832     if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
833         abort();
834 
835     vdev->vq[i].vring.num = queue_size;
836     vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
837     vdev->vq[i].handle_output = handle_output;
838 
839     return &vdev->vq[i];
840 }
841 
842 void virtio_del_queue(VirtIODevice *vdev, int n)
843 {
844     if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
845         abort();
846     }
847 
848     vdev->vq[n].vring.num = 0;
849 }
850 
851 void virtio_irq(VirtQueue *vq)
852 {
853     trace_virtio_irq(vq);
854     vq->vdev->isr |= 0x01;
855     virtio_notify_vector(vq->vdev, vq->vector);
856 }
857 
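/*
 * Decide whether the guest needs an interrupt for this queue: always if
 * VIRTIO_F_NOTIFY_ON_EMPTY applies and the queue has drained, otherwise
 * honour either the VRING_AVAIL_F_NO_INTERRUPT flag or, with
 * VIRTIO_RING_F_EVENT_IDX, the used event index via vring_need_event().
 */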
858 static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
859 {
860     uint16_t old, new;
861     bool v;
862     /* We need to expose used array entries before checking used event. */
863     smp_mb();
864     /* Always notify when queue is empty, if VIRTIO_F_NOTIFY_ON_EMPTY is negotiated */
865     if (virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
866         !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx) {
867         return true;
868     }
869 
870     if (!virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
871         return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
872     }
873 
874     v = vq->signalled_used_valid;
875     vq->signalled_used_valid = true;
876     old = vq->signalled_used;
877     new = vq->signalled_used = vring_used_idx(vq);
878     return !v || vring_need_event(vring_get_used_event(vq), new, old);
879 }
880 
881 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
882 {
883     if (!vring_notify(vdev, vq)) {
884         return;
885     }
886 
887     trace_virtio_notify(vdev, vq);
888     vdev->isr |= 0x01;
889     virtio_notify_vector(vdev, vq->vector);
890 }
891 
892 void virtio_notify_config(VirtIODevice *vdev)
893 {
894     if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
895         return;
896 
897     vdev->isr |= 0x03;
898     virtio_notify_vector(vdev, vdev->config_vector);
899 }
900 
901 static bool virtio_device_endian_needed(void *opaque)
902 {
903     VirtIODevice *vdev = opaque;
904 
905     assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
906     return vdev->device_endian != virtio_default_endian();
907 }
908 
909 static bool virtio_64bit_features_needed(void *opaque)
910 {
911     VirtIODevice *vdev = opaque;
912 
913     return (vdev->host_features >> 32) != 0;
914 }
915 
916 static const VMStateDescription vmstate_virtio_device_endian = {
917     .name = "virtio/device_endian",
918     .version_id = 1,
919     .minimum_version_id = 1,
920     .fields = (VMStateField[]) {
921         VMSTATE_UINT8(device_endian, VirtIODevice),
922         VMSTATE_END_OF_LIST()
923     }
924 };
925 
926 static const VMStateDescription vmstate_virtio_64bit_features = {
927     .name = "virtio/64bit_features",
928     .version_id = 1,
929     .minimum_version_id = 1,
930     .fields = (VMStateField[]) {
931         VMSTATE_UINT64(guest_features, VirtIODevice),
932         VMSTATE_END_OF_LIST()
933     }
934 };
935 
936 static const VMStateDescription vmstate_virtio = {
937     .name = "virtio",
938     .version_id = 1,
939     .minimum_version_id = 1,
940     .minimum_version_id_old = 1,
941     .fields = (VMStateField[]) {
942         VMSTATE_END_OF_LIST()
943     },
944     .subsections = (VMStateSubsection[]) {
945         {
946             .vmsd = &vmstate_virtio_device_endian,
947             .needed = &virtio_device_endian_needed
948         },
949         {
950             .vmsd = &vmstate_virtio_64bit_features,
951             .needed = &virtio_64bit_features_needed
952         },
953         { 0 }
954     }
955 };
956 
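/*
 * Migration stream layout: optional transport config, the common device
 * fields (status, isr, queue_sel, low 32 feature bits, config space),
 * the number of in-use queues followed by per-queue state, the
 * device-specific state, and finally the vmstate subsections (device
 * endianness, upper 32 feature bits).  virtio_load() must read it back
 * in exactly this order.
 */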
957 void virtio_save(VirtIODevice *vdev, QEMUFile *f)
958 {
959     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
960     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
961     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
962     uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
963     int i;
964 
965     if (k->save_config) {
966         k->save_config(qbus->parent, f);
967     }
968 
969     qemu_put_8s(f, &vdev->status);
970     qemu_put_8s(f, &vdev->isr);
971     qemu_put_be16s(f, &vdev->queue_sel);
972     qemu_put_be32s(f, &guest_features_lo);
973     qemu_put_be32(f, vdev->config_len);
974     qemu_put_buffer(f, vdev->config, vdev->config_len);
975 
976     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
977         if (vdev->vq[i].vring.num == 0)
978             break;
979     }
980 
981     qemu_put_be32(f, i);
982 
983     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
984         if (vdev->vq[i].vring.num == 0)
985             break;
986 
987         qemu_put_be32(f, vdev->vq[i].vring.num);
988         if (k->has_variable_vring_alignment) {
989             qemu_put_be32(f, vdev->vq[i].vring.align);
990         }
991         qemu_put_be64(f, vdev->vq[i].pa);
992         qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
993         if (k->save_queue) {
994             k->save_queue(qbus->parent, i, f);
995         }
996     }
997 
998     if (vdc->save != NULL) {
999         vdc->save(vdev, f);
1000     }
1001 
1002     /* Subsections */
1003     vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
1004 }
1005 
1006 int virtio_set_features(VirtIODevice *vdev, uint32_t val)
1007 {
1008     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1009     bool bad = (val & ~(vdev->host_features)) != 0;
1010 
1011     val &= vdev->host_features;
1012     if (k->set_features) {
1013         k->set_features(vdev, val);
1014     }
1015     vdev->guest_features = val;
1016     return bad ? -1 : 0;
1017 }
1018 
1019 int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
1020 {
1021     int i, ret;
1022     int32_t config_len;
1023     uint32_t num;
1024     uint32_t features;
1025     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1026     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1027     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
1028 
1029     /*
1030      * We poison the endianness to ensure it does not get used before
1031      * subsections have been loaded.
1032      */
1033     vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
1034 
1035     if (k->load_config) {
1036         ret = k->load_config(qbus->parent, f);
1037         if (ret)
1038             return ret;
1039     }
1040 
1041     qemu_get_8s(f, &vdev->status);
1042     qemu_get_8s(f, &vdev->isr);
1043     qemu_get_be16s(f, &vdev->queue_sel);
1044     if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
1045         return -1;
1046     }
1047     qemu_get_be32s(f, &features);
1048 
1049     config_len = qemu_get_be32(f);
1050 
1051     /*
1052      * There are cases where the incoming config can be bigger or smaller
1053      * than what we have; so load what we have space for, and skip
1054      * any excess that's in the stream.
1055      */
1056     qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
1057 
1058     while (config_len > vdev->config_len) {
1059         qemu_get_byte(f);
1060         config_len--;
1061     }
1062 
1063     num = qemu_get_be32(f);
1064 
1065     if (num > VIRTIO_QUEUE_MAX) {
1066         error_report("Invalid number of PCI queues: 0x%x", num);
1067         return -1;
1068     }
1069 
1070     for (i = 0; i < num; i++) {
1071         vdev->vq[i].vring.num = qemu_get_be32(f);
1072         if (k->has_variable_vring_alignment) {
1073             vdev->vq[i].vring.align = qemu_get_be32(f);
1074         }
1075         vdev->vq[i].pa = qemu_get_be64(f);
1076         qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
1077         vdev->vq[i].signalled_used_valid = false;
1078         vdev->vq[i].notification = true;
1079 
1080         if (vdev->vq[i].pa) {
1081             virtqueue_init(&vdev->vq[i]);
1082         } else if (vdev->vq[i].last_avail_idx) {
1083             error_report("VQ %d address 0x0 "
1084                          "inconsistent with Host index 0x%x",
1085                          i, vdev->vq[i].last_avail_idx);
1086             return -1;
1087         }
1088         if (k->load_queue) {
1089             ret = k->load_queue(qbus->parent, i, f);
1090             if (ret)
1091                 return ret;
1092         }
1093     }
1094 
1095     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
1096 
1097     if (vdc->load != NULL) {
1098         ret = vdc->load(vdev, f, version_id);
1099         if (ret) {
1100             return ret;
1101         }
1102     }
1103 
1104     /* Subsections */
1105     ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
1106     if (ret) {
1107         return ret;
1108     }
1109 
1110     if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
1111         vdev->device_endian = virtio_default_endian();
1112     }
1113 
1114     if (virtio_64bit_features_needed(vdev)) {
1115         /*
1116          * Subsection load filled vdev->guest_features.  Run them
1117          * through virtio_set_features to sanity-check them against
1118          * host_features.
1119          */
1120         uint64_t features64 = vdev->guest_features;
1121         if (virtio_set_features(vdev, features64) < 0) {
1122             error_report("Features 0x%" PRIx64 " unsupported. "
1123                          "Allowed features: 0x%" PRIx64,
1124                          features64, vdev->host_features);
1125             return -1;
1126         }
1127     } else {
1128         if (virtio_set_features(vdev, features) < 0) {
1129             error_report("Features 0x%x unsupported. "
1130                          "Allowed features: 0x%" PRIx64,
1131                          features, vdev->host_features);
1132             return -1;
1133         }
1134     }
1135 
1136     for (i = 0; i < num; i++) {
1137         if (vdev->vq[i].pa) {
1138             uint16_t nheads;
1139             nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
1140             /* Check it isn't doing strange things with descriptor numbers. */
1141             if (nheads > vdev->vq[i].vring.num) {
1142                 error_report("VQ %d size 0x%x Guest index 0x%x "
1143                              "inconsistent with Host index 0x%x: delta 0x%x",
1144                              i, vdev->vq[i].vring.num,
1145                              vring_avail_idx(&vdev->vq[i]),
1146                              vdev->vq[i].last_avail_idx, nheads);
1147                 return -1;
1148             }
1149         }
1150     }
1151 
1152     return 0;
1153 }
1154 
1155 void virtio_cleanup(VirtIODevice *vdev)
1156 {
1157     qemu_del_vm_change_state_handler(vdev->vmstate);
1158     g_free(vdev->config);
1159     g_free(vdev->vq);
1160     g_free(vdev->vector_queues);
1161 }
1162 
1163 static void virtio_vmstate_change(void *opaque, int running, RunState state)
1164 {
1165     VirtIODevice *vdev = opaque;
1166     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1167     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1168     bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
1169     vdev->vm_running = running;
1170 
1171     if (backend_run) {
1172         virtio_set_status(vdev, vdev->status);
1173     }
1174 
1175     if (k->vmstate_change) {
1176         k->vmstate_change(qbus->parent, backend_run);
1177     }
1178 
1179     if (!backend_run) {
1180         virtio_set_status(vdev, vdev->status);
1181     }
1182 }
1183 
1184 void virtio_instance_init_common(Object *proxy_obj, void *data,
1185                                  size_t vdev_size, const char *vdev_name)
1186 {
1187     DeviceState *vdev = data;
1188 
1189     object_initialize(vdev, vdev_size, vdev_name);
1190     object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
1191     object_unref(OBJECT(vdev));
1192     qdev_alias_all_properties(vdev, proxy_obj);
1193 }
1194 
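/*
 * Common initialisation shared by all virtio devices: allocate the
 * VIRTIO_QUEUE_MAX queue array and, if the transport uses vectors, the
 * per-vector queue lists; reset the basic registers, allocate the config
 * space and register the vmstate change handler.
 */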
1195 void virtio_init(VirtIODevice *vdev, const char *name,
1196                  uint16_t device_id, size_t config_size)
1197 {
1198     BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1199     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1200     int i;
1201     int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
1202 
1203     if (nvectors) {
1204         vdev->vector_queues =
1205             g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
1206     }
1207 
1208     vdev->device_id = device_id;
1209     vdev->status = 0;
1210     vdev->isr = 0;
1211     vdev->queue_sel = 0;
1212     vdev->config_vector = VIRTIO_NO_VECTOR;
1213     vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
1214     vdev->vm_running = runstate_is_running();
1215     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1216         vdev->vq[i].vector = VIRTIO_NO_VECTOR;
1217         vdev->vq[i].vdev = vdev;
1218         vdev->vq[i].queue_index = i;
1219     }
1220 
1221     vdev->name = name;
1222     vdev->config_len = config_size;
1223     if (vdev->config_len) {
1224         vdev->config = g_malloc0(config_size);
1225     } else {
1226         vdev->config = NULL;
1227     }
1228     vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
1229                                                      vdev);
1230     vdev->device_endian = virtio_default_endian();
1231 }
1232 
1233 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
1234 {
1235     return vdev->vq[n].vring.desc;
1236 }
1237 
1238 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
1239 {
1240     return vdev->vq[n].vring.avail;
1241 }
1242 
1243 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
1244 {
1245     return vdev->vq[n].vring.used;
1246 }
1247 
1248 hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
1249 {
1250     return vdev->vq[n].vring.desc;
1251 }
1252 
1253 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
1254 {
1255     return sizeof(VRingDesc) * vdev->vq[n].vring.num;
1256 }
1257 
1258 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
1259 {
1260     return offsetof(VRingAvail, ring) +
1261         sizeof(uint16_t) * vdev->vq[n].vring.num;
1262 }
1263 
1264 hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
1265 {
1266     return offsetof(VRingUsed, ring) +
1267         sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
1268 }
1269 
1270 hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
1271 {
1272     return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
1273            virtio_queue_get_used_size(vdev, n);
1274 }
1275 
1276 uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
1277 {
1278     return vdev->vq[n].last_avail_idx;
1279 }
1280 
1281 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
1282 {
1283     vdev->vq[n].last_avail_idx = idx;
1284 }
1285 
1286 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
1287 {
1288     vdev->vq[n].signalled_used_valid = false;
1289 }
1290 
1291 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
1292 {
1293     return vdev->vq + n;
1294 }
1295 
1296 uint16_t virtio_get_queue_index(VirtQueue *vq)
1297 {
1298     return vq->queue_index;
1299 }
1300 
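/*
 * Guest and host notifiers back the irqfd/ioeventfd paths: the guest
 * notifier stands in for the interrupt towards the guest, the host
 * notifier stands in for the guest's kick towards QEMU.  The read handlers
 * below are used when those events are serviced from the main loop rather
 * than directly by KVM.
 */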
1301 static void virtio_queue_guest_notifier_read(EventNotifier *n)
1302 {
1303     VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
1304     if (event_notifier_test_and_clear(n)) {
1305         virtio_irq(vq);
1306     }
1307 }
1308 
1309 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
1310                                                 bool with_irqfd)
1311 {
1312     if (assign && !with_irqfd) {
1313         event_notifier_set_handler(&vq->guest_notifier,
1314                                    virtio_queue_guest_notifier_read);
1315     } else {
1316         event_notifier_set_handler(&vq->guest_notifier, NULL);
1317     }
1318     if (!assign) {
1319         /* Test and clear notifier before closing it,
1320          * in case poll callback didn't have time to run. */
1321         virtio_queue_guest_notifier_read(&vq->guest_notifier);
1322     }
1323 }
1324 
1325 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
1326 {
1327     return &vq->guest_notifier;
1328 }
1329 
1330 static void virtio_queue_host_notifier_read(EventNotifier *n)
1331 {
1332     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
1333     if (event_notifier_test_and_clear(n)) {
1334         virtio_queue_notify_vq(vq);
1335     }
1336 }
1337 
1338 void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
1339                                                bool set_handler)
1340 {
1341     if (assign && set_handler) {
1342         event_notifier_set_handler(&vq->host_notifier,
1343                                    virtio_queue_host_notifier_read);
1344     } else {
1345         event_notifier_set_handler(&vq->host_notifier, NULL);
1346     }
1347     if (!assign) {
1348         /* Test and clear notifier after disabling the event,
1349          * in case poll callback didn't have time to run. */
1350         virtio_queue_host_notifier_read(&vq->host_notifier);
1351     }
1352 }
1353 
1354 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
1355 {
1356     return &vq->host_notifier;
1357 }
1358 
1359 void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
1360 {
1361     g_free(vdev->bus_name);
1362     vdev->bus_name = g_strdup(bus_name);
1363 }
1364 
1365 static void virtio_device_realize(DeviceState *dev, Error **errp)
1366 {
1367     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
1368     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
1369     Error *err = NULL;
1370 
1371     if (vdc->realize != NULL) {
1372         vdc->realize(dev, &err);
1373         if (err != NULL) {
1374             error_propagate(errp, err);
1375             return;
1376         }
1377     }
1378 
1379     virtio_bus_device_plugged(vdev, &err);
1380     if (err != NULL) {
1381         error_propagate(errp, err);
1382         return;
1383     }
1384 }
1385 
1386 static void virtio_device_unrealize(DeviceState *dev, Error **errp)
1387 {
1388     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
1389     VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
1390     Error *err = NULL;
1391 
1392     virtio_bus_device_unplugged(vdev);
1393 
1394     if (vdc->unrealize != NULL) {
1395         vdc->unrealize(dev, &err);
1396         if (err != NULL) {
1397             error_propagate(errp, err);
1398             return;
1399         }
1400     }
1401 
1402     g_free(vdev->bus_name);
1403     vdev->bus_name = NULL;
1404 }
1405 
1406 static Property virtio_properties[] = {
1407     DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
1408     DEFINE_PROP_END_OF_LIST(),
1409 };
1410 
1411 static void virtio_device_class_init(ObjectClass *klass, void *data)
1412 {
1413     /* Set the default value here. */
1414     DeviceClass *dc = DEVICE_CLASS(klass);
1415 
1416     dc->realize = virtio_device_realize;
1417     dc->unrealize = virtio_device_unrealize;
1418     dc->bus_type = TYPE_VIRTIO_BUS;
1419     dc->props = virtio_properties;
1420 }
1421 
1422 static const TypeInfo virtio_device_info = {
1423     .name = TYPE_VIRTIO_DEVICE,
1424     .parent = TYPE_DEVICE,
1425     .instance_size = sizeof(VirtIODevice),
1426     .class_init = virtio_device_class_init,
1427     .abstract = true,
1428     .class_size = sizeof(VirtioDeviceClass),
1429 };
1430 
1431 static void virtio_register_types(void)
1432 {
1433     type_register_static(&virtio_device_info);
1434 }
1435 
1436 type_init(virtio_register_types)
1437