--- virtio.c (ca0176ad8368668c5ad2b428361652e05984e930)
+++ virtio.c (97cd965c070152bc626c7507df9fb356bbe1cd81)
 /*
  * Virtio Support
  *
  * Copyright IBM, Corp. 2007
  *
  * Authors:
  *  Anthony Liguori   <aliguori@us.ibm.com>
  *

--- 159 unchanged lines hidden ---

     }
     vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
     vring->used = vring_align(vring->avail +
                               offsetof(VRingAvail, ring[vring->num]),
                               vring->align);
     virtio_init_region_cache(vdev, n);
 }
 
+/* Called within rcu_read_lock(). */
 static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                             MemoryRegionCache *cache, int i)
 {
     address_space_read_cached(cache, i * sizeof(VRingDesc),
                               desc, sizeof(VRingDesc));
     virtio_tswap64s(vdev, &desc->addr);
     virtio_tswap32s(vdev, &desc->len);
     virtio_tswap16s(vdev, &desc->flags);
     virtio_tswap16s(vdev, &desc->next);
 }
 
+/* Called within rcu_read_lock(). */
 static inline uint16_t vring_avail_flags(VirtQueue *vq)
 {
-    hwaddr pa;
-    pa = vq->vring.avail + offsetof(VRingAvail, flags);
-    return virtio_lduw_phys(vq->vdev, pa);
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingAvail, flags);
+    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
 }
 
+/* Called within rcu_read_lock(). */
 static inline uint16_t vring_avail_idx(VirtQueue *vq)
 {
-    hwaddr pa;
-    pa = vq->vring.avail + offsetof(VRingAvail, idx);
-    vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingAvail, idx);
+    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
     return vq->shadow_avail_idx;
 }
 
+/* Called within rcu_read_lock(). */
 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
 {
-    hwaddr pa;
-    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
-    return virtio_lduw_phys(vq->vdev, pa);
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingAvail, ring[i]);
+    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
 }
 
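[annotation] Every accessor converted above repeats one read-side pattern: fetch vq->vring.caches once with atomic_rcu_read() inside an RCU critical section, then address the ring through the cached mapping at a small structure offset. The stand-alone sketch below models that publication idiom using C11 atomics as a stand-in for QEMU's atomic_rcu_read()/atomic_rcu_set(); every name in it is hypothetical, and it omits the grace period a real RCU implementation needs before freeing the old snapshot.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for VRingMemoryRegionCaches: a snapshot that
     * readers must see fully initialized once they observe the pointer. */
    typedef struct {
        uint16_t avail_flags;
        uint16_t avail_idx;
    } VRingCachesToy;

    static _Atomic(VRingCachesToy *) caches;

    /* Publisher side: initialize the snapshot, then release-store the
     * pointer (the role atomic_rcu_set() plays in QEMU). */
    static void publish(uint16_t flags, uint16_t idx)
    {
        VRingCachesToy *c = malloc(sizeof(*c));
        c->avail_flags = flags;
        c->avail_idx = idx;
        atomic_store_explicit(&caches, c, memory_order_release);
    }

    /* Reader side: one acquire load of the pointer per critical section
     * (the role atomic_rcu_read() plays), then use the snapshot. */
    static uint16_t read_avail_idx(void)
    {
        VRingCachesToy *c = atomic_load_explicit(&caches, memory_order_acquire);
        return c->avail_idx;
    }

    int main(void)
    {
        publish(0, 42);
        printf("avail_idx = %u\n", read_avail_idx());   /* prints 42 */
        /* A real implementation frees the old snapshot only after an RCU
         * grace period; this sketch leaks it for simplicity. */
        return 0;
    }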
+/* Called within rcu_read_lock(). */
 static inline uint16_t vring_get_used_event(VirtQueue *vq)
 {
     return vring_avail_ring(vq, vq->vring.num);
 }
 
+/* Called within rcu_read_lock(). */
 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                     int i)
 {
-    hwaddr pa;
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingUsed, ring[i]);
     virtio_tswap32s(vq->vdev, &uelem->id);
     virtio_tswap32s(vq->vdev, &uelem->len);
-    pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
-    address_space_write(vq->vdev->dma_as, pa, MEMTXATTRS_UNSPECIFIED,
-                        (void *)uelem, sizeof(VRingUsedElem));
+    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
+    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
 }
 
+/* Called within rcu_read_lock(). */
 static uint16_t vring_used_idx(VirtQueue *vq)
 {
-    hwaddr pa;
-    pa = vq->vring.used + offsetof(VRingUsed, idx);
-    return virtio_lduw_phys(vq->vdev, pa);
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingUsed, idx);
+    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
 }
 
+/* Called within rcu_read_lock(). */
 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
 {
-    hwaddr pa;
-    pa = vq->vring.used + offsetof(VRingUsed, idx);
-    virtio_stw_phys(vq->vdev, pa, val);
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    hwaddr pa = offsetof(VRingUsed, idx);
+    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
+    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
     vq->used_idx = val;
 }
 
+/* Called within rcu_read_lock(). */
 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
 {
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
     VirtIODevice *vdev = vq->vdev;
-    hwaddr pa;
-    pa = vq->vring.used + offsetof(VRingUsed, flags);
-    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
+    hwaddr pa = offsetof(VRingUsed, flags);
+    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
+
+    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
+    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
 }
 
+/* Called within rcu_read_lock(). */
 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
 {
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
     VirtIODevice *vdev = vq->vdev;
-    hwaddr pa;
-    pa = vq->vring.used + offsetof(VRingUsed, flags);
-    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
+    hwaddr pa = offsetof(VRingUsed, flags);
+    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
+
+    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
+    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
 }
 
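[annotation] All four used-ring writers above follow the same two-step write path: store through the cached mapping (virtio_stw_phys_cached() / address_space_write_cached()), then call address_space_cache_invalidate() on the written range; my reading of the QEMU API is that the invalidate step is what pushes the update into dirty-memory tracking so migration and the guest observe it. A toy model of that write-then-flag discipline, with all names hypothetical:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical model: a host-side cached view of 16 bytes of guest
     * memory plus a per-byte dirty bitmap, standing in for
     * MemoryRegionCache and QEMU's dirty-memory tracking. */
    typedef struct {
        uint8_t backing[16];   /* "guest RAM" */
        bool dirty[16];        /* per-byte dirty flags */
    } ToyCache;

    static void toy_write_cached(ToyCache *c, unsigned addr,
                                 const void *buf, unsigned len)
    {
        memcpy(&c->backing[addr], buf, len);   /* step 1: store the data */
    }

    static void toy_cache_invalidate(ToyCache *c, unsigned addr, unsigned len)
    {
        /* step 2: flag the written range so other consumers notice it */
        for (unsigned i = addr; i < addr + len; i++) {
            c->dirty[i] = true;
        }
    }

    int main(void)
    {
        ToyCache c = {0};
        uint16_t idx = 7;

        toy_write_cached(&c, 2, &idx, sizeof(idx));
        toy_cache_invalidate(&c, 2, sizeof(idx));

        assert(c.dirty[2] && c.dirty[3] && !c.dirty[4]);
        return 0;
    }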
+/* Called within rcu_read_lock(). */
 static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
 {
+    VRingMemoryRegionCaches *caches;
     hwaddr pa;
     if (!vq->notification) {
         return;
     }
-    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
-    virtio_stw_phys(vq->vdev, pa, val);
+
+    caches = atomic_rcu_read(&vq->vring.caches);
+    pa = offsetof(VRingUsed, ring[vq->vring.num]);
+    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
 }
 
 void virtio_queue_set_notification(VirtQueue *vq, int enable)
 {
     vq->notification = enable;
+
+    rcu_read_lock();
     if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
         vring_set_avail_event(vq, vring_avail_idx(vq));
     } else if (enable) {
         vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
     } else {
         vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
     }
     if (enable) {
         /* Expose avail event/used flags before caller checks the avail idx. */
         smp_mb();
     }
+    rcu_read_unlock();
 }
 
 int virtio_queue_ready(VirtQueue *vq)
 {
     return vq->vring.avail != 0;
 }
 
 /* Fetch avail_idx from VQ memory only when we really need to know if
- * guest has added some buffers. */
-int virtio_queue_empty(VirtQueue *vq)
+ * guest has added some buffers.
+ * Called within rcu_read_lock(). */
+static int virtio_queue_empty_rcu(VirtQueue *vq)
 {
     if (vq->shadow_avail_idx != vq->last_avail_idx) {
         return 0;
     }
 
     return vring_avail_idx(vq) == vq->last_avail_idx;
 }
 
+int virtio_queue_empty(VirtQueue *vq)
+{
+    bool empty;
+
+    if (vq->shadow_avail_idx != vq->last_avail_idx) {
+        return 0;
+    }
+
+    rcu_read_lock();
+    empty = vring_avail_idx(vq) == vq->last_avail_idx;
+    rcu_read_unlock();
+    return empty;
+}
+
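[annotation] The split gives two entry points: virtio_queue_empty_rcu() for callers that already hold the RCU read lock (virtqueue_pop() below), and virtio_queue_empty(), which takes the lock itself around the single guest-memory access. Both consult vq->shadow_avail_idx first, so guest RAM is touched only when the cached index suggests the ring is empty. A stand-alone model of that shadow-index shortcut, with all names hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical model of a queue with a cached copy of the guest's
     * avail index. */
    typedef struct {
        uint16_t shadow_avail_idx;  /* last avail->idx value we read */
        uint16_t last_avail_idx;    /* next entry we will consume */
        uint16_t guest_avail_idx;   /* stands in for avail->idx in guest RAM */
    } ToyQueue;

    /* Stand-in for vring_avail_idx(): the "expensive" read that also
     * refreshes the shadow copy. */
    static uint16_t toy_read_avail_idx(ToyQueue *q)
    {
        q->shadow_avail_idx = q->guest_avail_idx;
        return q->shadow_avail_idx;
    }

    static int toy_queue_empty(ToyQueue *q)
    {
        /* Fast path: the shadow already proves there is work to do. */
        if (q->shadow_avail_idx != q->last_avail_idx) {
            return 0;
        }
        /* Slow path: re-read guest memory to see if new buffers arrived. */
        return toy_read_avail_idx(q) == q->last_avail_idx;
    }

    int main(void)
    {
        ToyQueue q = { .shadow_avail_idx = 5, .last_avail_idx = 5,
                       .guest_avail_idx = 5 };
        printf("empty: %d\n", toy_queue_empty(&q));  /* 1: nothing pending */
        q.guest_avail_idx = 7;                       /* guest posts buffers */
        printf("empty: %d\n", toy_queue_empty(&q));  /* 0: slow path sees 7 */
        return 0;
    }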
 static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                                unsigned int len)
 {
     AddressSpace *dma_as = vq->vdev->dma_as;
     unsigned int offset;
     int i;
 
     offset = 0;

--- 62 unchanged lines hidden ---

     if (num > vq->inuse) {
         return false;
     }
     vq->last_avail_idx -= num;
     vq->inuse -= num;
     return true;
 }
 
+/* Called within rcu_read_lock(). */
 void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len, unsigned int idx)
 {
     VRingUsedElem uelem;
 
     trace_virtqueue_fill(vq, elem, len, idx);
 
     virtqueue_unmap_sg(vq, elem, len);

--- 4 unchanged lines hidden ---

 
     idx = (idx + vq->used_idx) % vq->vring.num;
 
     uelem.id = elem->index;
     uelem.len = len;
     vring_used_write(vq, &uelem, idx);
 }
 
+/* Called within rcu_read_lock(). */
 void virtqueue_flush(VirtQueue *vq, unsigned int count)
 {
     uint16_t old, new;
 
     if (unlikely(vq->vdev->broken)) {
         vq->inuse -= count;
         return;
     }

--- 7 unchanged lines hidden ---

     vq->inuse -= count;
     if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
         vq->signalled_used_valid = false;
 }
 
 void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
 {
+    rcu_read_lock();
     virtqueue_fill(vq, elem, len, 0);
     virtqueue_flush(vq, 1);
+    rcu_read_unlock();
 }
 
+/* Called within rcu_read_lock(). */
 static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
 {
     uint16_t num_heads = vring_avail_idx(vq) - idx;
 
     /* Check it isn't doing very strange things with descriptor numbers. */
     if (num_heads > vq->vring.num) {
         virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                      idx, vq->shadow_avail_idx);
         return -EINVAL;
     }
     /* On success, callers read a descriptor at vq->last_avail_idx.
      * Make sure descriptor read does not bypass avail index read. */
     if (num_heads) {
         smp_rmb();
     }
 
     return num_heads;
 }
 
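[annotation] virtqueue_num_heads() relies on free-running 16-bit ring indices: avail_idx and last_avail_idx increment forever and wrap modulo 2^16, so their plain unsigned difference is the number of pending heads even across the wrap, and any value above vring.num can only come from a buggy or malicious guest. A self-contained check of that arithmetic (assumes nothing from QEMU):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Free-running 16-bit indices: the count of pending heads is the
     * unsigned difference, which stays correct across the
     * 0xffff -> 0x0000 wrap. */
    static uint16_t pending_heads(uint16_t avail_idx, uint16_t last_avail_idx)
    {
        return avail_idx - last_avail_idx;
    }

    int main(void)
    {
        /* No wrap: guest published 3 new heads. */
        assert(pending_heads(103, 100) == 3);

        /* Wrap: indices ran past 0xffff, difference is still 4. */
        assert(pending_heads(2, 0xfffe) == 4);

        /* Sanity bound from virtqueue_num_heads(): with a 256-entry ring,
         * any count above 256 proves corruption. */
        uint16_t vring_num = 256;
        assert(pending_heads(2, 0xfffe) <= vring_num);

        puts("ok");
        return 0;
    }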
+/* Called within rcu_read_lock(). */
 static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                                unsigned int *head)
 {
     /* Grab the next descriptor number they're advertising, and increment
      * the index we've seen. */
     *head = vring_avail_ring(vq, idx % vq->vring.num);
 
     /* If their number is silly, that's a fatal mistake. */

--- 285 unchanged lines hidden ---

     hwaddr addr[VIRTQUEUE_MAX_SIZE];
     struct iovec iov[VIRTQUEUE_MAX_SIZE];
     VRingDesc desc;
     int rc;
 
     if (unlikely(vdev->broken)) {
         return NULL;
     }
-    if (virtio_queue_empty(vq)) {
-        return NULL;
+    rcu_read_lock();
+    if (virtio_queue_empty_rcu(vq)) {
+        goto done;
     }
     /* Needed after virtio_queue_empty(), see comment in
      * virtqueue_num_heads(). */
     smp_rmb();
 
     /* When we start there are none of either input nor output. */
     out_num = in_num = 0;
 
     max = vq->vring.num;
 
     if (vq->inuse >= vq->vring.num) {
         virtio_error(vdev, "Virtqueue size exceeded");
-        return NULL;
+        goto done;
     }
 
     if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
-        return NULL;
+        goto done;
     }
 
     if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
         vring_set_avail_event(vq, vq->last_avail_idx);
     }
 
     i = head;
 
-    rcu_read_lock();
     caches = atomic_rcu_read(&vq->vring.caches);
     if (caches->desc.len < max * sizeof(VRingDesc)) {
         virtio_error(vdev, "Cannot map descriptor ring");
         goto done;
     }
 
     desc_cache = &caches->desc;
     vring_desc_read(vdev, &desc, desc_cache, i);

--- 699 unchanged lines hidden ---

     /* Do not write ISR if it does not change, so that its cacheline remains
      * shared in the common case where the guest does not read it.
      */
     if ((old & value) != value) {
         atomic_or(&vdev->isr, value);
     }
 }
 
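[annotation] The pop path now takes rcu_read_lock() before its first ring access and routes every early exit through the function's existing done label (the label itself, with the matching rcu_read_unlock(), falls in the elided lines above), so lock and unlock always pair up in one place. A minimal sketch of that single-exit idiom, with all names hypothetical:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for a lock and a queue check. */
    static void toy_lock(void)   { puts("lock"); }
    static void toy_unlock(void) { puts("unlock"); }
    static int  toy_queue_empty(int pending) { return pending == 0; }

    /* Single-exit pattern: acquire early, route every failure to one
     * cleanup label so lock/unlock always balance. */
    static void *toy_pop(int pending, int inuse, int ring_size)
    {
        void *elem = NULL;

        toy_lock();
        if (toy_queue_empty(pending)) {
            goto done;              /* early exit still unlocks */
        }
        if (inuse >= ring_size) {
            goto done;              /* error exit still unlocks */
        }
        elem = malloc(16);          /* stands in for the real element */
    done:
        toy_unlock();
        return elem;
    }

    int main(void)
    {
        free(toy_pop(0, 0, 8));     /* empty: lock/unlock, returns NULL */
        free(toy_pop(3, 0, 8));     /* success: lock/unlock, element */
        return 0;
    }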
+/* Called within rcu_read_lock(). */
 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
 {
     uint16_t old, new;
     bool v;
     /* We need to expose used array entries before checking used event. */
     smp_mb();
     /* Always notify when queue is empty (when feature acknowledge) */
     if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&

--- 9 unchanged lines hidden ---

     vq->signalled_used_valid = true;
     old = vq->signalled_used;
     new = vq->signalled_used = vq->used_idx;
     return !v || vring_need_event(vring_get_used_event(vq), new, old);
 }
 
 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
 {
-    if (!virtio_should_notify(vdev, vq)) {
+    bool should_notify;
+    rcu_read_lock();
+    should_notify = virtio_should_notify(vdev, vq);
+    rcu_read_unlock();
+
+    if (!should_notify) {
         return;
     }
 
     trace_virtio_notify_irqfd(vdev, vq);
 
     /*
      * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
      * windows drivers included in virtio-win 1.8.0 (circa 2015) are

--- 10 unchanged lines hidden ---

      * to an atomic operation.
      */
     virtio_set_isr(vq->vdev, 0x1);
     event_notifier_set(&vq->guest_notifier);
 }
 
 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
 {
-    if (!virtio_should_notify(vdev, vq)) {
+    bool should_notify;
+    rcu_read_lock();
+    should_notify = virtio_should_notify(vdev, vq);
+    rcu_read_unlock();
+
+    if (!should_notify) {
         return;
     }
 
     trace_virtio_notify(vdev, vq);
     virtio_set_isr(vq->vdev, 0x1);
     virtio_notify_vector(vdev, vq->vector);
 }
 
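[annotation] The last line of virtio_should_notify() is the event-index test applied to the (old, new] window of used entries published since the last signal. The helper it calls is the standard vring_need_event() formula from the virtio spec; the self-contained demo below reproduces that formula (the helper is as specified, the rest is scaffolding) and checks it on three cases, including index wraparound.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Event-index test from the virtio spec: notify iff event_idx lies
     * in the window of entries published since the last notification,
     * i.e. old < event_idx + 1 <= new_idx in free-running 16-bit
     * arithmetic. */
    static int vring_need_event(uint16_t event_idx, uint16_t new_idx,
                                uint16_t old)
    {
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
    }

    int main(void)
    {
        /* used_idx moved 10 -> 12; guest asked to be woken at entry 10. */
        assert(vring_need_event(10, 12, 10) == 1);

        /* Guest's event index is ahead of everything published: skip. */
        assert(vring_need_event(20, 12, 10) == 0);

        /* Still correct when the indices wrap past 0xffff. */
        assert(vring_need_event(0xfffe, 1, 0xfffd) == 1);

        puts("ok");
        return 0;
    }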

--- 444 unchanged lines hidden ---

         if (virtio_set_features_nocheck(vdev, features) < 0) {
             error_report("Features 0x%x unsupported. "
                          "Allowed features: 0x%" PRIx64,
                          features, vdev->host_features);
             return -1;
         }
     }
 
+    rcu_read_lock();
     for (i = 0; i < num; i++) {
         if (vdev->vq[i].vring.desc) {
             uint16_t nheads;
             nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
             /* Check it isn't doing strange things with descriptor numbers. */
             if (nheads > vdev->vq[i].vring.num) {
                 error_report("VQ %d size 0x%x Guest index 0x%x "
                              "inconsistent with Host index 0x%x: delta 0x%x",

--- 18 unchanged lines hidden ---

                              "used_idx 0x%x",
                              i, vdev->vq[i].vring.num,
                              vdev->vq[i].last_avail_idx,
                              vdev->vq[i].used_idx);
                 return -1;
             }
         }
     }
+    rcu_read_unlock();
 
     return 0;
 }
 
 void virtio_cleanup(VirtIODevice *vdev)
 {
     qemu_del_vm_change_state_handler(vdev->vmstate);
 }

--- 110 unchanged lines hidden ---

 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
 {
     vdev->vq[n].last_avail_idx = idx;
     vdev->vq[n].shadow_avail_idx = idx;
 }
 
 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
 {
+    rcu_read_lock();
     if (vdev->vq[n].vring.desc) {
         vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
     }
+    rcu_read_unlock();
 }
 
 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
 {
     vdev->vq[n].signalled_used_valid = false;
 }
 
 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)

--- 360 unchanged lines hidden ---