xref: /openbmc/qemu/hw/net/vhost_net.c (revision 6a0e10b7)
1 /*
2  * vhost-net support
3  *
4  * Copyright Red Hat, Inc. 2010
5  *
6  * Authors:
7  *  Michael S. Tsirkin <mst@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Contributions after 2012-01-13 are licensed under the terms of the
13  * GNU GPL, version 2 or (at your option) any later version.
14  */
15 
16 #include "qemu/osdep.h"
17 #include "net/net.h"
18 #include "net/tap.h"
19 #include "net/vhost-user.h"
20 #include "net/vhost-vdpa.h"
21 
22 #include "standard-headers/linux/vhost_types.h"
23 #include "hw/virtio/virtio-net.h"
24 #include "net/vhost_net.h"
25 #include "qapi/error.h"
26 #include "qemu/error-report.h"
27 #include "qemu/main-loop.h"
28 
29 #include <sys/socket.h>
30 #include <net/if.h>
31 #include <netinet/in.h>
32 
33 
34 #include "standard-headers/linux/virtio_ring.h"
35 #include "hw/virtio/vhost.h"
36 #include "hw/virtio/virtio-bus.h"
37 #include "linux-headers/linux/vhost.h"
38 
39 
40 /* Features supported by host kernel. */
/* Features supported by host kernel. */
static const int kernel_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_MTU,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_IN_ORDER,
    VIRTIO_F_NOTIFICATION_DATA,
    VIRTIO_NET_F_RSC_EXT,
    VIRTIO_NET_F_HASH_REPORT,
    /* Sentinel: terminates the list for vhost_get_features()/ack_features() */
    VHOST_INVALID_FEATURE_BIT
};
57 
58 /* Features supported by others. */
/* Features supported by others (i.e. the vhost-user backend). */
static const int user_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_NOTIFICATION_DATA,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,

    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_IN_ORDER,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_RSC_EXT,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HOST_USO,

    /* This bit implies RARP isn't sent by QEMU out of band */
    VIRTIO_NET_F_GUEST_ANNOUNCE,

    VIRTIO_NET_F_MQ,

    /* Sentinel: terminates the list for vhost_get_features()/ack_features() */
    VHOST_INVALID_FEATURE_BIT
};
98 
99 static const int *vhost_net_get_feature_bits(struct vhost_net *net)
100 {
101     const int *feature_bits = 0;
102 
103     switch (net->nc->info->type) {
104     case NET_CLIENT_DRIVER_TAP:
105         feature_bits = kernel_feature_bits;
106         break;
107     case NET_CLIENT_DRIVER_VHOST_USER:
108         feature_bits = user_feature_bits;
109         break;
110 #ifdef CONFIG_VHOST_NET_VDPA
111     case NET_CLIENT_DRIVER_VHOST_VDPA:
112         feature_bits = vdpa_feature_bits;
113         break;
114 #endif
115     default:
116         error_report("Feature bits not defined for this type: %d",
117                 net->nc->info->type);
118         break;
119     }
120 
121     return feature_bits;
122 }
123 
/*
 * Mask @features down to what the vhost backend supports, using the
 * feature-bit list appropriate for this backend type.
 */
uint64_t vhost_net_get_features(struct vhost_net *net, uint64_t features)
{
    return vhost_get_features(&net->dev, vhost_net_get_feature_bits(net),
            features);
}
/* Read up to @config_len bytes of device config space from the backend. */
int vhost_net_get_config(struct vhost_net *net,  uint8_t *config,
                         uint32_t config_len)
{
    return vhost_dev_get_config(&net->dev, config, config_len, NULL);
}
/* Write @size bytes of config space at @offset; @flags are backend-defined. */
int vhost_net_set_config(struct vhost_net *net, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags)
{
    return vhost_dev_set_config(&net->dev, data, offset, size, flags);
}
139 
/*
 * Record the guest-acked feature set.  Start from the backend's mandatory
 * features, then fold in the bits of @features the backend type supports.
 */
void vhost_net_ack_features(struct vhost_net *net, uint64_t features)
{
    net->dev.acked_features = net->dev.backend_features;
    vhost_ack_features(&net->dev, vhost_net_get_feature_bits(net), features);
}
145 
/* Maximum number of queues the vhost device supports. */
uint64_t vhost_net_get_max_queues(VHostNetState *net)
{
    return net->dev.max_queues;
}
150 
/* Feature set previously acked via vhost_net_ack_features(). */
uint64_t vhost_net_get_acked_features(VHostNetState *net)
{
    return net->dev.acked_features;
}
155 
/*
 * Persist acked features into the net client so they survive backend
 * reconnects.  Only vhost-user needs (and supports) this; other backend
 * types are a no-op.
 */
void vhost_net_save_acked_features(NetClientState *nc)
{
#ifdef CONFIG_VHOST_NET_USER
    if (nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_user_save_acked_features(nc);
    }
#endif
}
164 
/*
 * Unbind host notifiers (ioeventfds) for the first @nvhosts vhost devices
 * of @ncs and release the ioeventfd grabs taken at enable time.
 *
 * Two passes are required: set_host_notifier(false) for all queues inside
 * one memory transaction, then cleanup after commit — see the comments
 * below for why the order matters.
 */
static void vhost_net_disable_notifiers_nvhosts(VirtIODevice *dev,
                NetClientState *ncs, int data_queue_pairs, int nvhosts)
{
    VirtIONet *n = VIRTIO_NET(dev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    struct vhost_net *net;
    struct vhost_dev *hdev;
    int r, i, j;
    NetClientState *peer;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    for (i = 0; i < nvhosts; i++) {
        /* Indices beyond the data queue pairs refer to the control vq peer. */
        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else {
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }

        net = get_vhost_net(peer);
        hdev = &net->dev;
        for (j = 0; j < hdev->nvqs; j++) {
            r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus),
                                             hdev->vq_index + j,
                                             false);
            if (r < 0) {
                /* Report context before the assert below aborts. */
                error_report("vhost %d VQ %d notifier cleanup failed: %d",
                              i, j, -r);
            }
            /* Teardown must not fail; abort rather than continue half-wired. */
            assert(r >= 0);
        }
    }
    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    /* Second pass: close the notifier fds and drop the ioeventfd grabs. */
    for (i = 0; i < nvhosts; i++) {
        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else {
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }

        net = get_vhost_net(peer);
        hdev = &net->dev;
        for (j = 0; j < hdev->nvqs; j++) {
            virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus),
                                             hdev->vq_index + j);
        }
        /* Balances the virtio_device_grab_ioeventfd() taken per vhost. */
        virtio_device_release_ioeventfd(dev);
    }
}
223 
/*
 * Bind host notifiers (ioeventfds) for all data queue pairs plus the
 * optional control vq, grabbing the device ioeventfd once per vhost so
 * QEMU stops handling those queues itself.
 *
 * Returns 0 on success or a negative errno; on failure everything set up
 * so far is torn down via vhost_net_disable_notifiers_nvhosts().
 */
static int vhost_net_enable_notifiers(VirtIODevice *dev,
                NetClientState *ncs, int data_queue_pairs, int cvq)
{
    VirtIONet *n = VIRTIO_NET(dev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    int nvhosts = data_queue_pairs + cvq;
    struct vhost_net *net;
    struct vhost_dev *hdev;
    int r, i, j;
    NetClientState *peer;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    for (i = 0; i < nvhosts; i++) {
        /* Indices beyond the data queue pairs refer to the control vq peer. */
        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else {
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }

        net = get_vhost_net(peer);
        hdev = &net->dev;
        /*
         * We will pass the notifiers to the kernel, make sure that QEMU
         * doesn't interfere.
         */
        r = virtio_device_grab_ioeventfd(dev);
        if (r < 0) {
            error_report("binding does not support host notifiers");
            /* Commit before unwinding; the transaction may not stay open. */
            memory_region_transaction_commit();
            goto fail_nvhosts;
        }

        for (j = 0; j < hdev->nvqs; j++) {
            r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus),
                                             hdev->vq_index + j,
                                             true);
            if (r < 0) {
                error_report("vhost %d VQ %d notifier binding failed: %d",
                              i, j, -r);
                memory_region_transaction_commit();
                /* Undo the first j queues of this partially-bound vhost. */
                vhost_dev_disable_notifiers_nvqs(hdev, dev, j);
                goto fail_nvhosts;
            }
        }
    }

    memory_region_transaction_commit();

    return 0;
fail_nvhosts:
    /* Tear down the i vhosts that were fully set up before the failure. */
    vhost_net_disable_notifiers_nvhosts(dev, ncs, data_queue_pairs, i);
    return r;
}
282 
283 /*
284  * Stop processing guest IO notifications in qemu.
285  * Start processing them in vhost in kernel.
286  */
/*
 * Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
static void vhost_net_disable_notifiers(VirtIODevice *dev,
                NetClientState *ncs, int data_queue_pairs, int cvq)
{
    /* Disable every vhost: all data queue pairs plus the control vq. */
    vhost_net_disable_notifiers_nvhosts(dev, ncs, data_queue_pairs,
                                        data_queue_pairs + cvq);
}
293 
294 static int vhost_net_get_fd(NetClientState *backend)
295 {
296     switch (backend->info->type) {
297     case NET_CLIENT_DRIVER_TAP:
298         return tap_get_fd(backend);
299     default:
300         fprintf(stderr, "vhost-net requires tap backend\n");
301         return -ENOSYS;
302     }
303 }
304 
/*
 * Allocate and initialize a vhost_net instance from @options.
 *
 * For kernel backends (tap) the tap fd is captured and header-related
 * backend features are derived; for user/vdpa backends the queue index is
 * derived from the net client.  Returns the new instance, or NULL on
 * failure (all partially-initialized state is cleaned up).
 */
struct vhost_net *vhost_net_init(VhostNetOptions *options)
{
    int r;
    bool backend_kernel = options->backend_type == VHOST_BACKEND_TYPE_KERNEL;
    struct vhost_net *net = g_new0(struct vhost_net, 1);
    uint64_t features = 0;
    Error *local_err = NULL;

    if (!options->net_backend) {
        fprintf(stderr, "vhost-net requires net backend to be setup\n");
        goto fail;
    }
    net->nc = options->net_backend;
    net->dev.nvqs = options->nvqs;

    net->dev.max_queues = 1;
    /* vqs storage is embedded in vhost_net; point the dev at it. */
    net->dev.vqs = net->vqs;

    if (backend_kernel) {
        r = vhost_net_get_fd(options->net_backend);
        if (r < 0) {
            goto fail;
        }
        /*
         * If the tap device cannot supply a virtio-net header itself,
         * require VHOST_NET_F_VIRTIO_NET_HDR from the kernel backend.
         */
        net->dev.backend_features = qemu_has_vnet_hdr(options->net_backend)
            ? 0 : (1ULL << VHOST_NET_F_VIRTIO_NET_HDR);
        net->backend = r;
        net->dev.protocol_features = 0;
    } else {
        net->dev.backend_features = 0;
        net->dev.protocol_features = 0;
        net->backend = -1;

        /* vhost-user needs vq_index to initiate a specific queue pair */
        net->dev.vq_index = net->nc->queue_index * net->dev.nvqs;
    }

    r = vhost_dev_init(&net->dev, options->opaque,
                       options->backend_type, options->busyloop_timeout,
                       &local_err);
    if (r < 0) {
        error_report_err(local_err);
        goto fail;
    }
    if (backend_kernel) {
        /* Without a mergeable-rxbuf-sized header, drop MRG_RXBUF support. */
        if (!qemu_has_vnet_hdr_len(options->net_backend,
                               sizeof(struct virtio_net_hdr_mrg_rxbuf))) {
            net->dev.features &= ~(1ULL << VIRTIO_NET_F_MRG_RXBUF);
        }
        /* The backend must not require features vhost does not offer. */
        if (~net->dev.features & net->dev.backend_features) {
            fprintf(stderr, "vhost lacks feature mask 0x%" PRIx64
                   " for backend\n",
                   (uint64_t)(~net->dev.features & net->dev.backend_features));
            goto fail;
        }
    }

    /* Set sane init value. Override when guest acks. */
#ifdef CONFIG_VHOST_NET_USER
    if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        /* Restore features acked before a vhost-user reconnect. */
        features = vhost_user_get_acked_features(net->nc);
        if (~net->dev.features & features) {
            fprintf(stderr, "vhost lacks feature mask 0x%" PRIx64
                    " for backend\n",
                    (uint64_t)(~net->dev.features & features));
            goto fail;
        }
    }
#endif

    vhost_net_ack_features(net, features);

    return net;

fail:
    /* Safe on a zeroed dev too (fail before vhost_dev_init). */
    vhost_dev_cleanup(&net->dev);
    g_free(net);
    return NULL;
}
383 
/* Record this vhost's first vq index and the device-wide end index. */
static void vhost_net_set_vq_index(struct vhost_net *net, int vq_index,
                                   int vq_index_end)
{
    net->dev.vq_index = vq_index;
    net->dev.vq_index_end = vq_index_end;
}
390 
/*
 * Start a single vhost device: optional client start hook, vhost_dev_start,
 * stop QEMU polling of the backend, attach the tap fd to each enabled vq,
 * then the optional client load hook.
 *
 * Returns 0 on success or a negative errno; on failure everything done so
 * far is rolled back in reverse order.
 */
static int vhost_net_start_one(struct vhost_net *net,
                               VirtIODevice *dev)
{
    struct vhost_vring_file file = { };
    int r;

    if (net->nc->info->start) {
        r = net->nc->info->start(net->nc);
        if (r < 0) {
            return r;
        }
    }

    r = vhost_dev_start(&net->dev, dev, false);
    if (r < 0) {
        goto fail_start;
    }

    /* QEMU must stop reading the backend fd; vhost owns it now. */
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, false);
    }

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        qemu_set_fd_handler(net->backend, NULL, NULL, NULL);
        file.fd = net->backend;
        for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
            if (!virtio_queue_enabled(dev, net->dev.vq_index +
                                      file.index)) {
                /* Queue might not be ready for start */
                continue;
            }
            r = vhost_net_set_backend(&net->dev, &file);
            if (r < 0) {
                r = -errno;
                goto fail;
            }
        }
    }

    if (net->nc->info->load) {
        r = net->nc->info->load(net->nc);
        if (r < 0) {
            goto fail;
        }
    }
    return 0;
fail:
    /* Detach the backend (fd = -1) from the queues attached above. */
    file.fd = -1;
    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        /* file.index still holds how far the attach loop got. */
        while (file.index-- > 0) {
            if (!virtio_queue_enabled(dev, net->dev.vq_index +
                                      file.index)) {
                /* Queue might not be ready for start */
                continue;
            }
            int ret = vhost_net_set_backend(&net->dev, &file);
            assert(ret >= 0);
        }
    }
    /* Resume QEMU polling before stopping the vhost device. */
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, true);
    }
    vhost_dev_stop(&net->dev, dev, false);
fail_start:
    return r;
}
457 
/*
 * Stop a single vhost device: detach the backend fd from every vq (tap
 * only), resume QEMU polling, stop the vhost device, then the optional
 * client stop hook — the reverse of vhost_net_start_one().
 */
static void vhost_net_stop_one(struct vhost_net *net,
                               VirtIODevice *dev)
{
    /* fd = -1 detaches the backend from a queue. */
    struct vhost_vring_file file = { .fd = -1 };

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
            int r = vhost_net_set_backend(&net->dev, &file);
            assert(r >= 0);
        }
    }
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, true);
    }
    vhost_dev_stop(&net->dev, dev, false);
    if (net->nc->info->stop) {
        net->nc->info->stop(net->nc);
    }
}
477 
/*
 * Start vhost for the whole virtio-net device: @data_queue_pairs pairs of
 * rx/tx queues plus an optional control vq (@cvq is 0 or 1).
 *
 * Sequence: assign vq index ranges, bind host notifiers, bind guest
 * notifiers, then start each vhost (restoring vring enable state first).
 * Returns 0 on success or a negative errno; partial progress is unwound
 * via the err_* labels in reverse order.
 */
int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
                    int data_queue_pairs, int cvq)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    /* Two guest notifiers per data pair (rx+tx) plus one for the cvq. */
    int total_notifiers = data_queue_pairs * 2 + cvq;
    VirtIONet *n = VIRTIO_NET(dev);
    int nvhosts = data_queue_pairs + cvq;
    struct vhost_net *net;
    int r, e, i, index_end = data_queue_pairs * 2;
    NetClientState *peer;

    if (cvq) {
        index_end += 1;
    }

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return -ENOSYS;
    }

    for (i = 0; i < nvhosts; i++) {

        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else { /* Control Virtqueue */
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }

        net = get_vhost_net(peer);
        /* Each data vhost covers vqs [2i, 2i+2); the cvq vhost covers one. */
        vhost_net_set_vq_index(net, i * 2, index_end);

        /* Suppress the masking guest notifiers on vhost user
         * because vhost user doesn't interrupt masking/unmasking
         * properly.
         */
        if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
            dev->use_guest_notifier_mask = false;
        }
     }

    r = vhost_net_enable_notifiers(dev, ncs, data_queue_pairs, cvq);
    if (r < 0) {
        error_report("Error enabling host notifiers: %d", -r);
        goto err;
    }

    r = k->set_guest_notifiers(qbus->parent, total_notifiers, true);
    if (r < 0) {
        error_report("Error binding guest notifier: %d", -r);
        goto err_host_notifiers;
    }

    for (i = 0; i < nvhosts; i++) {
        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else {
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }

        if (peer->vring_enable) {
            /* restore vring enable state */
            r = vhost_set_vring_enable(peer, peer->vring_enable);

            if (r < 0) {
                goto err_guest_notifiers;
            }
        }

        r = vhost_net_start_one(get_vhost_net(peer), dev);
        if (r < 0) {
            goto err_guest_notifiers;
        }
    }

    return 0;

err_guest_notifiers:
    /* Stop only the vhosts that were started before the failure. */
    while (--i >= 0) {
        peer = qemu_get_peer(ncs, i < data_queue_pairs ?
                                  i : n->max_queue_pairs);
        vhost_net_stop_one(get_vhost_net(peer), dev);
    }
    e = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
    if (e < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
        fflush(stderr);
    }
err_host_notifiers:
    vhost_net_disable_notifiers(dev, ncs, data_queue_pairs, cvq);
err:
    return r;
}
572 
/*
 * Stop vhost for the whole virtio-net device — reverse of
 * vhost_net_start(): stop each vhost, unbind guest notifiers, then
 * unbind host notifiers.
 */
void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
                    int data_queue_pairs, int cvq)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *peer;
    int total_notifiers = data_queue_pairs * 2 + cvq;
    int nvhosts = data_queue_pairs + cvq;
    int i, r;

    for (i = 0; i < nvhosts; i++) {
        /* Indices beyond the data queue pairs refer to the control vq peer. */
        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else {
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }
        vhost_net_stop_one(get_vhost_net(peer), dev);
    }

    r = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
    if (r < 0) {
        /* Report context before the assert below aborts. */
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
        fflush(stderr);
    }
    assert(r >= 0);

    vhost_net_disable_notifiers(dev, ncs, data_queue_pairs, cvq);
}
603 
/* Release vhost device resources; does not free @net itself. */
void vhost_net_cleanup(struct vhost_net *net)
{
    vhost_dev_cleanup(&net->dev);
}
608 
/*
 * Tell a vhost-user backend that migration finished so it can announce
 * @mac_addr.  Only valid for vhost-user backends that implement the
 * migration-done callback (both enforced by assert).
 */
int vhost_net_notify_migration_done(struct vhost_net *net, char* mac_addr)
{
    const VhostOps *vhost_ops = net->dev.vhost_ops;

    assert(vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
    assert(vhost_ops->vhost_migration_done);

    return vhost_ops->vhost_migration_done(&net->dev, mac_addr);
}
618 
/* True if virtqueue @idx has a pending (unserviced) notification. */
bool vhost_net_virtqueue_pending(VHostNetState *net, int idx)
{
    return vhost_virtqueue_pending(&net->dev, idx);
}
623 
/* Mask or unmask guest notifications for virtqueue @idx. */
void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
                              int idx, bool mask)
{
    vhost_virtqueue_mask(&net->dev, dev, idx, mask);
}
629 
/* True if a config-change interrupt is pending on the vhost device. */
bool vhost_net_config_pending(VHostNetState *net)
{
    return vhost_config_pending(&net->dev);
}
634 
/* Mask or unmask the config-change interrupt of the vhost device. */
void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev, bool mask)
{
    vhost_config_mask(&net->dev, dev, mask);
}
639 VHostNetState *get_vhost_net(NetClientState *nc)
640 {
641     VHostNetState *vhost_net = 0;
642 
643     if (!nc) {
644         return 0;
645     }
646 
647     switch (nc->info->type) {
648     case NET_CLIENT_DRIVER_TAP:
649         vhost_net = tap_get_vhost_net(nc);
650         /*
651          * tap_get_vhost_net() can return NULL if a tap net-device backend is
652          * created with 'vhost=off' option, 'vhostforce=off' or no vhost or
653          * vhostforce or vhostfd options at all. Please see net_init_tap_one().
654          * Hence, we omit the assertion here.
655          */
656         break;
657 #ifdef CONFIG_VHOST_NET_USER
658     case NET_CLIENT_DRIVER_VHOST_USER:
659         vhost_net = vhost_user_get_vhost_net(nc);
660         assert(vhost_net);
661         break;
662 #endif
663 #ifdef CONFIG_VHOST_NET_VDPA
664     case NET_CLIENT_DRIVER_VHOST_VDPA:
665         vhost_net = vhost_vdpa_get_vhost_net(nc);
666         assert(vhost_net);
667         break;
668 #endif
669     default:
670         break;
671     }
672 
673     return vhost_net;
674 }
675 
676 int vhost_set_vring_enable(NetClientState *nc, int enable)
677 {
678     VHostNetState *net = get_vhost_net(nc);
679     const VhostOps *vhost_ops = net->dev.vhost_ops;
680 
681     /*
682      * vhost-vdpa network devices need to enable dataplane virtqueues after
683      * DRIVER_OK, so they can recover device state before starting dataplane.
684      * Because of that, we don't enable virtqueues here and leave it to
685      * net/vhost-vdpa.c.
686      */
687     if (nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
688         return 0;
689     }
690 
691     nc->vring_enable = enable;
692 
693     if (vhost_ops && vhost_ops->vhost_set_vring_enable) {
694         return vhost_ops->vhost_set_vring_enable(&net->dev, enable);
695     }
696 
697     return 0;
698 }
699 
700 int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu)
701 {
702     const VhostOps *vhost_ops = net->dev.vhost_ops;
703 
704     if (!vhost_ops->vhost_net_set_mtu) {
705         return 0;
706     }
707 
708     return vhost_ops->vhost_net_set_mtu(&net->dev, mtu);
709 }
710 
/*
 * Stop a single virtqueue (device-relative index @vq_index) in response to
 * a guest-initiated queue reset: detach the tap backend fd from it first,
 * then stop the vhost virtqueue.
 */
void vhost_net_virtqueue_reset(VirtIODevice *vdev, NetClientState *nc,
                               int vq_index)
{
    VHostNetState *net = get_vhost_net(nc->peer);
    const VhostOps *vhost_ops = net->dev.vhost_ops;
    /* fd = -1 detaches the backend from the queue. */
    struct vhost_vring_file file = { .fd = -1 };
    int idx;

    /* should only be called after backend is connected */
    assert(vhost_ops);

    /* Translate the device-relative vq index to this vhost's local index. */
    idx = vhost_ops->vhost_get_vq_index(&net->dev, vq_index);

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        file.index = idx;
        int r = vhost_net_set_backend(&net->dev, &file);
        assert(r >= 0);
    }

    vhost_virtqueue_stop(&net->dev,
                         vdev,
                         net->dev.vqs + idx,
                         net->dev.vq_index + idx);
}
735 
/*
 * Restart a single virtqueue (device-relative index @vq_index) after a
 * guest-initiated queue reset: start the vhost virtqueue, then re-attach
 * the tap backend fd.  Returns 0 on success or a negative errno; on
 * failure the queue is unbound and the whole vhost device is stopped.
 */
int vhost_net_virtqueue_restart(VirtIODevice *vdev, NetClientState *nc,
                                int vq_index)
{
    VHostNetState *net = get_vhost_net(nc->peer);
    const VhostOps *vhost_ops = net->dev.vhost_ops;
    struct vhost_vring_file file = { };
    int idx, r;

    /* Restarting a queue only makes sense on a started device. */
    if (!net->dev.started) {
        return -EBUSY;
    }

    /* should only be called after backend is connected */
    assert(vhost_ops);

    /* Translate the device-relative vq index to this vhost's local index. */
    idx = vhost_ops->vhost_get_vq_index(&net->dev, vq_index);

    r = vhost_virtqueue_start(&net->dev,
                              vdev,
                              net->dev.vqs + idx,
                              net->dev.vq_index + idx);
    if (r < 0) {
        goto err_start;
    }

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        file.index = idx;
        file.fd = net->backend;
        r = vhost_net_set_backend(&net->dev, &file);
        if (r < 0) {
            r = -errno;
            goto err_start;
        }
    }

    return 0;

err_start:
    error_report("Error when restarting the queue.");

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        /* Explicitly unbind the backend from the queue we failed to start. */
        file.fd = VHOST_FILE_UNBIND;
        file.index = idx;
        int ret = vhost_net_set_backend(&net->dev, &file);
        assert(ret >= 0);
    }

    vhost_dev_stop(&net->dev, vdev, false);

    return r;
}
787