/*
 * vhost-net support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "net/net.h"
#include "net/tap.h"
#include "net/vhost-user.h"
#include "net/vhost-vdpa.h"

#include "standard-headers/linux/vhost_types.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"

#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>


#include "standard-headers/linux/virtio_ring.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/virtio-bus.h"
#include "linux-headers/linux/vhost.h"


/* Features supported by host kernel. */
static const int kernel_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_MTU,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_IN_ORDER,
    VIRTIO_F_NOTIFICATION_DATA,
    VIRTIO_NET_F_RSC_EXT,
    VIRTIO_NET_F_HASH_REPORT,
    VHOST_INVALID_FEATURE_BIT
};

/* Features supported by the vhost-user backend. */
static const int user_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_NOTIFICATION_DATA,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,

    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_IN_ORDER,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_RSC_EXT,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HOST_USO,

    /* This bit implies RARP isn't sent by QEMU out of band */
    VIRTIO_NET_F_GUEST_ANNOUNCE,

    VIRTIO_NET_F_MQ,

    VHOST_INVALID_FEATURE_BIT
};

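/*
 * Pick the feature bit list that matches the backend type of the net
 * client: the kernel list for tap, the user list for vhost-user, and
 * the vdpa list when vhost-vdpa support is compiled in.
 */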
static const int *vhost_net_get_feature_bits(struct vhost_net *net)
{
    const int *feature_bits = 0;

    switch (net->nc->info->type) {
    case NET_CLIENT_DRIVER_TAP:
        feature_bits = kernel_feature_bits;
        break;
    case NET_CLIENT_DRIVER_VHOST_USER:
        feature_bits = user_feature_bits;
        break;
#ifdef CONFIG_VHOST_NET_VDPA
    case NET_CLIENT_DRIVER_VHOST_VDPA:
        feature_bits = vdpa_feature_bits;
        break;
#endif
    default:
        error_report("Feature bits not defined for this type: %d",
                     net->nc->info->type);
        break;
    }

    return feature_bits;
}

uint64_t vhost_net_get_features(struct vhost_net *net, uint64_t features)
{
    return vhost_get_features(&net->dev, vhost_net_get_feature_bits(net),
                              features);
}

int vhost_net_get_config(struct vhost_net *net, uint8_t *config,
                         uint32_t config_len)
{
    return vhost_dev_get_config(&net->dev, config, config_len, NULL);
}

int vhost_net_set_config(struct vhost_net *net, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags)
{
    return vhost_dev_set_config(&net->dev, data, offset, size, flags);
}

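/*
 * Start from the features the backend itself requires, then fold in
 * those bits of @features that are listed for this backend type.
 */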
void vhost_net_ack_features(struct vhost_net *net, uint64_t features)
{
    net->dev.acked_features = net->dev.backend_features;
    vhost_ack_features(&net->dev, vhost_net_get_feature_bits(net), features);
}

uint64_t vhost_net_get_max_queues(VHostNetState *net)
{
    return net->dev.max_queues;
}

uint64_t vhost_net_get_acked_features(VHostNetState *net)
{
    return net->dev.acked_features;
}

void vhost_net_save_acked_features(NetClientState *nc)
{
#ifdef CONFIG_VHOST_NET_USER
    if (nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_user_save_acked_features(nc);
    }
#endif
}

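/*
 * Tear down the host notifiers of the first @nvhosts net clients in
 * @ncs in two passes: first switch every notifier off inside a single
 * memory region transaction, then clean up the ioeventfds once the
 * transaction has committed.
 */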
static void vhost_net_disable_notifiers_nvhosts(VirtIODevice *dev,
                NetClientState *ncs, int data_queue_pairs, int nvhosts)
{
    VirtIONet *n = VIRTIO_NET(dev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    struct vhost_net *net;
    struct vhost_dev *hdev;
    int r, i, j;
    NetClientState *peer;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    for (i = 0; i < nvhosts; i++) {
        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else {
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }

        net = get_vhost_net(peer);
        hdev = &net->dev;
        for (j = 0; j < hdev->nvqs; j++) {
            r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus),
                                             hdev->vq_index + j,
                                             false);
            if (r < 0) {
                error_report("vhost %d VQ %d notifier cleanup failed: %d",
                             i, j, -r);
            }
            assert(r >= 0);
        }
    }
    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    for (i = 0; i < nvhosts; i++) {
        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else {
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }

        net = get_vhost_net(peer);
        hdev = &net->dev;
        for (j = 0; j < hdev->nvqs; j++) {
            virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus),
                                             hdev->vq_index + j);
        }
        virtio_device_release_ioeventfd(dev);
    }
}

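/*
 * Grab one ioeventfd reference per vhost device, then bind all host
 * notifiers inside a single memory region transaction. On failure,
 * undo whatever has been set up so far and drop the ioeventfd
 * references that are still held.
 */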
static int vhost_net_enable_notifiers(VirtIODevice *dev,
                NetClientState *ncs, int data_queue_pairs, int cvq)
{
    VirtIONet *n = VIRTIO_NET(dev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    int nvhosts = data_queue_pairs + cvq;
    struct vhost_net *net;
    struct vhost_dev *hdev;
    int r, i, j, k;
    NetClientState *peer;

    /*
     * We will pass the notifiers to the kernel; make sure that QEMU
     * doesn't interfere.
     */
    for (i = 0; i < nvhosts; i++) {
        r = virtio_device_grab_ioeventfd(dev);
        if (r < 0) {
            error_report("vhost %d binding does not support host notifiers", i);
            for (k = 0; k < i; k++) {
                virtio_device_release_ioeventfd(dev);
            }
            return r;
        }
    }

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    for (i = 0; i < nvhosts; i++) {
        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else {
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }

        net = get_vhost_net(peer);
        hdev = &net->dev;

        for (j = 0; j < hdev->nvqs; j++) {
            r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus),
                                             hdev->vq_index + j,
                                             true);
            if (r < 0) {
                error_report("vhost %d VQ %d notifier binding failed: %d",
                             i, j, -r);
                memory_region_transaction_commit();
                vhost_dev_disable_notifiers_nvqs(hdev, dev, j);
                goto fail_nvhosts;
            }
        }
    }

    memory_region_transaction_commit();

    return 0;
fail_nvhosts:
    vhost_net_disable_notifiers_nvhosts(dev, ncs, data_queue_pairs, i);
    /*
     * This for loop starts from i+1, not i, because the i-th ioeventfd
     * has already been released in vhost_dev_disable_notifiers_nvqs().
     */
    for (k = i + 1; k < nvhosts; k++) {
        virtio_device_release_ioeventfd(dev);
    }

    return r;
}

/*
 * Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 */
static void vhost_net_disable_notifiers(VirtIODevice *dev,
                NetClientState *ncs, int data_queue_pairs, int cvq)
{
    vhost_net_disable_notifiers_nvhosts(dev, ncs, data_queue_pairs,
                                        data_queue_pairs + cvq);
}

static int vhost_net_get_fd(NetClientState *backend)
{
    switch (backend->info->type) {
    case NET_CLIENT_DRIVER_TAP:
        return tap_get_fd(backend);
    default:
        fprintf(stderr, "vhost-net requires tap backend\n");
        return -ENOSYS;
    }
}

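/*
 * Allocate and initialize a vhost_net for the given backend. For the
 * kernel backend this resolves the tap fd and derives the backend
 * features; for other backends the queue pair index comes from the
 * net client. Returns NULL on failure.
 */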
struct vhost_net *vhost_net_init(VhostNetOptions *options)
{
    int r;
    bool backend_kernel = options->backend_type == VHOST_BACKEND_TYPE_KERNEL;
    struct vhost_net *net = g_new0(struct vhost_net, 1);
    uint64_t features = 0;
    Error *local_err = NULL;

    if (!options->net_backend) {
        fprintf(stderr, "vhost-net requires net backend to be set up\n");
        goto fail;
    }
    net->nc = options->net_backend;
    net->dev.nvqs = options->nvqs;

    net->dev.max_queues = 1;
    net->dev.vqs = net->vqs;

    if (backend_kernel) {
        r = vhost_net_get_fd(options->net_backend);
        if (r < 0) {
            goto fail;
        }
        net->dev.backend_features = qemu_has_vnet_hdr(options->net_backend)
            ? 0 : (1ULL << VHOST_NET_F_VIRTIO_NET_HDR);
        net->backend = r;
        net->dev.protocol_features = 0;
    } else {
        net->dev.backend_features = 0;
        net->dev.protocol_features = 0;
        net->backend = -1;

        /* vhost-user needs vq_index to initiate a specific queue pair */
        net->dev.vq_index = net->nc->queue_index * net->dev.nvqs;
    }

    r = vhost_dev_init(&net->dev, options->opaque,
                       options->backend_type, options->busyloop_timeout,
                       &local_err);
    if (r < 0) {
        error_report_err(local_err);
        goto fail;
    }
    if (backend_kernel) {
        if (!qemu_has_vnet_hdr_len(options->net_backend,
                                   sizeof(struct virtio_net_hdr_mrg_rxbuf))) {
            net->dev.features &= ~(1ULL << VIRTIO_NET_F_MRG_RXBUF);
        }
        if (~net->dev.features & net->dev.backend_features) {
            fprintf(stderr, "vhost lacks feature mask 0x%" PRIx64
                    " for backend\n",
                    (uint64_t)(~net->dev.features & net->dev.backend_features));
            goto fail;
        }
    }

    /* Set sane init value. Override when guest acks. */
#ifdef CONFIG_VHOST_NET_USER
    if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        features = vhost_user_get_acked_features(net->nc);
        if (~net->dev.features & features) {
            fprintf(stderr, "vhost lacks feature mask 0x%" PRIx64
                    " for backend\n",
                    (uint64_t)(~net->dev.features & features));
            goto fail;
        }
    }
#endif

    vhost_net_ack_features(net, features);

    return net;

fail:
    vhost_dev_cleanup(&net->dev);
    g_free(net);
    return NULL;
}

static void vhost_net_set_vq_index(struct vhost_net *net, int vq_index,
                                   int vq_index_end)
{
    net->dev.vq_index = vq_index;
    net->dev.vq_index_end = vq_index_end;
}

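/*
 * Start one vhost device: run the net client's start callback if any,
 * start the vhost device, stop QEMU's polling of the backend fd and,
 * for tap, attach that fd to every enabled virtqueue. Each failure
 * path unwinds the steps taken before it.
 */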
static int vhost_net_start_one(struct vhost_net *net,
                               VirtIODevice *dev)
{
    struct vhost_vring_file file = { };
    int r;

    if (net->nc->info->start) {
        r = net->nc->info->start(net->nc);
        if (r < 0) {
            return r;
        }
    }

    r = vhost_dev_start(&net->dev, dev, false);
    if (r < 0) {
        goto fail_start;
    }

    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, false);
    }

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        qemu_set_fd_handler(net->backend, NULL, NULL, NULL);
        file.fd = net->backend;
        for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
            if (!virtio_queue_enabled(dev, net->dev.vq_index +
                                      file.index)) {
                /* Queue might not be ready for start */
                continue;
            }
            r = vhost_net_set_backend(&net->dev, &file);
            if (r < 0) {
                r = -errno;
                goto fail;
            }
        }
    }

    if (net->nc->info->load) {
        r = net->nc->info->load(net->nc);
        if (r < 0) {
            goto fail;
        }
    }
    return 0;
fail:
    file.fd = -1;
    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        while (file.index-- > 0) {
            if (!virtio_queue_enabled(dev, net->dev.vq_index +
                                      file.index)) {
                /* Queue might not be ready for start */
                continue;
            }
            int ret = vhost_net_set_backend(&net->dev, &file);
            assert(ret >= 0);
        }
    }
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, true);
    }
    vhost_dev_stop(&net->dev, dev, false);
fail_start:
    return r;
}

static void vhost_net_stop_one(struct vhost_net *net,
                               VirtIODevice *dev)
{
    struct vhost_vring_file file = { .fd = -1 };

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
            int r = vhost_net_set_backend(&net->dev, &file);
            assert(r >= 0);
        }
    }
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, true);
    }
    vhost_dev_stop(&net->dev, dev, false);
    if (net->nc->info->stop) {
        net->nc->info->stop(net->nc);
    }
}

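/*
 * Virtqueue index layout: data queue pair i uses indexes 2 * i (RX)
 * and 2 * i + 1 (TX); the control virtqueue, if present, takes the
 * last index, data_queue_pairs * 2. Hence total_notifiers below is
 * data_queue_pairs * 2 + cvq.
 */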
int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
                    int data_queue_pairs, int cvq)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int total_notifiers = data_queue_pairs * 2 + cvq;
    VirtIONet *n = VIRTIO_NET(dev);
    int nvhosts = data_queue_pairs + cvq;
    struct vhost_net *net;
    int r, e, i, index_end = data_queue_pairs * 2;
    NetClientState *peer;

    if (cvq) {
        index_end += 1;
    }

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return -ENOSYS;
    }

    for (i = 0; i < nvhosts; i++) {
        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else { /* Control Virtqueue */
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }

        net = get_vhost_net(peer);
        vhost_net_set_vq_index(net, i * 2, index_end);

        /*
         * Suppress the masking of guest notifiers on vhost-user, because
         * vhost-user does not handle interrupt masking/unmasking properly.
         */
        if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
            dev->use_guest_notifier_mask = false;
        }
    }

    r = vhost_net_enable_notifiers(dev, ncs, data_queue_pairs, cvq);
    if (r < 0) {
        error_report("Error enabling host notifiers: %d", -r);
        goto err;
    }

    r = k->set_guest_notifiers(qbus->parent, total_notifiers, true);
    if (r < 0) {
        error_report("Error binding guest notifier: %d", -r);
        goto err_host_notifiers;
    }

    for (i = 0; i < nvhosts; i++) {
        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else {
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }

        if (peer->vring_enable) {
            /* restore vring enable state */
            r = vhost_set_vring_enable(peer, peer->vring_enable);

            if (r < 0) {
                goto err_guest_notifiers;
            }
        }

        r = vhost_net_start_one(get_vhost_net(peer), dev);
        if (r < 0) {
            goto err_guest_notifiers;
        }
    }

    return 0;

err_guest_notifiers:
    while (--i >= 0) {
        peer = qemu_get_peer(ncs, i < data_queue_pairs ?
                                  i : n->max_queue_pairs);
        vhost_net_stop_one(get_vhost_net(peer), dev);
    }
    e = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
    if (e < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
        fflush(stderr);
    }
err_host_notifiers:
    vhost_net_disable_notifiers(dev, ncs, data_queue_pairs, cvq);
err:
    return r;
}

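/*
 * Undo vhost_net_start() in reverse order: stop every vhost device
 * first, then unbind the guest notifiers and give the host notifiers
 * back to QEMU.
 */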
void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
                    int data_queue_pairs, int cvq)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *peer;
    int total_notifiers = data_queue_pairs * 2 + cvq;
    int nvhosts = data_queue_pairs + cvq;
    int i, r;

    for (i = 0; i < nvhosts; i++) {
        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else {
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }
        vhost_net_stop_one(get_vhost_net(peer), dev);
    }

    r = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
    if (r < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
        fflush(stderr);
    }
    assert(r >= 0);

    vhost_net_disable_notifiers(dev, ncs, data_queue_pairs, cvq);
}

void vhost_net_cleanup(struct vhost_net *net)
{
    vhost_dev_cleanup(&net->dev);
}

int vhost_net_notify_migration_done(struct vhost_net *net, char *mac_addr)
{
    const VhostOps *vhost_ops = net->dev.vhost_ops;

    assert(vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
    assert(vhost_ops->vhost_migration_done);

    return vhost_ops->vhost_migration_done(&net->dev, mac_addr);
}

bool vhost_net_virtqueue_pending(VHostNetState *net, int idx)
{
    return vhost_virtqueue_pending(&net->dev, idx);
}

void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
                              int idx, bool mask)
{
    vhost_virtqueue_mask(&net->dev, dev, idx, mask);
}

bool vhost_net_config_pending(VHostNetState *net)
{
    return vhost_config_pending(&net->dev);
}

void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev, bool mask)
{
    vhost_config_mask(&net->dev, dev, mask);
}

VHostNetState *get_vhost_net(NetClientState *nc)
{
    VHostNetState *vhost_net = 0;

    if (!nc) {
        return 0;
    }

    switch (nc->info->type) {
    case NET_CLIENT_DRIVER_TAP:
        vhost_net = tap_get_vhost_net(nc);
        /*
         * tap_get_vhost_net() can return NULL if a tap net-device backend is
         * created with 'vhost=off' option, 'vhostforce=off' or no vhost or
         * vhostforce or vhostfd options at all. Please see net_init_tap_one().
         * Hence, we omit the assertion here.
         */
        break;
#ifdef CONFIG_VHOST_NET_USER
    case NET_CLIENT_DRIVER_VHOST_USER:
        vhost_net = vhost_user_get_vhost_net(nc);
        assert(vhost_net);
        break;
#endif
#ifdef CONFIG_VHOST_NET_VDPA
    case NET_CLIENT_DRIVER_VHOST_VDPA:
        vhost_net = vhost_vdpa_get_vhost_net(nc);
        assert(vhost_net);
        break;
#endif
    default:
        break;
    }

    return vhost_net;
}

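/*
 * Record the requested enable state on the net client and forward it
 * to backends that implement vhost_set_vring_enable. vhost-vdpa is
 * excluded here; see the comment below.
 */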
int vhost_set_vring_enable(NetClientState *nc, int enable)
{
    VHostNetState *net = get_vhost_net(nc);
    const VhostOps *vhost_ops = net->dev.vhost_ops;

    /*
     * vhost-vdpa network devices need to enable dataplane virtqueues after
     * DRIVER_OK, so they can recover device state before starting dataplane.
     * Because of that, we don't enable virtqueues here and leave it to
     * net/vhost-vdpa.c.
     */
    if (nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
        return 0;
    }

    nc->vring_enable = enable;

    if (vhost_ops && vhost_ops->vhost_set_vring_enable) {
        return vhost_ops->vhost_set_vring_enable(&net->dev, enable);
    }

    return 0;
}

int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu)
{
    const VhostOps *vhost_ops = net->dev.vhost_ops;

    if (!vhost_ops->vhost_net_set_mtu) {
        return 0;
    }

    return vhost_ops->vhost_net_set_mtu(&net->dev, mtu);
}

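/*
 * Reset one virtqueue: for tap, first detach the backend fd from the
 * queue (fd == -1), then stop the virtqueue. @vq_index is translated
 * into the backend's numbering via vhost_get_vq_index().
 */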
void vhost_net_virtqueue_reset(VirtIODevice *vdev, NetClientState *nc,
                               int vq_index)
{
    VHostNetState *net = get_vhost_net(nc->peer);
    const VhostOps *vhost_ops = net->dev.vhost_ops;
    struct vhost_vring_file file = { .fd = -1 };
    int idx;

    /* should only be called after backend is connected */
    assert(vhost_ops);

    idx = vhost_ops->vhost_get_vq_index(&net->dev, vq_index);

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        file.index = idx;
        int r = vhost_net_set_backend(&net->dev, &file);
        assert(r >= 0);
    }

    vhost_virtqueue_stop(&net->dev,
                         vdev,
                         net->dev.vqs + idx,
                         net->dev.vq_index + idx);
}

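/*
 * Restart a virtqueue that was reset earlier: start it again and, for
 * tap, reattach the backend fd. If attaching the backend fails, the
 * queue is unbound again with VHOST_FILE_UNBIND and the whole vhost
 * device is stopped.
 */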
int vhost_net_virtqueue_restart(VirtIODevice *vdev, NetClientState *nc,
                                int vq_index)
{
    VHostNetState *net = get_vhost_net(nc->peer);
    const VhostOps *vhost_ops = net->dev.vhost_ops;
    struct vhost_vring_file file = { };
    int idx, r;

    if (!net->dev.started) {
        return -EBUSY;
    }

    /* should only be called after backend is connected */
    assert(vhost_ops);

    idx = vhost_ops->vhost_get_vq_index(&net->dev, vq_index);

    r = vhost_virtqueue_start(&net->dev,
                              vdev,
                              net->dev.vqs + idx,
                              net->dev.vq_index + idx);
    if (r < 0) {
        goto err_start;
    }

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        file.index = idx;
        file.fd = net->backend;
        r = vhost_net_set_backend(&net->dev, &file);
        if (r < 0) {
            r = -errno;
            goto err_start;
        }
    }

    return 0;

err_start:
    error_report("Error when restarting the queue.");

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        file.fd = VHOST_FILE_UNBIND;
        file.index = idx;
        int ret = vhost_net_set_backend(&net->dev, &file);
        assert(ret >= 0);
    }

    vhost_dev_stop(&net->dev, vdev, false);

    return r;
}