/*
 * vhost-net support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "net/net.h"
#include "net/tap.h"
#include "net/vhost-user.h"
#include "net/vhost-vdpa.h"

#include "standard-headers/linux/vhost_types.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"

#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>


#include "standard-headers/linux/virtio_ring.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/virtio-bus.h"


/* Features supported by host kernel. */
static const int kernel_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_MTU,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_NET_F_HASH_REPORT,
    VHOST_INVALID_FEATURE_BIT
};

/* Features supported by the vhost-user backend. */
static const int user_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,

    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_HASH_REPORT,

    /* This bit implies RARP isn't sent by QEMU out of band */
    VIRTIO_NET_F_GUEST_ANNOUNCE,

    VIRTIO_NET_F_MQ,

    VHOST_INVALID_FEATURE_BIT
};

static const int *vhost_net_get_feature_bits(struct vhost_net *net)
{
    const int *feature_bits = 0;

    switch (net->nc->info->type) {
    case NET_CLIENT_DRIVER_TAP:
        feature_bits = kernel_feature_bits;
        break;
    case NET_CLIENT_DRIVER_VHOST_USER:
        feature_bits = user_feature_bits;
        break;
#ifdef CONFIG_VHOST_NET_VDPA
    case NET_CLIENT_DRIVER_VHOST_VDPA:
        feature_bits = vdpa_feature_bits;
        break;
#endif
    default:
        error_report("Feature bits not defined for this type: %d",
                     net->nc->info->type);
        break;
    }

    return feature_bits;
}

uint64_t vhost_net_get_features(struct vhost_net *net, uint64_t features)
{
    return vhost_get_features(&net->dev, vhost_net_get_feature_bits(net),
                              features);
}

int vhost_net_get_config(struct vhost_net *net, uint8_t *config,
                         uint32_t config_len)
{
    return vhost_dev_get_config(&net->dev, config, config_len, NULL);
}

int vhost_net_set_config(struct vhost_net *net, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags)
{
    return vhost_dev_set_config(&net->dev, data, offset, size, flags);
}

void vhost_net_ack_features(struct vhost_net *net, uint64_t features)
{
    net->dev.acked_features = net->dev.backend_features;
    vhost_ack_features(&net->dev, vhost_net_get_feature_bits(net), features);
}
uint64_t vhost_net_get_max_queues(VHostNetState *net)
{
    return net->dev.max_queues;
}

uint64_t vhost_net_get_acked_features(VHostNetState *net)
{
    return net->dev.acked_features;
}

static int vhost_net_get_fd(NetClientState *backend)
{
    switch (backend->info->type) {
    case NET_CLIENT_DRIVER_TAP:
        return tap_get_fd(backend);
    default:
        fprintf(stderr, "vhost-net requires tap backend\n");
        return -ENOSYS;
    }
}

struct vhost_net *vhost_net_init(VhostNetOptions *options)
{
    int r;
    bool backend_kernel = options->backend_type == VHOST_BACKEND_TYPE_KERNEL;
    struct vhost_net *net = g_new0(struct vhost_net, 1);
    uint64_t features = 0;
    Error *local_err = NULL;

    if (!options->net_backend) {
        fprintf(stderr, "vhost-net requires net backend to be setup\n");
        goto fail;
    }
    net->nc = options->net_backend;
    net->dev.nvqs = options->nvqs;

    net->dev.max_queues = 1;
    net->dev.vqs = net->vqs;

    if (backend_kernel) {
        r = vhost_net_get_fd(options->net_backend);
        if (r < 0) {
            goto fail;
        }
        net->dev.backend_features = qemu_has_vnet_hdr(options->net_backend)
            ? 0 : (1ULL << VHOST_NET_F_VIRTIO_NET_HDR);
        net->backend = r;
        net->dev.protocol_features = 0;
    } else {
        net->dev.backend_features = 0;
        net->dev.protocol_features = 0;
        net->backend = -1;

        /* vhost-user needs vq_index to initiate a specific queue pair */
        net->dev.vq_index = net->nc->queue_index * net->dev.nvqs;
    }

    r = vhost_dev_init(&net->dev, options->opaque,
                       options->backend_type, options->busyloop_timeout,
                       &local_err);
    if (r < 0) {
        error_report_err(local_err);
        goto fail;
    }
    if (backend_kernel) {
        if (!qemu_has_vnet_hdr_len(options->net_backend,
                                   sizeof(struct virtio_net_hdr_mrg_rxbuf))) {
            net->dev.features &= ~(1ULL << VIRTIO_NET_F_MRG_RXBUF);
        }
        if (~net->dev.features & net->dev.backend_features) {
            fprintf(stderr, "vhost lacks feature mask 0x%" PRIx64
                    " for backend\n",
                    (uint64_t)(~net->dev.features & net->dev.backend_features));
            goto fail;
        }
    }

    /* Set sane init value. Override when guest acks. */
#ifdef CONFIG_VHOST_NET_USER
    if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        features = vhost_user_get_acked_features(net->nc);
        if (~net->dev.features & features) {
            fprintf(stderr, "vhost lacks feature mask 0x%" PRIx64
                    " for backend\n",
                    (uint64_t)(~net->dev.features & features));
            goto fail;
        }
    }
#endif

    vhost_net_ack_features(net, features);

    return net;

fail:
    vhost_dev_cleanup(&net->dev);
    g_free(net);
    return NULL;
}

static void vhost_net_set_vq_index(struct vhost_net *net, int vq_index,
                                   int vq_index_end)
{
    net->dev.vq_index = vq_index;
    net->dev.vq_index_end = vq_index_end;
}

static int vhost_net_start_one(struct vhost_net *net,
                               VirtIODevice *dev)
{
    struct vhost_vring_file file = { };
    int r;

    if (net->nc->info->start) {
        r = net->nc->info->start(net->nc);
        if (r < 0) {
            return r;
        }
    }

    r = vhost_dev_enable_notifiers(&net->dev, dev);
    if (r < 0) {
        goto fail_notifiers;
    }

    r = vhost_dev_start(&net->dev, dev);
    if (r < 0) {
        goto fail_start;
    }

    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, false);
    }

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        qemu_set_fd_handler(net->backend, NULL, NULL, NULL);
        file.fd = net->backend;
        for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
            if (!virtio_queue_enabled(dev, net->dev.vq_index +
                                      file.index)) {
                /* Queue might not be ready for start */
                continue;
            }
            r = vhost_net_set_backend(&net->dev, &file);
            if (r < 0) {
                r = -errno;
                goto fail;
            }
        }
    }
    return 0;
fail:
    file.fd = -1;
    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        while (file.index-- > 0) {
            if (!virtio_queue_enabled(dev, net->dev.vq_index +
                                      file.index)) {
                /* Queue might not be ready for start */
                continue;
            }
            int r = vhost_net_set_backend(&net->dev, &file);
            assert(r >= 0);
        }
    }
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, true);
    }
    vhost_dev_stop(&net->dev, dev);
fail_start:
    vhost_dev_disable_notifiers(&net->dev, dev);
fail_notifiers:
    return r;
}

static void vhost_net_stop_one(struct vhost_net *net,
                               VirtIODevice *dev)
{
    struct vhost_vring_file file = { .fd = -1 };

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
            int r = vhost_net_set_backend(&net->dev, &file);
            assert(r >= 0);
        }
    }
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, true);
    }
    vhost_dev_stop(&net->dev, dev);
    if (net->nc->info->stop) {
        net->nc->info->stop(net->nc);
    }
    vhost_dev_disable_notifiers(&net->dev, dev);
}

int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
                    int data_queue_pairs, int cvq)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int total_notifiers = data_queue_pairs * 2 + cvq;
    VirtIONet *n = VIRTIO_NET(dev);
    int nvhosts = data_queue_pairs + cvq;
    struct vhost_net *net;
    int r, e, i, index_end = data_queue_pairs * 2;
    NetClientState *peer;

    if (cvq) {
        index_end += 1;
    }

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return -ENOSYS;
    }

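    /*
     * First pass over the vhost devices: assign each one its virtqueue
     * index range and, for vhost-user backends, turn off guest notifier
     * masking, before any backend is actually started.
     */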
    for (i = 0; i < nvhosts; i++) {

        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else { /* Control Virtqueue */
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }

        net = get_vhost_net(peer);
        vhost_net_set_vq_index(net, i * 2, index_end);

        /* Suppress masking of guest notifiers on vhost-user,
         * because vhost-user does not handle interrupt
         * masking/unmasking properly.
         */
        if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
            dev->use_guest_notifier_mask = false;
        }
    }

    r = k->set_guest_notifiers(qbus->parent, total_notifiers, true);
    if (r < 0) {
        error_report("Error binding guest notifier: %d", -r);
        goto err;
    }

    for (i = 0; i < nvhosts; i++) {
        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else {
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }
        r = vhost_net_start_one(get_vhost_net(peer), dev);

        if (r < 0) {
            goto err_start;
        }

        if (peer->vring_enable) {
            /* restore vring enable state */
            r = vhost_set_vring_enable(peer, peer->vring_enable);

            if (r < 0) {
                vhost_net_stop_one(get_vhost_net(peer), dev);
                goto err_start;
            }
        }
    }

    return 0;

err_start:
    while (--i >= 0) {
        peer = qemu_get_peer(ncs, i < data_queue_pairs ?
                             i : n->max_queue_pairs);
        vhost_net_stop_one(get_vhost_net(peer), dev);
    }
    e = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
    if (e < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
        fflush(stderr);
    }
err:
    return r;
}

void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
                    int data_queue_pairs, int cvq)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *peer;
    int total_notifiers = data_queue_pairs * 2 + cvq;
    int nvhosts = data_queue_pairs + cvq;
    int i, r;

    for (i = 0; i < nvhosts; i++) {
        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else {
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }
        vhost_net_stop_one(get_vhost_net(peer), dev);
    }

    r = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
    if (r < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
        fflush(stderr);
    }
    assert(r >= 0);
}

void vhost_net_cleanup(struct vhost_net *net)
{
    vhost_dev_cleanup(&net->dev);
}

int vhost_net_notify_migration_done(struct vhost_net *net, char* mac_addr)
{
    const VhostOps *vhost_ops = net->dev.vhost_ops;

    assert(vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
    assert(vhost_ops->vhost_migration_done);

    return vhost_ops->vhost_migration_done(&net->dev, mac_addr);
}

bool vhost_net_virtqueue_pending(VHostNetState *net, int idx)
{
    return vhost_virtqueue_pending(&net->dev, idx);
}

void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
                              int idx, bool mask)
{
    vhost_virtqueue_mask(&net->dev, dev, idx, mask);
}

VHostNetState *get_vhost_net(NetClientState *nc)
{
    VHostNetState *vhost_net = 0;

    if (!nc) {
        return 0;
    }

    switch (nc->info->type) {
    case NET_CLIENT_DRIVER_TAP:
        vhost_net = tap_get_vhost_net(nc);
        break;
#ifdef CONFIG_VHOST_NET_USER
    case NET_CLIENT_DRIVER_VHOST_USER:
        vhost_net = vhost_user_get_vhost_net(nc);
        assert(vhost_net);
        break;
#endif
#ifdef CONFIG_VHOST_NET_VDPA
    case NET_CLIENT_DRIVER_VHOST_VDPA:
        vhost_net = vhost_vdpa_get_vhost_net(nc);
        assert(vhost_net);
        break;
#endif
    default:
        break;
    }

    return vhost_net;
}

int vhost_set_vring_enable(NetClientState *nc, int enable)
{
    VHostNetState *net = get_vhost_net(nc);
    const VhostOps *vhost_ops = net->dev.vhost_ops;

    nc->vring_enable = enable;

    if (vhost_ops && vhost_ops->vhost_set_vring_enable) {
        return vhost_ops->vhost_set_vring_enable(&net->dev, enable);
    }

    return 0;
}

int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu)
{
    const VhostOps *vhost_ops = net->dev.vhost_ops;

    if (!vhost_ops->vhost_net_set_mtu) {
        return 0;
    }

    return vhost_ops->vhost_net_set_mtu(&net->dev, mtu);
}