/*
 * vhost-user
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "chardev/char-fe.h"
#include "sysemu/kvm.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <linux/vhost.h>

#define VHOST_MEMORY_MAX_NREGIONS    8
#define VHOST_USER_F_PROTOCOL_FEATURES 30

enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,

    VHOST_USER_PROTOCOL_F_MAX
};

#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)

typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_SLAVE_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_MAX
} VhostUserRequest;

typedef enum VhostUserSlaveRequest {
    VHOST_USER_SLAVE_NONE = 0,
    VHOST_USER_SLAVE_IOTLB_MSG = 1,
    VHOST_USER_SLAVE_MAX
} VhostUserSlaveRequest;

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;

typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1 << 2)
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserLog log;
        struct vhost_iotlb_msg iotlb;
    } payload;
} QEMU_PACKED VhostUserMsg;

static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
                            + sizeof(m.flags) \
                            + sizeof(m.size))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)

struct vhost_user {
    CharBackend *chr;
    int slave_fd;
};

static bool
ioeventfd_enabled(void)
{
    return kvm_enabled() && kvm_eventfds_enabled();
}

static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size = VHOST_USER_HDR_SIZE;

    r = qemu_chr_fe_read_all(chr, p, size);
    if (r != size) {
        error_report("Failed to read msg header. Read %d instead of %d."
                     " Original request %d.", r, size, msg->request);
        goto fail;
    }

    /* validate received flags */
    if (msg->flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
        error_report("Failed to read msg header."
                     " Flags 0x%x instead of 0x%x.", msg->flags,
                     VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
        goto fail;
    }

    /* validate message size is sane */
    if (msg->size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", msg->size,
                     VHOST_USER_PAYLOAD_SIZE);
        goto fail;
    }

    if (msg->size) {
        p += VHOST_USER_HDR_SIZE;
        size = msg->size;
        r = qemu_chr_fe_read_all(chr, p, size);
        if (r != size) {
            error_report("Failed to read msg payload."
                         " Read %d instead of %d.", r, msg->size);
            goto fail;
        }
    }

    return 0;

fail:
    return -1;
}

static int process_message_reply(struct vhost_dev *dev,
                                 const VhostUserMsg *msg)
{
    VhostUserMsg msg_reply;

    if ((msg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        return 0;
    }

    if (vhost_user_read(dev, &msg_reply) < 0) {
        return -1;
    }

    if (msg_reply.request != msg->request) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     msg->request, msg_reply.request);
        return -1;
    }

    return msg_reply.payload.u64 ? -1 : 0;
}

static bool vhost_user_one_time_request(VhostUserRequest request)
{
    switch (request) {
    case VHOST_USER_SET_OWNER:
    case VHOST_USER_RESET_OWNER:
    case VHOST_USER_SET_MEM_TABLE:
    case VHOST_USER_GET_QUEUE_NUM:
    case VHOST_USER_NET_SET_MTU:
        return true;
    default:
        return false;
    }
}

/* most non-init callers ignore the error */
static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
                            int *fds, int fd_num)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->chr;
    int ret, size = VHOST_USER_HDR_SIZE + msg->size;

    /*
     * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
     * we only need to send it once; later requests of the same kind
     * are simply ignored.
     */
    if (vhost_user_one_time_request(msg->request) && dev->vq_index != 0) {
        msg->flags &= ~VHOST_USER_NEED_REPLY_MASK;
        return 0;
    }

    if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
        error_report("Failed to set msg fds.");
        return -1;
    }

    ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
    if (ret != size) {
        error_report("Failed to write msg."
243 " Wrote %d instead of %d.", ret, size); 244 return -1; 245 } 246 247 return 0; 248 } 249 250 static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base, 251 struct vhost_log *log) 252 { 253 int fds[VHOST_MEMORY_MAX_NREGIONS]; 254 size_t fd_num = 0; 255 bool shmfd = virtio_has_feature(dev->protocol_features, 256 VHOST_USER_PROTOCOL_F_LOG_SHMFD); 257 VhostUserMsg msg = { 258 .request = VHOST_USER_SET_LOG_BASE, 259 .flags = VHOST_USER_VERSION, 260 .payload.log.mmap_size = log->size * sizeof(*(log->log)), 261 .payload.log.mmap_offset = 0, 262 .size = sizeof(msg.payload.log), 263 }; 264 265 if (shmfd && log->fd != -1) { 266 fds[fd_num++] = log->fd; 267 } 268 269 if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { 270 return -1; 271 } 272 273 if (shmfd) { 274 msg.size = 0; 275 if (vhost_user_read(dev, &msg) < 0) { 276 return -1; 277 } 278 279 if (msg.request != VHOST_USER_SET_LOG_BASE) { 280 error_report("Received unexpected msg type. " 281 "Expected %d received %d", 282 VHOST_USER_SET_LOG_BASE, msg.request); 283 return -1; 284 } 285 } 286 287 return 0; 288 } 289 290 static int vhost_user_set_mem_table(struct vhost_dev *dev, 291 struct vhost_memory *mem) 292 { 293 int fds[VHOST_MEMORY_MAX_NREGIONS]; 294 int i, fd; 295 size_t fd_num = 0; 296 bool reply_supported = virtio_has_feature(dev->protocol_features, 297 VHOST_USER_PROTOCOL_F_REPLY_ACK); 298 299 VhostUserMsg msg = { 300 .request = VHOST_USER_SET_MEM_TABLE, 301 .flags = VHOST_USER_VERSION, 302 }; 303 304 if (reply_supported) { 305 msg.flags |= VHOST_USER_NEED_REPLY_MASK; 306 } 307 308 for (i = 0; i < dev->mem->nregions; ++i) { 309 struct vhost_memory_region *reg = dev->mem->regions + i; 310 ram_addr_t offset; 311 MemoryRegion *mr; 312 313 assert((uintptr_t)reg->userspace_addr == reg->userspace_addr); 314 mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr, 315 &offset); 316 fd = memory_region_get_fd(mr); 317 if (fd > 0) { 318 msg.payload.memory.regions[fd_num].userspace_addr = reg->userspace_addr; 319 msg.payload.memory.regions[fd_num].memory_size = reg->memory_size; 320 msg.payload.memory.regions[fd_num].guest_phys_addr = reg->guest_phys_addr; 321 msg.payload.memory.regions[fd_num].mmap_offset = offset; 322 assert(fd_num < VHOST_MEMORY_MAX_NREGIONS); 323 fds[fd_num++] = fd; 324 } 325 } 326 327 msg.payload.memory.nregions = fd_num; 328 329 if (!fd_num) { 330 error_report("Failed initializing vhost-user memory map, " 331 "consider using -object memory-backend-file share=on"); 332 return -1; 333 } 334 335 msg.size = sizeof(msg.payload.memory.nregions); 336 msg.size += sizeof(msg.payload.memory.padding); 337 msg.size += fd_num * sizeof(VhostUserMemoryRegion); 338 339 if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { 340 return -1; 341 } 342 343 if (reply_supported) { 344 return process_message_reply(dev, &msg); 345 } 346 347 return 0; 348 } 349 350 static int vhost_user_set_vring_addr(struct vhost_dev *dev, 351 struct vhost_vring_addr *addr) 352 { 353 VhostUserMsg msg = { 354 .request = VHOST_USER_SET_VRING_ADDR, 355 .flags = VHOST_USER_VERSION, 356 .payload.addr = *addr, 357 .size = sizeof(msg.payload.addr), 358 }; 359 360 if (vhost_user_write(dev, &msg, NULL, 0) < 0) { 361 return -1; 362 } 363 364 return 0; 365 } 366 367 static int vhost_user_set_vring_endian(struct vhost_dev *dev, 368 struct vhost_vring_state *ring) 369 { 370 error_report("vhost-user trying to send unhandled ioctl"); 371 return -1; 372 } 373 374 static int vhost_set_vring(struct vhost_dev *dev, 375 unsigned long int request, 376 struct 
                           struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .request = request,
        .flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .size = sizeof(msg.payload.state),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
}

static int vhost_user_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
}

static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
{
    int i;

    if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        return -1;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = enable,
        };

        vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
    }

    return 0;
}

static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .request = VHOST_USER_GET_VRING_BASE,
        .flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .size = sizeof(msg.payload.state),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.request != VHOST_USER_GET_VRING_BASE) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_VRING_BASE, msg.request);
        return -1;
    }

    if (msg.size != sizeof(msg.payload.state)) {
        error_report("Received bad msg size.");
        return -1;
    }

    *ring = msg.payload.state;

    return 0;
}

static int vhost_set_vring_file(struct vhost_dev *dev,
                                VhostUserRequest request,
                                struct vhost_vring_file *file)
{
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    size_t fd_num = 0;
    VhostUserMsg msg = {
        .request = request,
        .flags = VHOST_USER_VERSION,
        .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
        .size = sizeof(msg.payload.u64),
    };

    if (ioeventfd_enabled() && file->fd > 0) {
        fds[fd_num++] = file->fd;
    } else {
        msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
    }

    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
}

static int vhost_user_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
}

static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
{
    VhostUserMsg msg = {
        .request = request,
        .flags = VHOST_USER_VERSION,
        .payload.u64 = u64,
        .size = sizeof(msg.payload.u64),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
}

static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                            uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
}

static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
    VhostUserMsg msg = {
        .request = request,
        .flags = VHOST_USER_VERSION,
    };

    if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.request != request) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     request, msg.request);
        return -1;
    }

    if (msg.size != sizeof(msg.payload.u64)) {
        error_report("Received bad msg size.");
        return -1;
    }

    *u64 = msg.payload.u64;

    return 0;
}

static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
{
    return vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features);
}

static int vhost_user_set_owner(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .request = VHOST_USER_SET_OWNER,
        .flags = VHOST_USER_VERSION,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_reset_device(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .request = VHOST_USER_RESET_OWNER,
        .flags = VHOST_USER_VERSION,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static void slave_read(void *opaque)
{
    struct vhost_dev *dev = opaque;
    struct vhost_user *u = dev->opaque;
    VhostUserMsg msg = { 0, };
    int size, ret = 0;

    /* Read header */
    size = read(u->slave_fd, &msg, VHOST_USER_HDR_SIZE);
    if (size != VHOST_USER_HDR_SIZE) {
        error_report("Failed to read from slave.");
        goto err;
    }

    if (msg.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", msg.size,
                     VHOST_USER_PAYLOAD_SIZE);
        goto err;
    }

    /* Read payload */
    size = read(u->slave_fd, &msg.payload, msg.size);
    if (size != msg.size) {
        error_report("Failed to read payload from slave.");
        goto err;
    }

    switch (msg.request) {
    case VHOST_USER_SLAVE_IOTLB_MSG:
        ret = vhost_backend_handle_iotlb_msg(dev, &msg.payload.iotlb);
        break;
    default:
        error_report("Received unexpected msg type.");
        ret = -EINVAL;
    }

    /*
     * REPLY_ACK feature handling. Other reply types have to be managed
     * directly in their request handlers.
     */
    if (msg.flags & VHOST_USER_NEED_REPLY_MASK) {
        msg.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        msg.flags |= VHOST_USER_REPLY_MASK;

        msg.payload.u64 = !!ret;
        msg.size = sizeof(msg.payload.u64);

        size = write(u->slave_fd, &msg, VHOST_USER_HDR_SIZE + msg.size);
        if (size != VHOST_USER_HDR_SIZE + msg.size) {
            error_report("Failed to send msg reply to slave.");
            goto err;
        }
    }

    return;

err:
    qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
    close(u->slave_fd);
    u->slave_fd = -1;
    return;
}

static int vhost_setup_slave_channel(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .request = VHOST_USER_SET_SLAVE_REQ_FD,
        .flags = VHOST_USER_VERSION,
    };
    struct vhost_user *u = dev->opaque;
    int sv[2], ret = 0;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
        return 0;
    }

    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_report("socketpair() failed");
        return -1;
    }

    u->slave_fd = sv[0];
    qemu_set_fd_handler(u->slave_fd, slave_read, NULL, dev);

    if (reply_supported) {
        msg.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    ret = vhost_user_write(dev, &msg, &sv[1], 1);
    if (ret) {
        goto out;
    }

    if (reply_supported) {
        ret = process_message_reply(dev, &msg);
    }

out:
    close(sv[1]);
    if (ret) {
        qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
        close(u->slave_fd);
        u->slave_fd = -1;
    }

    return ret;
}

static int vhost_user_init(struct vhost_dev *dev, void *opaque)
{
    uint64_t features, protocol_features;
    struct vhost_user *u;
    int err;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = g_new0(struct vhost_user, 1);
    u->chr = opaque;
    u->slave_fd = -1;
    dev->opaque = u;

    err = vhost_user_get_features(dev, &features);
    if (err < 0) {
        return err;
    }

    if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

        err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
                                 &protocol_features);
        if (err < 0) {
            return err;
        }

        dev->protocol_features =
            protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK;
        err = vhost_user_set_protocol_features(dev, dev->protocol_features);
        if (err < 0) {
            return err;
        }

        /* query the max queues we support if backend supports Multiple Queue */
        if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
            err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
                                     &dev->max_queues);
            if (err < 0) {
                return err;
            }
        }

        if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
                !(virtio_has_feature(dev->protocol_features,
                        VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
                  virtio_has_feature(dev->protocol_features,
                        VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
            error_report("IOMMU support requires reply-ack and "
                         "slave-req protocol features.");
            return -1;
        }
    }

    if (dev->migration_blocker == NULL &&
        !virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
        error_setg(&dev->migration_blocker,
                   "Migration disabled: vhost-user backend lacks "
                   "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
    }

    err = vhost_setup_slave_channel(dev);
    if (err < 0) {
        return err;
    }

    return 0;
}

static int vhost_user_cleanup(struct vhost_dev *dev)
{
    struct vhost_user *u;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = dev->opaque;
    if (u->slave_fd >= 0) {
        qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
        close(u->slave_fd);
        u->slave_fd = -1;
    }
    g_free(u);
    dev->opaque = 0;

    return 0;
}

static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    return idx;
}

static int vhost_user_memslots_limit(struct vhost_dev *dev)
{
    return VHOST_MEMORY_MAX_NREGIONS;
}

static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    return virtio_has_feature(dev->protocol_features,
                              VHOST_USER_PROTOCOL_F_LOG_SHMFD);
}

static int vhost_user_migration_done(struct vhost_dev *dev, char *mac_addr)
{
    VhostUserMsg msg = { 0 };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    /* If guest supports GUEST_ANNOUNCE do nothing */
    if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
        return 0;
    }

    /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */
    if (virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_RARP)) {
        msg.request = VHOST_USER_SEND_RARP;
        msg.flags = VHOST_USER_VERSION;
        memcpy((char *)&msg.payload.u64, mac_addr, 6);
        msg.size = sizeof(msg.payload.u64);

        return vhost_user_write(dev, &msg, NULL, 0);
    }
    return -1;
}

static bool vhost_user_can_merge(struct vhost_dev *dev,
                                 uint64_t start1, uint64_t size1,
                                 uint64_t start2, uint64_t size2)
{
    ram_addr_t offset;
    int mfd, rfd;
    MemoryRegion *mr;

    mr = memory_region_from_host((void *)(uintptr_t)start1, &offset);
    mfd = memory_region_get_fd(mr);

    mr = memory_region_from_host((void *)(uintptr_t)start2, &offset);
    rfd = memory_region_get_fd(mr);

    return mfd == rfd;
}

static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
{
    VhostUserMsg msg;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
        return 0;
    }

    msg.request = VHOST_USER_NET_SET_MTU;
    msg.payload.u64 = mtu;
    msg.size = sizeof(msg.payload.u64);
    msg.flags = VHOST_USER_VERSION;
    if (reply_supported) {
        msg.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    /* If reply_ack supported, slave has to ack specified MTU is valid */
    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}

static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
                                            struct vhost_iotlb_msg *imsg)
{
    VhostUserMsg msg = {
        .request = VHOST_USER_IOTLB_MSG,
        .size = sizeof(msg.payload.iotlb),
        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
        .payload.iotlb = *imsg,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -EFAULT;
    }

    return process_message_reply(dev, &msg);
}


static void
vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
{
    /* No-op as the receive channel is not dedicated to IOTLB messages. */
}

const VhostOps user_ops = {
        .backend_type = VHOST_BACKEND_TYPE_USER,
        .vhost_backend_init = vhost_user_init,
        .vhost_backend_cleanup = vhost_user_cleanup,
        .vhost_backend_memslots_limit = vhost_user_memslots_limit,
        .vhost_set_log_base = vhost_user_set_log_base,
        .vhost_set_mem_table = vhost_user_set_mem_table,
        .vhost_set_vring_addr = vhost_user_set_vring_addr,
        .vhost_set_vring_endian = vhost_user_set_vring_endian,
        .vhost_set_vring_num = vhost_user_set_vring_num,
        .vhost_set_vring_base = vhost_user_set_vring_base,
        .vhost_get_vring_base = vhost_user_get_vring_base,
        .vhost_set_vring_kick = vhost_user_set_vring_kick,
        .vhost_set_vring_call = vhost_user_set_vring_call,
        .vhost_set_features = vhost_user_set_features,
        .vhost_get_features = vhost_user_get_features,
        .vhost_set_owner = vhost_user_set_owner,
        .vhost_reset_device = vhost_user_reset_device,
        .vhost_get_vq_index = vhost_user_get_vq_index,
        .vhost_set_vring_enable = vhost_user_set_vring_enable,
        .vhost_requires_shm_log = vhost_user_requires_shm_log,
        .vhost_migration_done = vhost_user_migration_done,
        .vhost_backend_can_merge = vhost_user_can_merge,
        .vhost_net_set_mtu = vhost_user_net_set_mtu,
        .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
};
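
/*
 * Illustrative usage sketch (context only, not part of this backend's
 * logic): user_ops is selected for a vhost_dev whose backend type is
 * VHOST_BACKEND_TYPE_USER. For a vhost-user network device this typically
 * comes from a command line along the lines of:
 *
 *   -chardev socket,id=chr0,path=/tmp/vhost-user.sock
 *   -netdev type=vhost-user,id=net0,chardev=chr0
 *   -device virtio-net-pci,netdev=net0
 *
 * The chardev's CharBackend is what vhost_user_init() receives as its
 * 'opaque' argument and stores in struct vhost_user above; all protocol
 * messages defined in this file then travel over that UNIX socket.
 */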