1 /* 2 * vhost-user 3 * 4 * Copyright (c) 2013 Virtual Open Systems Sarl. 5 * 6 * This work is licensed under the terms of the GNU GPL, version 2 or later. 7 * See the COPYING file in the top-level directory. 8 * 9 */ 10 11 #include "qemu/osdep.h" 12 #include "qapi/error.h" 13 #include "hw/virtio/vhost.h" 14 #include "hw/virtio/vhost-backend.h" 15 #include "hw/virtio/virtio-net.h" 16 #include "chardev/char-fe.h" 17 #include "sysemu/kvm.h" 18 #include "qemu/error-report.h" 19 #include "qemu/sockets.h" 20 21 #include <sys/ioctl.h> 22 #include <sys/socket.h> 23 #include <sys/un.h> 24 #include <linux/vhost.h> 25 26 #define VHOST_MEMORY_MAX_NREGIONS 8 27 #define VHOST_USER_F_PROTOCOL_FEATURES 30 28 29 enum VhostUserProtocolFeature { 30 VHOST_USER_PROTOCOL_F_MQ = 0, 31 VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1, 32 VHOST_USER_PROTOCOL_F_RARP = 2, 33 VHOST_USER_PROTOCOL_F_REPLY_ACK = 3, 34 VHOST_USER_PROTOCOL_F_NET_MTU = 4, 35 VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5, 36 VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6, 37 38 VHOST_USER_PROTOCOL_F_MAX 39 }; 40 41 #define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1) 42 43 typedef enum VhostUserRequest { 44 VHOST_USER_NONE = 0, 45 VHOST_USER_GET_FEATURES = 1, 46 VHOST_USER_SET_FEATURES = 2, 47 VHOST_USER_SET_OWNER = 3, 48 VHOST_USER_RESET_OWNER = 4, 49 VHOST_USER_SET_MEM_TABLE = 5, 50 VHOST_USER_SET_LOG_BASE = 6, 51 VHOST_USER_SET_LOG_FD = 7, 52 VHOST_USER_SET_VRING_NUM = 8, 53 VHOST_USER_SET_VRING_ADDR = 9, 54 VHOST_USER_SET_VRING_BASE = 10, 55 VHOST_USER_GET_VRING_BASE = 11, 56 VHOST_USER_SET_VRING_KICK = 12, 57 VHOST_USER_SET_VRING_CALL = 13, 58 VHOST_USER_SET_VRING_ERR = 14, 59 VHOST_USER_GET_PROTOCOL_FEATURES = 15, 60 VHOST_USER_SET_PROTOCOL_FEATURES = 16, 61 VHOST_USER_GET_QUEUE_NUM = 17, 62 VHOST_USER_SET_VRING_ENABLE = 18, 63 VHOST_USER_SEND_RARP = 19, 64 VHOST_USER_NET_SET_MTU = 20, 65 VHOST_USER_SET_SLAVE_REQ_FD = 21, 66 VHOST_USER_IOTLB_MSG = 22, 67 VHOST_USER_SET_VRING_ENDIAN = 23, 68 VHOST_USER_MAX 69 } VhostUserRequest; 70 71 typedef enum VhostUserSlaveRequest { 72 VHOST_USER_SLAVE_NONE = 0, 73 VHOST_USER_SLAVE_IOTLB_MSG = 1, 74 VHOST_USER_SLAVE_MAX 75 } VhostUserSlaveRequest; 76 77 typedef struct VhostUserMemoryRegion { 78 uint64_t guest_phys_addr; 79 uint64_t memory_size; 80 uint64_t userspace_addr; 81 uint64_t mmap_offset; 82 } VhostUserMemoryRegion; 83 84 typedef struct VhostUserMemory { 85 uint32_t nregions; 86 uint32_t padding; 87 VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS]; 88 } VhostUserMemory; 89 90 typedef struct VhostUserLog { 91 uint64_t mmap_size; 92 uint64_t mmap_offset; 93 } VhostUserLog; 94 95 typedef struct VhostUserMsg { 96 VhostUserRequest request; 97 98 #define VHOST_USER_VERSION_MASK (0x3) 99 #define VHOST_USER_REPLY_MASK (0x1<<2) 100 #define VHOST_USER_NEED_REPLY_MASK (0x1 << 3) 101 uint32_t flags; 102 uint32_t size; /* the following payload size */ 103 union { 104 #define VHOST_USER_VRING_IDX_MASK (0xff) 105 #define VHOST_USER_VRING_NOFD_MASK (0x1<<8) 106 uint64_t u64; 107 struct vhost_vring_state state; 108 struct vhost_vring_addr addr; 109 VhostUserMemory memory; 110 VhostUserLog log; 111 struct vhost_iotlb_msg iotlb; 112 } payload; 113 } QEMU_PACKED VhostUserMsg; 114 115 static VhostUserMsg m __attribute__ ((unused)); 116 #define VHOST_USER_HDR_SIZE (sizeof(m.request) \ 117 + sizeof(m.flags) \ 118 + sizeof(m.size)) 119 120 #define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE) 121 122 /* The version of the protocol we support */ 123 #define VHOST_USER_VERSION (0x1) 124 125 struct vhost_user { 126 CharBackend *chr; 127 int slave_fd; 128 }; 129 130 static bool ioeventfd_enabled(void) 131 { 132 return kvm_enabled() && kvm_eventfds_enabled(); 133 } 134 135 static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) 136 { 137 struct vhost_user *u = dev->opaque; 138 CharBackend *chr = u->chr; 139 uint8_t *p = (uint8_t *) msg; 140 int r, size = VHOST_USER_HDR_SIZE; 141 142 r = qemu_chr_fe_read_all(chr, p, size); 143 if (r != size) { 144 error_report("Failed to read msg header. Read %d instead of %d." 145 " Original request %d.", r, size, msg->request); 146 goto fail; 147 } 148 149 /* validate received flags */ 150 if (msg->flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) { 151 error_report("Failed to read msg header." 152 " Flags 0x%x instead of 0x%x.", msg->flags, 153 VHOST_USER_REPLY_MASK | VHOST_USER_VERSION); 154 goto fail; 155 } 156 157 /* validate message size is sane */ 158 if (msg->size > VHOST_USER_PAYLOAD_SIZE) { 159 error_report("Failed to read msg header." 160 " Size %d exceeds the maximum %zu.", msg->size, 161 VHOST_USER_PAYLOAD_SIZE); 162 goto fail; 163 } 164 165 if (msg->size) { 166 p += VHOST_USER_HDR_SIZE; 167 size = msg->size; 168 r = qemu_chr_fe_read_all(chr, p, size); 169 if (r != size) { 170 error_report("Failed to read msg payload." 171 " Read %d instead of %d.", r, msg->size); 172 goto fail; 173 } 174 } 175 176 return 0; 177 178 fail: 179 return -1; 180 } 181 182 static int process_message_reply(struct vhost_dev *dev, 183 const VhostUserMsg *msg) 184 { 185 VhostUserMsg msg_reply; 186 187 if ((msg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) { 188 return 0; 189 } 190 191 if (vhost_user_read(dev, &msg_reply) < 0) { 192 return -1; 193 } 194 195 if (msg_reply.request != msg->request) { 196 error_report("Received unexpected msg type." 197 "Expected %d received %d", 198 msg->request, msg_reply.request); 199 return -1; 200 } 201 202 return msg_reply.payload.u64 ? -1 : 0; 203 } 204 205 static bool vhost_user_one_time_request(VhostUserRequest request) 206 { 207 switch (request) { 208 case VHOST_USER_SET_OWNER: 209 case VHOST_USER_RESET_OWNER: 210 case VHOST_USER_SET_MEM_TABLE: 211 case VHOST_USER_GET_QUEUE_NUM: 212 case VHOST_USER_NET_SET_MTU: 213 return true; 214 default: 215 return false; 216 } 217 } 218 219 /* most non-init callers ignore the error */ 220 static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg, 221 int *fds, int fd_num) 222 { 223 struct vhost_user *u = dev->opaque; 224 CharBackend *chr = u->chr; 225 int ret, size = VHOST_USER_HDR_SIZE + msg->size; 226 227 /* 228 * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE, 229 * we just need send it once in the first time. For later such 230 * request, we just ignore it. 231 */ 232 if (vhost_user_one_time_request(msg->request) && dev->vq_index != 0) { 233 msg->flags &= ~VHOST_USER_NEED_REPLY_MASK; 234 return 0; 235 } 236 237 if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) { 238 error_report("Failed to set msg fds."); 239 return -1; 240 } 241 242 ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size); 243 if (ret != size) { 244 error_report("Failed to write msg." 245 " Wrote %d instead of %d.", ret, size); 246 return -1; 247 } 248 249 return 0; 250 } 251 252 static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base, 253 struct vhost_log *log) 254 { 255 int fds[VHOST_MEMORY_MAX_NREGIONS]; 256 size_t fd_num = 0; 257 bool shmfd = virtio_has_feature(dev->protocol_features, 258 VHOST_USER_PROTOCOL_F_LOG_SHMFD); 259 VhostUserMsg msg = { 260 .request = VHOST_USER_SET_LOG_BASE, 261 .flags = VHOST_USER_VERSION, 262 .payload.log.mmap_size = log->size * sizeof(*(log->log)), 263 .payload.log.mmap_offset = 0, 264 .size = sizeof(msg.payload.log), 265 }; 266 267 if (shmfd && log->fd != -1) { 268 fds[fd_num++] = log->fd; 269 } 270 271 if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { 272 return -1; 273 } 274 275 if (shmfd) { 276 msg.size = 0; 277 if (vhost_user_read(dev, &msg) < 0) { 278 return -1; 279 } 280 281 if (msg.request != VHOST_USER_SET_LOG_BASE) { 282 error_report("Received unexpected msg type. " 283 "Expected %d received %d", 284 VHOST_USER_SET_LOG_BASE, msg.request); 285 return -1; 286 } 287 } 288 289 return 0; 290 } 291 292 static int vhost_user_set_mem_table(struct vhost_dev *dev, 293 struct vhost_memory *mem) 294 { 295 int fds[VHOST_MEMORY_MAX_NREGIONS]; 296 int i, fd; 297 size_t fd_num = 0; 298 bool reply_supported = virtio_has_feature(dev->protocol_features, 299 VHOST_USER_PROTOCOL_F_REPLY_ACK); 300 301 VhostUserMsg msg = { 302 .request = VHOST_USER_SET_MEM_TABLE, 303 .flags = VHOST_USER_VERSION, 304 }; 305 306 if (reply_supported) { 307 msg.flags |= VHOST_USER_NEED_REPLY_MASK; 308 } 309 310 for (i = 0; i < dev->mem->nregions; ++i) { 311 struct vhost_memory_region *reg = dev->mem->regions + i; 312 ram_addr_t offset; 313 MemoryRegion *mr; 314 315 assert((uintptr_t)reg->userspace_addr == reg->userspace_addr); 316 mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr, 317 &offset); 318 fd = memory_region_get_fd(mr); 319 if (fd > 0) { 320 msg.payload.memory.regions[fd_num].userspace_addr = reg->userspace_addr; 321 msg.payload.memory.regions[fd_num].memory_size = reg->memory_size; 322 msg.payload.memory.regions[fd_num].guest_phys_addr = reg->guest_phys_addr; 323 msg.payload.memory.regions[fd_num].mmap_offset = offset; 324 assert(fd_num < VHOST_MEMORY_MAX_NREGIONS); 325 fds[fd_num++] = fd; 326 } 327 } 328 329 msg.payload.memory.nregions = fd_num; 330 331 if (!fd_num) { 332 error_report("Failed initializing vhost-user memory map, " 333 "consider using -object memory-backend-file share=on"); 334 return -1; 335 } 336 337 msg.size = sizeof(msg.payload.memory.nregions); 338 msg.size += sizeof(msg.payload.memory.padding); 339 msg.size += fd_num * sizeof(VhostUserMemoryRegion); 340 341 if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { 342 return -1; 343 } 344 345 if (reply_supported) { 346 return process_message_reply(dev, &msg); 347 } 348 349 return 0; 350 } 351 352 static int vhost_user_set_vring_addr(struct vhost_dev *dev, 353 struct vhost_vring_addr *addr) 354 { 355 VhostUserMsg msg = { 356 .request = VHOST_USER_SET_VRING_ADDR, 357 .flags = VHOST_USER_VERSION, 358 .payload.addr = *addr, 359 .size = sizeof(msg.payload.addr), 360 }; 361 362 if (vhost_user_write(dev, &msg, NULL, 0) < 0) { 363 return -1; 364 } 365 366 return 0; 367 } 368 369 static int vhost_user_set_vring_endian(struct vhost_dev *dev, 370 struct vhost_vring_state *ring) 371 { 372 bool cross_endian = virtio_has_feature(dev->protocol_features, 373 VHOST_USER_PROTOCOL_F_CROSS_ENDIAN); 374 VhostUserMsg msg = { 375 .request = VHOST_USER_SET_VRING_ENDIAN, 376 .flags = VHOST_USER_VERSION, 377 .payload.state = *ring, 378 .size = sizeof(msg.payload.state), 379 }; 380 381 if (!cross_endian) { 382 error_report("vhost-user trying to send unhandled ioctl"); 383 return -1; 384 } 385 386 if (vhost_user_write(dev, &msg, NULL, 0) < 0) { 387 return -1; 388 } 389 390 return 0; 391 } 392 393 static int vhost_set_vring(struct vhost_dev *dev, 394 unsigned long int request, 395 struct vhost_vring_state *ring) 396 { 397 VhostUserMsg msg = { 398 .request = request, 399 .flags = VHOST_USER_VERSION, 400 .payload.state = *ring, 401 .size = sizeof(msg.payload.state), 402 }; 403 404 if (vhost_user_write(dev, &msg, NULL, 0) < 0) { 405 return -1; 406 } 407 408 return 0; 409 } 410 411 static int vhost_user_set_vring_num(struct vhost_dev *dev, 412 struct vhost_vring_state *ring) 413 { 414 return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring); 415 } 416 417 static int vhost_user_set_vring_base(struct vhost_dev *dev, 418 struct vhost_vring_state *ring) 419 { 420 return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring); 421 } 422 423 static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable) 424 { 425 int i; 426 427 if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) { 428 return -1; 429 } 430 431 for (i = 0; i < dev->nvqs; ++i) { 432 struct vhost_vring_state state = { 433 .index = dev->vq_index + i, 434 .num = enable, 435 }; 436 437 vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state); 438 } 439 440 return 0; 441 } 442 443 static int vhost_user_get_vring_base(struct vhost_dev *dev, 444 struct vhost_vring_state *ring) 445 { 446 VhostUserMsg msg = { 447 .request = VHOST_USER_GET_VRING_BASE, 448 .flags = VHOST_USER_VERSION, 449 .payload.state = *ring, 450 .size = sizeof(msg.payload.state), 451 }; 452 453 if (vhost_user_write(dev, &msg, NULL, 0) < 0) { 454 return -1; 455 } 456 457 if (vhost_user_read(dev, &msg) < 0) { 458 return -1; 459 } 460 461 if (msg.request != VHOST_USER_GET_VRING_BASE) { 462 error_report("Received unexpected msg type. Expected %d received %d", 463 VHOST_USER_GET_VRING_BASE, msg.request); 464 return -1; 465 } 466 467 if (msg.size != sizeof(msg.payload.state)) { 468 error_report("Received bad msg size."); 469 return -1; 470 } 471 472 *ring = msg.payload.state; 473 474 return 0; 475 } 476 477 static int vhost_set_vring_file(struct vhost_dev *dev, 478 VhostUserRequest request, 479 struct vhost_vring_file *file) 480 { 481 int fds[VHOST_MEMORY_MAX_NREGIONS]; 482 size_t fd_num = 0; 483 VhostUserMsg msg = { 484 .request = request, 485 .flags = VHOST_USER_VERSION, 486 .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK, 487 .size = sizeof(msg.payload.u64), 488 }; 489 490 if (ioeventfd_enabled() && file->fd > 0) { 491 fds[fd_num++] = file->fd; 492 } else { 493 msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK; 494 } 495 496 if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { 497 return -1; 498 } 499 500 return 0; 501 } 502 503 static int vhost_user_set_vring_kick(struct vhost_dev *dev, 504 struct vhost_vring_file *file) 505 { 506 return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file); 507 } 508 509 static int vhost_user_set_vring_call(struct vhost_dev *dev, 510 struct vhost_vring_file *file) 511 { 512 return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file); 513 } 514 515 static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64) 516 { 517 VhostUserMsg msg = { 518 .request = request, 519 .flags = VHOST_USER_VERSION, 520 .payload.u64 = u64, 521 .size = sizeof(msg.payload.u64), 522 }; 523 524 if (vhost_user_write(dev, &msg, NULL, 0) < 0) { 525 return -1; 526 } 527 528 return 0; 529 } 530 531 static int vhost_user_set_features(struct vhost_dev *dev, 532 uint64_t features) 533 { 534 return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features); 535 } 536 537 static int vhost_user_set_protocol_features(struct vhost_dev *dev, 538 uint64_t features) 539 { 540 return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features); 541 } 542 543 static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64) 544 { 545 VhostUserMsg msg = { 546 .request = request, 547 .flags = VHOST_USER_VERSION, 548 }; 549 550 if (vhost_user_one_time_request(request) && dev->vq_index != 0) { 551 return 0; 552 } 553 554 if (vhost_user_write(dev, &msg, NULL, 0) < 0) { 555 return -1; 556 } 557 558 if (vhost_user_read(dev, &msg) < 0) { 559 return -1; 560 } 561 562 if (msg.request != request) { 563 error_report("Received unexpected msg type. Expected %d received %d", 564 request, msg.request); 565 return -1; 566 } 567 568 if (msg.size != sizeof(msg.payload.u64)) { 569 error_report("Received bad msg size."); 570 return -1; 571 } 572 573 *u64 = msg.payload.u64; 574 575 return 0; 576 } 577 578 static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features) 579 { 580 return vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features); 581 } 582 583 static int vhost_user_set_owner(struct vhost_dev *dev) 584 { 585 VhostUserMsg msg = { 586 .request = VHOST_USER_SET_OWNER, 587 .flags = VHOST_USER_VERSION, 588 }; 589 590 if (vhost_user_write(dev, &msg, NULL, 0) < 0) { 591 return -1; 592 } 593 594 return 0; 595 } 596 597 static int vhost_user_reset_device(struct vhost_dev *dev) 598 { 599 VhostUserMsg msg = { 600 .request = VHOST_USER_RESET_OWNER, 601 .flags = VHOST_USER_VERSION, 602 }; 603 604 if (vhost_user_write(dev, &msg, NULL, 0) < 0) { 605 return -1; 606 } 607 608 return 0; 609 } 610 611 static void slave_read(void *opaque) 612 { 613 struct vhost_dev *dev = opaque; 614 struct vhost_user *u = dev->opaque; 615 VhostUserMsg msg = { 0, }; 616 int size, ret = 0; 617 618 /* Read header */ 619 size = read(u->slave_fd, &msg, VHOST_USER_HDR_SIZE); 620 if (size != VHOST_USER_HDR_SIZE) { 621 error_report("Failed to read from slave."); 622 goto err; 623 } 624 625 if (msg.size > VHOST_USER_PAYLOAD_SIZE) { 626 error_report("Failed to read msg header." 627 " Size %d exceeds the maximum %zu.", msg.size, 628 VHOST_USER_PAYLOAD_SIZE); 629 goto err; 630 } 631 632 /* Read payload */ 633 size = read(u->slave_fd, &msg.payload, msg.size); 634 if (size != msg.size) { 635 error_report("Failed to read payload from slave."); 636 goto err; 637 } 638 639 switch (msg.request) { 640 case VHOST_USER_SLAVE_IOTLB_MSG: 641 ret = vhost_backend_handle_iotlb_msg(dev, &msg.payload.iotlb); 642 break; 643 default: 644 error_report("Received unexpected msg type."); 645 ret = -EINVAL; 646 } 647 648 /* 649 * REPLY_ACK feature handling. Other reply types has to be managed 650 * directly in their request handlers. 651 */ 652 if (msg.flags & VHOST_USER_NEED_REPLY_MASK) { 653 msg.flags &= ~VHOST_USER_NEED_REPLY_MASK; 654 msg.flags |= VHOST_USER_REPLY_MASK; 655 656 msg.payload.u64 = !!ret; 657 msg.size = sizeof(msg.payload.u64); 658 659 size = write(u->slave_fd, &msg, VHOST_USER_HDR_SIZE + msg.size); 660 if (size != VHOST_USER_HDR_SIZE + msg.size) { 661 error_report("Failed to send msg reply to slave."); 662 goto err; 663 } 664 } 665 666 return; 667 668 err: 669 qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL); 670 close(u->slave_fd); 671 u->slave_fd = -1; 672 return; 673 } 674 675 static int vhost_setup_slave_channel(struct vhost_dev *dev) 676 { 677 VhostUserMsg msg = { 678 .request = VHOST_USER_SET_SLAVE_REQ_FD, 679 .flags = VHOST_USER_VERSION, 680 }; 681 struct vhost_user *u = dev->opaque; 682 int sv[2], ret = 0; 683 bool reply_supported = virtio_has_feature(dev->protocol_features, 684 VHOST_USER_PROTOCOL_F_REPLY_ACK); 685 686 if (!virtio_has_feature(dev->protocol_features, 687 VHOST_USER_PROTOCOL_F_SLAVE_REQ)) { 688 return 0; 689 } 690 691 if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) { 692 error_report("socketpair() failed"); 693 return -1; 694 } 695 696 u->slave_fd = sv[0]; 697 qemu_set_fd_handler(u->slave_fd, slave_read, NULL, dev); 698 699 if (reply_supported) { 700 msg.flags |= VHOST_USER_NEED_REPLY_MASK; 701 } 702 703 ret = vhost_user_write(dev, &msg, &sv[1], 1); 704 if (ret) { 705 goto out; 706 } 707 708 if (reply_supported) { 709 ret = process_message_reply(dev, &msg); 710 } 711 712 out: 713 close(sv[1]); 714 if (ret) { 715 qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL); 716 close(u->slave_fd); 717 u->slave_fd = -1; 718 } 719 720 return ret; 721 } 722 723 static int vhost_user_init(struct vhost_dev *dev, void *opaque) 724 { 725 uint64_t features, protocol_features; 726 struct vhost_user *u; 727 int err; 728 729 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); 730 731 u = g_new0(struct vhost_user, 1); 732 u->chr = opaque; 733 u->slave_fd = -1; 734 dev->opaque = u; 735 736 err = vhost_user_get_features(dev, &features); 737 if (err < 0) { 738 return err; 739 } 740 741 if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) { 742 dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES; 743 744 err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES, 745 &protocol_features); 746 if (err < 0) { 747 return err; 748 } 749 750 dev->protocol_features = 751 protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK; 752 err = vhost_user_set_protocol_features(dev, dev->protocol_features); 753 if (err < 0) { 754 return err; 755 } 756 757 /* query the max queues we support if backend supports Multiple Queue */ 758 if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) { 759 err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM, 760 &dev->max_queues); 761 if (err < 0) { 762 return err; 763 } 764 } 765 766 if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) && 767 !(virtio_has_feature(dev->protocol_features, 768 VHOST_USER_PROTOCOL_F_SLAVE_REQ) && 769 virtio_has_feature(dev->protocol_features, 770 VHOST_USER_PROTOCOL_F_REPLY_ACK))) { 771 error_report("IOMMU support requires reply-ack and " 772 "slave-req protocol features."); 773 return -1; 774 } 775 } 776 777 if (dev->migration_blocker == NULL && 778 !virtio_has_feature(dev->protocol_features, 779 VHOST_USER_PROTOCOL_F_LOG_SHMFD)) { 780 error_setg(&dev->migration_blocker, 781 "Migration disabled: vhost-user backend lacks " 782 "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature."); 783 } 784 785 err = vhost_setup_slave_channel(dev); 786 if (err < 0) { 787 return err; 788 } 789 790 return 0; 791 } 792 793 static int vhost_user_cleanup(struct vhost_dev *dev) 794 { 795 struct vhost_user *u; 796 797 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); 798 799 u = dev->opaque; 800 if (u->slave_fd >= 0) { 801 qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL); 802 close(u->slave_fd); 803 u->slave_fd = -1; 804 } 805 g_free(u); 806 dev->opaque = 0; 807 808 return 0; 809 } 810 811 static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx) 812 { 813 assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs); 814 815 return idx; 816 } 817 818 static int vhost_user_memslots_limit(struct vhost_dev *dev) 819 { 820 return VHOST_MEMORY_MAX_NREGIONS; 821 } 822 823 static bool vhost_user_requires_shm_log(struct vhost_dev *dev) 824 { 825 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); 826 827 return virtio_has_feature(dev->protocol_features, 828 VHOST_USER_PROTOCOL_F_LOG_SHMFD); 829 } 830 831 static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr) 832 { 833 VhostUserMsg msg = { 0 }; 834 835 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); 836 837 /* If guest supports GUEST_ANNOUNCE do nothing */ 838 if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) { 839 return 0; 840 } 841 842 /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */ 843 if (virtio_has_feature(dev->protocol_features, 844 VHOST_USER_PROTOCOL_F_RARP)) { 845 msg.request = VHOST_USER_SEND_RARP; 846 msg.flags = VHOST_USER_VERSION; 847 memcpy((char *)&msg.payload.u64, mac_addr, 6); 848 msg.size = sizeof(msg.payload.u64); 849 850 return vhost_user_write(dev, &msg, NULL, 0); 851 } 852 return -1; 853 } 854 855 static bool vhost_user_can_merge(struct vhost_dev *dev, 856 uint64_t start1, uint64_t size1, 857 uint64_t start2, uint64_t size2) 858 { 859 ram_addr_t offset; 860 int mfd, rfd; 861 MemoryRegion *mr; 862 863 mr = memory_region_from_host((void *)(uintptr_t)start1, &offset); 864 mfd = memory_region_get_fd(mr); 865 866 mr = memory_region_from_host((void *)(uintptr_t)start2, &offset); 867 rfd = memory_region_get_fd(mr); 868 869 return mfd == rfd; 870 } 871 872 static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu) 873 { 874 VhostUserMsg msg; 875 bool reply_supported = virtio_has_feature(dev->protocol_features, 876 VHOST_USER_PROTOCOL_F_REPLY_ACK); 877 878 if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) { 879 return 0; 880 } 881 882 msg.request = VHOST_USER_NET_SET_MTU; 883 msg.payload.u64 = mtu; 884 msg.size = sizeof(msg.payload.u64); 885 msg.flags = VHOST_USER_VERSION; 886 if (reply_supported) { 887 msg.flags |= VHOST_USER_NEED_REPLY_MASK; 888 } 889 890 if (vhost_user_write(dev, &msg, NULL, 0) < 0) { 891 return -1; 892 } 893 894 /* If reply_ack supported, slave has to ack specified MTU is valid */ 895 if (reply_supported) { 896 return process_message_reply(dev, &msg); 897 } 898 899 return 0; 900 } 901 902 static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev, 903 struct vhost_iotlb_msg *imsg) 904 { 905 VhostUserMsg msg = { 906 .request = VHOST_USER_IOTLB_MSG, 907 .size = sizeof(msg.payload.iotlb), 908 .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK, 909 .payload.iotlb = *imsg, 910 }; 911 912 if (vhost_user_write(dev, &msg, NULL, 0) < 0) { 913 return -EFAULT; 914 } 915 916 return process_message_reply(dev, &msg); 917 } 918 919 920 static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled) 921 { 922 /* No-op as the receive channel is not dedicated to IOTLB messages. */ 923 } 924 925 const VhostOps user_ops = { 926 .backend_type = VHOST_BACKEND_TYPE_USER, 927 .vhost_backend_init = vhost_user_init, 928 .vhost_backend_cleanup = vhost_user_cleanup, 929 .vhost_backend_memslots_limit = vhost_user_memslots_limit, 930 .vhost_set_log_base = vhost_user_set_log_base, 931 .vhost_set_mem_table = vhost_user_set_mem_table, 932 .vhost_set_vring_addr = vhost_user_set_vring_addr, 933 .vhost_set_vring_endian = vhost_user_set_vring_endian, 934 .vhost_set_vring_num = vhost_user_set_vring_num, 935 .vhost_set_vring_base = vhost_user_set_vring_base, 936 .vhost_get_vring_base = vhost_user_get_vring_base, 937 .vhost_set_vring_kick = vhost_user_set_vring_kick, 938 .vhost_set_vring_call = vhost_user_set_vring_call, 939 .vhost_set_features = vhost_user_set_features, 940 .vhost_get_features = vhost_user_get_features, 941 .vhost_set_owner = vhost_user_set_owner, 942 .vhost_reset_device = vhost_user_reset_device, 943 .vhost_get_vq_index = vhost_user_get_vq_index, 944 .vhost_set_vring_enable = vhost_user_set_vring_enable, 945 .vhost_requires_shm_log = vhost_user_requires_shm_log, 946 .vhost_migration_done = vhost_user_migration_done, 947 .vhost_backend_can_merge = vhost_user_can_merge, 948 .vhost_net_set_mtu = vhost_user_net_set_mtu, 949 .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback, 950 .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg, 951 }; 952