/*
 * Vhost User Bridge
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * Authors:
 *  Victor Kaplansky <victork@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

/*
 * TODO:
 *     - main should get parameters from the command line.
 *     - implement all request handlers. Still not implemented:
 *          vubr_get_queue_num_exec()
 *          vubr_send_rarp_exec()
 *     - test for broken requests and virtqueue.
 *     - implement features defined by Virtio 1.0 spec.
 *     - support mergeable buffers and indirect descriptors.
 *     - implement clean shutdown.
 *     - implement non-blocking writes to UDP backend.
 *     - implement polling strategy.
 *     - implement clean starting/stopping of vq processing
 *     - implement clean starting/stopping of used and buffers
 *       dirty page logging.
 */
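/*
 * Overview (illustrative note added by the editor, not part of the original
 * header): this program acts as the vhost-user backend for a QEMU
 * virtio-net device. QEMU connects to the UNIX domain socket as the
 * vhost-user master; guest TX packets are forwarded to a UDP "backend"
 * socket, and packets received on that UDP socket are posted to the guest
 * RX virtqueue.
 *
 * Example invocation (a sketch only; exact QEMU options depend on the QEMU
 * version, and vhost-user requires guest RAM in a shared memory backend):
 *
 *   ./vhost-user-bridge -u /tmp/vubr.sock -l 127.0.0.1:4444 -r 127.0.0.1:5555
 *
 *   qemu-system-x86_64 -m 512 \
 *       -object memory-backend-file,id=mem,size=512M,mem-path=/dev/hugepages,share=on \
 *       -numa node,memdev=mem \
 *       -chardev socket,id=char0,path=/tmp/vubr.sock \
 *       -netdev type=vhost-user,id=net0,chardev=char0,vhostforce \
 *       -device virtio-net-pci,netdev=net0
 */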
#define _FILE_OFFSET_BITS 64

#include "qemu/osdep.h"
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/unistd.h>
#include <sys/mman.h>
#include <sys/eventfd.h>
#include <arpa/inet.h>
#include <netdb.h>

#include <linux/vhost.h>

#include "qemu/atomic.h"
#include "standard-headers/linux/virtio_net.h"
#include "standard-headers/linux/virtio_ring.h"

#define VHOST_USER_BRIDGE_DEBUG 1

#define DPRINT(...) \
    do { \
        if (VHOST_USER_BRIDGE_DEBUG) { \
            printf(__VA_ARGS__); \
        } \
    } while (0)

typedef void (*CallbackFunc)(int sock, void *ctx);

typedef struct Event {
    void *ctx;
    CallbackFunc callback;
} Event;

typedef struct Dispatcher {
    int max_sock;
    fd_set fdset;
    Event events[FD_SETSIZE];
} Dispatcher;

static void
vubr_die(const char *s)
{
    perror(s);
    exit(1);
}

static int
dispatcher_init(Dispatcher *dispr)
{
    FD_ZERO(&dispr->fdset);
    dispr->max_sock = -1;
    return 0;
}

static int
dispatcher_add(Dispatcher *dispr, int sock, void *ctx, CallbackFunc cb)
{
    if (sock >= FD_SETSIZE) {
        fprintf(stderr,
                "Error: Failed to add new event. sock %d should be less than %d\n",
                sock, FD_SETSIZE);
        return -1;
    }

    dispr->events[sock].ctx = ctx;
    dispr->events[sock].callback = cb;

    FD_SET(sock, &dispr->fdset);
    if (sock > dispr->max_sock) {
        dispr->max_sock = sock;
    }
    DPRINT("Added sock %d for watching. max_sock: %d\n",
           sock, dispr->max_sock);
    return 0;
}

/* dispatcher_remove() is not currently in use but may be useful
 * in the future. */
static int
dispatcher_remove(Dispatcher *dispr, int sock)
{
    if (sock >= FD_SETSIZE) {
        fprintf(stderr,
                "Error: Failed to remove event. sock %d should be less than %d\n",
                sock, FD_SETSIZE);
        return -1;
    }

    FD_CLR(sock, &dispr->fdset);
    DPRINT("Sock %d removed from dispatcher watch.\n", sock);
    return 0;
}

/* timeout in us */
static int
dispatcher_wait(Dispatcher *dispr, uint32_t timeout)
{
    struct timeval tv;
    tv.tv_sec = timeout / 1000000;
    tv.tv_usec = timeout % 1000000;

    fd_set fdset = dispr->fdset;

    /* Wait until some of the sockets become readable. */
    int rc = select(dispr->max_sock + 1, &fdset, 0, 0, &tv);

    if (rc == -1) {
        vubr_die("select");
    }

    /* Timeout */
    if (rc == 0) {
        return 0;
    }

    /* Now call the callback for every ready socket. */

    int sock;
    for (sock = 0; sock < dispr->max_sock + 1; sock++) {
        /* The callback on a socket can remove other sockets from the
         * dispatcher, so we have to check that the socket has not been
         * removed from the dispatcher's list in the meantime.
         */
        if (FD_ISSET(sock, &fdset) && FD_ISSET(sock, &dispr->fdset)) {
            Event *e = &dispr->events[sock];
            e->callback(sock, e->ctx);
        }
    }

    return 0;
}
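/*
 * Typical dispatcher usage (illustrative sketch added for clarity; the real
 * call sites are vubr_new(), vubr_backend_udp_setup() and vubr_run() below):
 *
 *   Dispatcher d;
 *   dispatcher_init(&d);
 *   dispatcher_add(&d, fd, ctx, callback);   // watch fd for readability
 *   for (;;) {
 *       dispatcher_wait(&d, 200000);         // timeout in us
 *   }
 */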
typedef struct VubrVirtq {
    int call_fd;
    int kick_fd;
    uint32_t size;
    uint16_t last_avail_index;
    uint16_t last_used_index;
    struct vring_desc *desc;
    struct vring_avail *avail;
    struct vring_used *used;
    uint64_t log_guest_addr;
    int enable;
} VubrVirtq;

/* Based on qemu/hw/virtio/vhost-user.c */

#define VHOST_MEMORY_MAX_NREGIONS    8
#define VHOST_USER_F_PROTOCOL_FEATURES 30

#define VHOST_LOG_PAGE 4096

enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,

    VHOST_USER_PROTOCOL_F_MAX
};

#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)

typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_MAX
} VhostUserRequest;

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;

typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1 << 2)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserLog log;
    } payload;
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    int fd_num;
} QEMU_PACKED VhostUserMsg;

#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)

#define MAX_NR_VIRTQUEUE (8)

typedef struct VubrDevRegion {
    /* Guest Physical address. */
    uint64_t gpa;
    /* Memory region size. */
    uint64_t size;
    /* QEMU virtual address (userspace). */
    uint64_t qva;
    /* Starting offset in our mmaped space. */
    uint64_t mmap_offset;
    /* Start address of mmaped space. */
    uint64_t mmap_addr;
} VubrDevRegion;
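/*
 * Note added for clarity: gpa_to_va() and qva_to_va() map an address that
 * falls inside a VubrDevRegion to our own mapping of that region:
 *
 *   local_va = addr - region_start + mmap_addr + mmap_offset
 *
 * where region_start is region->gpa for guest-physical addresses and
 * region->qva for QEMU userspace addresses.
 */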
typedef struct VubrDev {
    int sock;
    Dispatcher dispatcher;
    uint32_t nregions;
    VubrDevRegion regions[VHOST_MEMORY_MAX_NREGIONS];
    VubrVirtq vq[MAX_NR_VIRTQUEUE];
    int log_call_fd;
    uint64_t log_size;
    uint8_t *log_table;
    int backend_udp_sock;
    struct sockaddr_in backend_udp_dest;
    int ready;
    uint64_t features;
} VubrDev;

static const char *vubr_request_str[] = {
    [VHOST_USER_NONE]                   = "VHOST_USER_NONE",
    [VHOST_USER_GET_FEATURES]           = "VHOST_USER_GET_FEATURES",
    [VHOST_USER_SET_FEATURES]           = "VHOST_USER_SET_FEATURES",
    [VHOST_USER_SET_OWNER]              = "VHOST_USER_SET_OWNER",
    [VHOST_USER_RESET_OWNER]            = "VHOST_USER_RESET_OWNER",
    [VHOST_USER_SET_MEM_TABLE]          = "VHOST_USER_SET_MEM_TABLE",
    [VHOST_USER_SET_LOG_BASE]           = "VHOST_USER_SET_LOG_BASE",
    [VHOST_USER_SET_LOG_FD]             = "VHOST_USER_SET_LOG_FD",
    [VHOST_USER_SET_VRING_NUM]          = "VHOST_USER_SET_VRING_NUM",
    [VHOST_USER_SET_VRING_ADDR]         = "VHOST_USER_SET_VRING_ADDR",
    [VHOST_USER_SET_VRING_BASE]         = "VHOST_USER_SET_VRING_BASE",
    [VHOST_USER_GET_VRING_BASE]         = "VHOST_USER_GET_VRING_BASE",
    [VHOST_USER_SET_VRING_KICK]         = "VHOST_USER_SET_VRING_KICK",
    [VHOST_USER_SET_VRING_CALL]         = "VHOST_USER_SET_VRING_CALL",
    [VHOST_USER_SET_VRING_ERR]          = "VHOST_USER_SET_VRING_ERR",
    [VHOST_USER_GET_PROTOCOL_FEATURES]  = "VHOST_USER_GET_PROTOCOL_FEATURES",
    [VHOST_USER_SET_PROTOCOL_FEATURES]  = "VHOST_USER_SET_PROTOCOL_FEATURES",
    [VHOST_USER_GET_QUEUE_NUM]          = "VHOST_USER_GET_QUEUE_NUM",
    [VHOST_USER_SET_VRING_ENABLE]       = "VHOST_USER_SET_VRING_ENABLE",
    [VHOST_USER_SEND_RARP]              = "VHOST_USER_SEND_RARP",
    [VHOST_USER_MAX]                    = "VHOST_USER_MAX",
};

static void
print_buffer(uint8_t *buf, size_t len)
{
    int i;
    printf("Raw buffer:\n");
    for (i = 0; i < len; i++) {
        if (i % 16 == 0) {
            printf("\n");
        }
        if (i % 4 == 0) {
            printf(" ");
        }
        printf("%02x ", buf[i]);
    }
    printf("\n............................................................\n");
}

/* Translate guest physical address to our virtual address. */
static uint64_t
gpa_to_va(VubrDev *dev, uint64_t guest_addr)
{
    int i;

    /* Find matching memory region. */
    for (i = 0; i < dev->nregions; i++) {
        VubrDevRegion *r = &dev->regions[i];

        if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
            return guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
        }
    }

    assert(!"address not found in regions");
    return 0;
}

/* Translate qemu virtual address to our virtual address. */
static uint64_t
qva_to_va(VubrDev *dev, uint64_t qemu_addr)
{
    int i;

    /* Find matching memory region. */
    for (i = 0; i < dev->nregions; i++) {
        VubrDevRegion *r = &dev->regions[i];

        if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
            return qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
        }
    }

    assert(!"address not found in regions");
    return 0;
}

static void
vubr_message_read(int conn_fd, VhostUserMsg *vmsg)
{
    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
        .msg_controllen = sizeof(control),
    };
    size_t fd_size;
    struct cmsghdr *cmsg;
    int rc;

    rc = recvmsg(conn_fd, &msg, 0);

    if (rc == 0) {
        fprintf(stderr, "Peer disconnected.\n");
        exit(1);
    }
    if (rc < 0) {
        vubr_die("recvmsg");
    }

    vmsg->fd_num = 0;
    for (cmsg = CMSG_FIRSTHDR(&msg);
         cmsg != NULL;
         cmsg = CMSG_NXTHDR(&msg, cmsg))
    {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            fd_size = cmsg->cmsg_len - CMSG_LEN(0);
            vmsg->fd_num = fd_size / sizeof(int);
            memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
            break;
        }
    }

    if (vmsg->size > sizeof(vmsg->payload)) {
        fprintf(stderr,
                "Error: too big message request: %d, size: vmsg->size: %u, "
                "while sizeof(vmsg->payload) = %zu\n",
                vmsg->request, vmsg->size, sizeof(vmsg->payload));
        exit(1);
    }

    if (vmsg->size) {
        rc = read(conn_fd, &vmsg->payload, vmsg->size);
        if (rc == 0) {
            fprintf(stderr, "Peer disconnected.\n");
            exit(1);
        }
        if (rc < 0) {
            vubr_die("read");
        }

        assert(rc == vmsg->size);
    }
}

static void
vubr_message_write(int conn_fd, VhostUserMsg *vmsg)
{
    int rc;

    do {
        rc = write(conn_fd, vmsg, VHOST_USER_HDR_SIZE + vmsg->size);
    } while (rc < 0 && errno == EINTR);

    if (rc < 0) {
        vubr_die("write");
    }
}

static void
vubr_backend_udp_sendbuf(VubrDev *dev, uint8_t *buf, size_t len)
{
    int slen = sizeof(struct sockaddr_in);

    if (sendto(dev->backend_udp_sock, buf, len, 0,
               (struct sockaddr *) &dev->backend_udp_dest, slen) == -1) {
        vubr_die("sendto()");
    }
}

static int
vubr_backend_udp_recvbuf(VubrDev *dev, uint8_t *buf, size_t buflen)
{
    int slen = sizeof(struct sockaddr_in);
    int rc;

    rc = recvfrom(dev->backend_udp_sock, buf, buflen, 0,
                  (struct sockaddr *) &dev->backend_udp_dest,
                  (socklen_t *)&slen);
    if (rc == -1) {
        vubr_die("recvfrom()");
    }

    return rc;
}

static void
vubr_consume_raw_packet(VubrDev *dev, uint8_t *buf, uint32_t len)
{
    int hdrlen = sizeof(struct virtio_net_hdr_v1);

    if (VHOST_USER_BRIDGE_DEBUG) {
        print_buffer(buf, len);
    }
    vubr_backend_udp_sendbuf(dev, buf + hdrlen, len - hdrlen);
}
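/*
 * Dirty page logging (summary added for clarity): when VHOST_F_LOG_ALL is
 * negotiated and QEMU has provided a log region via VHOST_USER_SET_LOG_BASE,
 * every guest-visible write is recorded by setting one bit per
 * VHOST_LOG_PAGE-sized page in the shared log table, after which QEMU is
 * notified through log_call_fd (see vubr_log_write() below).
 */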
/* Kick the log_call_fd if required. */
static void
vubr_log_kick(VubrDev *dev)
{
    if (dev->log_call_fd != -1) {
        DPRINT("Kicking the QEMU's log...\n");
        eventfd_write(dev->log_call_fd, 1);
    }
}

/* Kick the guest if necessary. */
static void
vubr_virtqueue_kick(VubrVirtq *vq)
{
    if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
        DPRINT("Kicking the guest...\n");
        eventfd_write(vq->call_fd, 1);
    }
}

static void
vubr_log_page(uint8_t *log_table, uint64_t page)
{
    DPRINT("Logged dirty guest page: %"PRId64"\n", page);
    atomic_or(&log_table[page / 8], 1 << (page % 8));
}

static void
vubr_log_write(VubrDev *dev, uint64_t address, uint64_t length)
{
    uint64_t page;

    if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
        !dev->log_table || !length) {
        return;
    }

    assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));

    page = address / VHOST_LOG_PAGE;
    while (page * VHOST_LOG_PAGE < address + length) {
        vubr_log_page(dev->log_table, page);
        page += 1;
    }
    vubr_log_kick(dev);
}

static void
vubr_post_buffer(VubrDev *dev, VubrVirtq *vq, uint8_t *buf, int32_t len)
{
    struct vring_desc *desc = vq->desc;
    struct vring_avail *avail = vq->avail;
    struct vring_used *used = vq->used;
    uint64_t log_guest_addr = vq->log_guest_addr;

    unsigned int size = vq->size;

    uint16_t avail_index = atomic_mb_read(&avail->idx);

    /* We check the available descriptors before posting the
     * buffer, so here we assume that there are enough available
     * descriptors. */
    assert(vq->last_avail_index != avail_index);
    uint16_t a_index = vq->last_avail_index % size;
    uint16_t u_index = vq->last_used_index % size;
    uint16_t d_index = avail->ring[a_index];

    int i = d_index;

    DPRINT("Post packet to guest on vq:\n");
    DPRINT("    size             = %d\n", vq->size);
    DPRINT("    last_avail_index = %d\n", vq->last_avail_index);
    DPRINT("    last_used_index  = %d\n", vq->last_used_index);
    DPRINT("    a_index          = %d\n", a_index);
    DPRINT("    u_index          = %d\n", u_index);
    DPRINT("    d_index          = %d\n", d_index);
    DPRINT("    desc[%d].addr    = 0x%016"PRIx64"\n", i, desc[i].addr);
    DPRINT("    desc[%d].len     = %d\n", i, desc[i].len);
    DPRINT("    desc[%d].flags   = %d\n", i, desc[i].flags);
    DPRINT("    avail->idx       = %d\n", avail_index);
    DPRINT("    used->idx        = %d\n", used->idx);

    if (!(desc[i].flags & VRING_DESC_F_WRITE)) {
        /* FIXME: we should find a writable descriptor. */
        fprintf(stderr, "Error: descriptor is not writable. Exiting.\n");
        exit(1);
    }

    void *chunk_start = (void *)gpa_to_va(dev, desc[i].addr);
    uint32_t chunk_len = desc[i].len;

    if (len <= chunk_len) {
        memcpy(chunk_start, buf, len);
        vubr_log_write(dev, desc[i].addr, len);
    } else {
        fprintf(stderr,
                "Received too long packet from the backend. Dropping...\n");
        return;
    }

    /* Add descriptor to the used ring. */
    used->ring[u_index].id = d_index;
    used->ring[u_index].len = len;
    vubr_log_write(dev,
                   log_guest_addr + offsetof(struct vring_used, ring[u_index]),
                   sizeof(used->ring[u_index]));

    vq->last_avail_index++;
    vq->last_used_index++;

    atomic_mb_set(&used->idx, vq->last_used_index);
    vubr_log_write(dev,
                   log_guest_addr + offsetof(struct vring_used, idx),
                   sizeof(used->idx));

    /* Kick the guest if necessary. */
    vubr_virtqueue_kick(vq);
}
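/*
 * TX path (note added for clarity): vubr_process_desc() walks one available
 * descriptor chain on the TX virtqueue, linearizes it into a local buffer
 * and hands the frame to the UDP backend; vubr_process_avail() loops over
 * all pending chains and then publishes the new used index.
 */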
static int
vubr_process_desc(VubrDev *dev, VubrVirtq *vq)
{
    struct vring_desc *desc = vq->desc;
    struct vring_avail *avail = vq->avail;
    struct vring_used *used = vq->used;
    uint64_t log_guest_addr = vq->log_guest_addr;

    unsigned int size = vq->size;

    uint16_t a_index = vq->last_avail_index % size;
    uint16_t u_index = vq->last_used_index % size;
    uint16_t d_index = avail->ring[a_index];

    uint32_t i, len = 0;
    size_t buf_size = 4096;
    uint8_t buf[4096];

    DPRINT("Chunks: ");
    i = d_index;
    do {
        void *chunk_start = (void *)gpa_to_va(dev, desc[i].addr);
        uint32_t chunk_len = desc[i].len;

        assert(!(desc[i].flags & VRING_DESC_F_WRITE));

        if (len + chunk_len < buf_size) {
            memcpy(buf + len, chunk_start, chunk_len);
            DPRINT("%d ", chunk_len);
        } else {
            fprintf(stderr, "Error: too long packet. Dropping...\n");
            break;
        }

        len += chunk_len;

        if (!(desc[i].flags & VRING_DESC_F_NEXT)) {
            break;
        }

        i = desc[i].next;
    } while (1);
    DPRINT("\n");

    if (!len) {
        return -1;
    }

    /* Add descriptor to the used ring. */
    used->ring[u_index].id = d_index;
    used->ring[u_index].len = len;
    vubr_log_write(dev,
                   log_guest_addr + offsetof(struct vring_used, ring[u_index]),
                   sizeof(used->ring[u_index]));

    vubr_consume_raw_packet(dev, buf, len);

    return 0;
}

static void
vubr_process_avail(VubrDev *dev, VubrVirtq *vq)
{
    struct vring_avail *avail = vq->avail;
    struct vring_used *used = vq->used;
    uint64_t log_guest_addr = vq->log_guest_addr;

    while (vq->last_avail_index != atomic_mb_read(&avail->idx)) {
        vubr_process_desc(dev, vq);
        vq->last_avail_index++;
        vq->last_used_index++;
    }

    atomic_mb_set(&used->idx, vq->last_used_index);
    vubr_log_write(dev,
                   log_guest_addr + offsetof(struct vring_used, idx),
                   sizeof(used->idx));
}
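/*
 * RX path (note added for clarity): packets arriving on the UDP backend
 * socket are prefixed with a zeroed struct virtio_net_hdr_v1 and posted to
 * the guest's receive virtqueue (vq[0]), while kicks on vq[1] drive the TX
 * path above.
 */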
static void
vubr_backend_recv_cb(int sock, void *ctx)
{
    VubrDev *dev = (VubrDev *) ctx;
    VubrVirtq *rx_vq = &dev->vq[0];
    uint8_t buf[4096];
    struct virtio_net_hdr_v1 *hdr = (struct virtio_net_hdr_v1 *)buf;
    int hdrlen = sizeof(struct virtio_net_hdr_v1);
    int buflen = sizeof(buf);
    int len;

    if (!dev->ready) {
        return;
    }

    DPRINT("\n\n   ***   IN UDP RECEIVE CALLBACK    ***\n\n");

    uint16_t avail_index = atomic_mb_read(&rx_vq->avail->idx);

    /* If there are no available descriptors, just do nothing.
     * The buffer will be handled by the next arriving UDP packet,
     * or by the next kick on the receive virtq. */
    if (rx_vq->last_avail_index == avail_index) {
        DPRINT("Got UDP packet, but no available descriptors on RX virtq.\n");
        return;
    }

    len = vubr_backend_udp_recvbuf(dev, buf + hdrlen, buflen - hdrlen);

    *hdr = (struct virtio_net_hdr_v1) { };
    hdr->num_buffers = 1;
    vubr_post_buffer(dev, rx_vq, buf, len + hdrlen);
}

static void
vubr_kick_cb(int sock, void *ctx)
{
    VubrDev *dev = (VubrDev *) ctx;
    eventfd_t kick_data;
    ssize_t rc;

    rc = eventfd_read(sock, &kick_data);
    if (rc == -1) {
        vubr_die("eventfd_read()");
    } else {
        DPRINT("Got kick_data: %016"PRIx64"\n", kick_data);
        vubr_process_avail(dev, &dev->vq[1]);
    }
}

static int
vubr_none_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("Function %s() not implemented yet.\n", __func__);
    return 0;
}

static int
vubr_get_features_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    vmsg->payload.u64 =
        ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
         (1ULL << VHOST_F_LOG_ALL) |
         (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
         (1ULL << VHOST_USER_F_PROTOCOL_FEATURES));

    vmsg->size = sizeof(vmsg->payload.u64);

    DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    /* Reply */
    return 1;
}

static int
vubr_set_features_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
    dev->features = vmsg->payload.u64;
    return 0;
}

static int
vubr_set_owner_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    return 0;
}

static void
vubr_close_log(VubrDev *dev)
{
    if (dev->log_table) {
        if (munmap(dev->log_table, dev->log_size) != 0) {
            vubr_die("munmap()");
        }

        dev->log_table = 0;
    }
    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
        dev->log_call_fd = -1;
    }
}

static int
vubr_reset_device_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    vubr_close_log(dev);
    dev->ready = 0;
    dev->features = 0;
    return 0;
}
static int
vubr_set_mem_table_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    int i;
    VhostUserMemory *memory = &vmsg->payload.memory;
    dev->nregions = memory->nregions;

    DPRINT("Nregions: %d\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
        VhostUserMemoryRegion *msg_region = &memory->regions[i];
        VubrDevRegion *dev_region = &dev->regions[i];

        DPRINT("Region %d\n", i);
        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
               msg_region->guest_phys_addr);
        DPRINT("    memory_size:     0x%016"PRIx64"\n",
               msg_region->memory_size);
        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
               msg_region->userspace_addr);
        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
               msg_region->mmap_offset);

        dev_region->gpa = msg_region->guest_phys_addr;
        dev_region->size = msg_region->memory_size;
        dev_region->qva = msg_region->userspace_addr;
        dev_region->mmap_offset = msg_region->mmap_offset;

        /* We don't use the offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages. */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_READ | PROT_WRITE, MAP_SHARED,
                         vmsg->fds[i], 0);

        if (mmap_addr == MAP_FAILED) {
            vubr_die("mmap");
        }
        dev_region->mmap_addr = (uint64_t) mmap_addr;
        DPRINT("    mmap_addr:       0x%016"PRIx64"\n", dev_region->mmap_addr);

        close(vmsg->fds[i]);
    }

    return 0;
}

static int
vubr_set_log_base_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    int fd;
    uint64_t log_mmap_size, log_mmap_offset;
    void *rc;

    assert(vmsg->fd_num == 1);
    fd = vmsg->fds[0];

    assert(vmsg->size == sizeof(vmsg->payload.log));
    log_mmap_offset = vmsg->payload.log.mmap_offset;
    log_mmap_size = vmsg->payload.log.mmap_size;
    DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
    DPRINT("Log mmap_size:   %"PRId64"\n", log_mmap_size);

    rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
              log_mmap_offset);
    if (rc == MAP_FAILED) {
        vubr_die("mmap");
    }
    dev->log_table = rc;
    dev->log_size = log_mmap_size;

    vmsg->size = sizeof(vmsg->payload.u64);
    /* Reply */
    return 1;
}

static int
vubr_set_log_fd_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    assert(vmsg->fd_num == 1);
    dev->log_call_fd = vmsg->fds[0];
    DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);
    return 0;
}

static int
vubr_set_vring_num_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %d\n", index);
    DPRINT("State.num:   %d\n", num);
    dev->vq[index].size = num;
    return 0;
}

static int
vubr_set_vring_addr_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    struct vhost_vring_addr *vra = &vmsg->payload.addr;
    unsigned int index = vra->index;
    VubrVirtq *vq = &dev->vq[index];

    DPRINT("vhost_vring_addr:\n");
    DPRINT("    index:            %d\n", vra->index);
    DPRINT("    flags:            %d\n", vra->flags);
    DPRINT("    desc_user_addr:   0x%016llx\n", vra->desc_user_addr);
    DPRINT("    used_user_addr:   0x%016llx\n", vra->used_user_addr);
    DPRINT("    avail_user_addr:  0x%016llx\n", vra->avail_user_addr);
    DPRINT("    log_guest_addr:   0x%016llx\n", vra->log_guest_addr);

    vq->desc = (struct vring_desc *)qva_to_va(dev, vra->desc_user_addr);
    vq->used = (struct vring_used *)qva_to_va(dev, vra->used_user_addr);
    vq->avail = (struct vring_avail *)qva_to_va(dev, vra->avail_user_addr);
    vq->log_guest_addr = vra->log_guest_addr;

    DPRINT("Setting virtq addresses:\n");
    DPRINT("    vring_desc  at %p\n", vq->desc);
    DPRINT("    vring_used  at %p\n", vq->used);
    DPRINT("    vring_avail at %p\n", vq->avail);

    vq->last_used_index = vq->used->idx;
    return 0;
}

static int
vubr_set_vring_base_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %d\n", index);
    DPRINT("State.num:   %d\n", num);
    dev->vq[index].last_avail_index = num;

    return 0;
}
static int
vubr_get_vring_base_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    DPRINT("State.index: %d\n", index);
    vmsg->payload.state.num = dev->vq[index].last_avail_index;
    vmsg->size = sizeof(vmsg->payload.state);
    /* FIXME: this is a work-around for a bug in QEMU enabling
     * vrings too early. When protocol features are enabled, we have to
     * respect VHOST_USER_SET_VRING_ENABLE requests. */
    dev->ready = 0;

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dispatcher_remove(&dev->dispatcher, dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }
    if (dev->vq[index].kick_fd != -1) {
        close(dev->vq[index].kick_fd);
        dispatcher_remove(&dev->dispatcher, dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    /* Reply */
    return 1;
}

static int
vubr_set_vring_kick_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    uint64_t u64_arg = vmsg->payload.u64;
    int index = u64_arg & VHOST_USER_VRING_IDX_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    assert((u64_arg & VHOST_USER_VRING_NOFD_MASK) == 0);
    assert(vmsg->fd_num == 1);

    if (dev->vq[index].kick_fd != -1) {
        close(dev->vq[index].kick_fd);
        dispatcher_remove(&dev->dispatcher, dev->vq[index].kick_fd);
    }
    dev->vq[index].kick_fd = vmsg->fds[0];
    DPRINT("Got kick_fd: %d for vq: %d\n", vmsg->fds[0], index);

    if (index % 2 == 1) {
        /* TX queue. */
        dispatcher_add(&dev->dispatcher, dev->vq[index].kick_fd,
                       dev, vubr_kick_cb);

        DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
               dev->vq[index].kick_fd, index);
    }
    /* We temporarily use this hack to determine that both TX and RX
     * queues are set up and ready for processing.
     * FIXME: we need to rely on VHOST_USER_SET_VRING_ENABLE and
     * actual kicks. */
    if (dev->vq[0].kick_fd != -1 &&
        dev->vq[1].kick_fd != -1) {
        dev->ready = 1;
        DPRINT("vhost-user-bridge is ready for processing queues.\n");
    }
    return 0;
}

static int
vubr_set_vring_call_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    uint64_t u64_arg = vmsg->payload.u64;
    int index = u64_arg & VHOST_USER_VRING_IDX_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
    assert((u64_arg & VHOST_USER_VRING_NOFD_MASK) == 0);
    assert(vmsg->fd_num == 1);

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dispatcher_remove(&dev->dispatcher, dev->vq[index].call_fd);
    }
    dev->vq[index].call_fd = vmsg->fds[0];
    DPRINT("Got call_fd: %d for vq: %d\n", vmsg->fds[0], index);

    return 0;
}

static int
vubr_set_vring_err_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
    return 0;
}

static int
vubr_get_protocol_features_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    vmsg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD;
    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
    vmsg->size = sizeof(vmsg->payload.u64);

    /* Reply */
    return 1;
}

static int
vubr_set_protocol_features_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    /* FIXME: unimplemented */
    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
    return 0;
}

static int
vubr_get_queue_num_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("Function %s() not implemented yet.\n", __func__);
    return 0;
}
static int
vubr_set_vring_enable_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int enable = vmsg->payload.state.num;

    DPRINT("State.index:  %d\n", index);
    DPRINT("State.enable: %d\n", enable);
    dev->vq[index].enable = enable;
    return 0;
}

static int
vubr_send_rarp_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("Function %s() not implemented yet.\n", __func__);
    return 0;
}

static int
vubr_execute_request(VubrDev *dev, VhostUserMsg *vmsg)
{
    /* Print out the generic part of the request. */
    DPRINT(
        "==================   Vhost user message from QEMU   ==================\n");
    DPRINT("Request: %s (%d)\n", vubr_request_str[vmsg->request],
           vmsg->request);
    DPRINT("Flags:   0x%x\n", vmsg->flags);
    DPRINT("Size:    %d\n", vmsg->size);

    if (vmsg->fd_num) {
        int i;
        DPRINT("Fds:");
        for (i = 0; i < vmsg->fd_num; i++) {
            DPRINT(" %d", vmsg->fds[i]);
        }
        DPRINT("\n");
    }

    switch (vmsg->request) {
    case VHOST_USER_NONE:
        return vubr_none_exec(dev, vmsg);
    case VHOST_USER_GET_FEATURES:
        return vubr_get_features_exec(dev, vmsg);
    case VHOST_USER_SET_FEATURES:
        return vubr_set_features_exec(dev, vmsg);
    case VHOST_USER_SET_OWNER:
        return vubr_set_owner_exec(dev, vmsg);
    case VHOST_USER_RESET_OWNER:
        return vubr_reset_device_exec(dev, vmsg);
    case VHOST_USER_SET_MEM_TABLE:
        return vubr_set_mem_table_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_BASE:
        return vubr_set_log_base_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_FD:
        return vubr_set_log_fd_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_NUM:
        return vubr_set_vring_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ADDR:
        return vubr_set_vring_addr_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_BASE:
        return vubr_set_vring_base_exec(dev, vmsg);
    case VHOST_USER_GET_VRING_BASE:
        return vubr_get_vring_base_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_KICK:
        return vubr_set_vring_kick_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_CALL:
        return vubr_set_vring_call_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ERR:
        return vubr_set_vring_err_exec(dev, vmsg);
    case VHOST_USER_GET_PROTOCOL_FEATURES:
        return vubr_get_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_PROTOCOL_FEATURES:
        return vubr_set_protocol_features_exec(dev, vmsg);
    case VHOST_USER_GET_QUEUE_NUM:
        return vubr_get_queue_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ENABLE:
        return vubr_set_vring_enable_exec(dev, vmsg);
    case VHOST_USER_SEND_RARP:
        return vubr_send_rarp_exec(dev, vmsg);

    case VHOST_USER_MAX:
        assert(vmsg->request != VHOST_USER_MAX);
    }
    return 0;
}

static void
vubr_receive_cb(int sock, void *ctx)
{
    VubrDev *dev = (VubrDev *) ctx;
    VhostUserMsg vmsg;
    int reply_requested;

    vubr_message_read(sock, &vmsg);
    reply_requested = vubr_execute_request(dev, &vmsg);
    if (reply_requested) {
        /* Set the version in the flags when sending the reply */
        vmsg.flags &= ~VHOST_USER_VERSION_MASK;
        vmsg.flags |= VHOST_USER_VERSION;
        vmsg.flags |= VHOST_USER_REPLY_MASK;
        vubr_message_write(sock, &vmsg);
    }
}
static void
vubr_accept_cb(int sock, void *ctx)
{
    VubrDev *dev = (VubrDev *)ctx;
    int conn_fd;
    struct sockaddr_un un;
    socklen_t len = sizeof(un);

    conn_fd = accept(sock, (struct sockaddr *) &un, &len);
    if (conn_fd == -1) {
        vubr_die("accept()");
    }
    DPRINT("Got connection from remote peer on sock %d\n", conn_fd);
    dispatcher_add(&dev->dispatcher, conn_fd, ctx, vubr_receive_cb);
}

static VubrDev *
vubr_new(const char *path)
{
    VubrDev *dev = (VubrDev *) calloc(1, sizeof(VubrDev));
    dev->nregions = 0;
    int i;
    struct sockaddr_un un;
    size_t len;

    for (i = 0; i < MAX_NR_VIRTQUEUE; i++) {
        dev->vq[i] = (VubrVirtq) {
            .call_fd = -1, .kick_fd = -1,
            .size = 0,
            .last_avail_index = 0, .last_used_index = 0,
            .desc = 0, .avail = 0, .used = 0,
            .enable = 0,
        };
    }

    /* Init log */
    dev->log_call_fd = -1;
    dev->log_size = 0;
    dev->log_table = 0;
    dev->ready = 0;
    dev->features = 0;

    /* Get a UNIX socket. */
    dev->sock = socket(AF_UNIX, SOCK_STREAM, 0);
    if (dev->sock == -1) {
        vubr_die("socket");
    }

    un.sun_family = AF_UNIX;
    strcpy(un.sun_path, path);
    len = sizeof(un.sun_family) + strlen(path);
    unlink(path);

    if (bind(dev->sock, (struct sockaddr *) &un, len) == -1) {
        vubr_die("bind");
    }

    if (listen(dev->sock, 1) == -1) {
        vubr_die("listen");
    }

    dispatcher_init(&dev->dispatcher);
    dispatcher_add(&dev->dispatcher, dev->sock, (void *)dev,
                   vubr_accept_cb);

    DPRINT("Waiting for connections on UNIX socket %s ...\n", path);
    return dev;
}

static void
vubr_set_host(struct sockaddr_in *saddr, const char *host)
{
    if (isdigit(host[0])) {
        if (!inet_aton(host, &saddr->sin_addr)) {
            fprintf(stderr, "inet_aton() failed.\n");
            exit(1);
        }
    } else {
        struct hostent *he = gethostbyname(host);

        if (!he) {
            fprintf(stderr, "gethostbyname() failed.\n");
            exit(1);
        }
        saddr->sin_addr = *(struct in_addr *)he->h_addr;
    }
}

static void
vubr_backend_udp_setup(VubrDev *dev,
                       const char *local_host,
                       const char *local_port,
                       const char *remote_host,
                       const char *remote_port)
{
    int sock;
    const char *r;

    int lport, rport;

    lport = strtol(local_port, (char **)&r, 0);
    if (r == local_port) {
        fprintf(stderr, "lport parsing failed.\n");
        exit(1);
    }

    rport = strtol(remote_port, (char **)&r, 0);
    if (r == remote_port) {
        fprintf(stderr, "rport parsing failed.\n");
        exit(1);
    }

    struct sockaddr_in si_local = {
        .sin_family = AF_INET,
        .sin_port = htons(lport),
    };

    vubr_set_host(&si_local, local_host);

    /* setup destination for sends */
    dev->backend_udp_dest = (struct sockaddr_in) {
        .sin_family = AF_INET,
        .sin_port = htons(rport),
    };
    vubr_set_host(&dev->backend_udp_dest, remote_host);

    sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
    if (sock == -1) {
        vubr_die("socket");
    }

    if (bind(sock, (struct sockaddr *)&si_local, sizeof(si_local)) == -1) {
        vubr_die("bind");
    }

    dev->backend_udp_sock = sock;
    dispatcher_add(&dev->dispatcher, sock, dev, vubr_backend_recv_cb);
    DPRINT("Waiting for data from udp backend on %s:%d...\n",
           local_host, lport);
}

static void
vubr_run(VubrDev *dev)
{
    while (1) {
        /* timeout 200ms */
        dispatcher_wait(&dev->dispatcher, 200000);
        /* Here one can try a polling strategy. */
    }
}
static int
vubr_parse_host_port(const char **host, const char **port, const char *buf)
{
    char *p = strchr(buf, ':');

    if (!p) {
        return -1;
    }
    *p = '\0';
    *host = strdup(buf);
    *port = strdup(p + 1);
    return 0;
}

#define DEFAULT_UD_SOCKET "/tmp/vubr.sock"
#define DEFAULT_LHOST "127.0.0.1"
#define DEFAULT_LPORT "4444"
#define DEFAULT_RHOST "127.0.0.1"
#define DEFAULT_RPORT "5555"

static const char *ud_socket_path = DEFAULT_UD_SOCKET;
static const char *lhost = DEFAULT_LHOST;
static const char *lport = DEFAULT_LPORT;
static const char *rhost = DEFAULT_RHOST;
static const char *rport = DEFAULT_RPORT;

int
main(int argc, char *argv[])
{
    VubrDev *dev;
    int opt;

    while ((opt = getopt(argc, argv, "l:r:u:")) != -1) {

        switch (opt) {
        case 'l':
            if (vubr_parse_host_port(&lhost, &lport, optarg) < 0) {
                goto out;
            }
            break;
        case 'r':
            if (vubr_parse_host_port(&rhost, &rport, optarg) < 0) {
                goto out;
            }
            break;
        case 'u':
            ud_socket_path = strdup(optarg);
            break;
        default:
            goto out;
        }
    }

    DPRINT("ud socket: %s\n", ud_socket_path);
    DPRINT("local:     %s:%s\n", lhost, lport);
    DPRINT("remote:    %s:%s\n", rhost, rport);

    dev = vubr_new(ud_socket_path);
    if (!dev) {
        return 1;
    }

    vubr_backend_udp_setup(dev, lhost, lport, rhost, rport);
    vubr_run(dev);
    return 0;

out:
    fprintf(stderr, "Usage: %s ", argv[0]);
    fprintf(stderr, "[-u ud_socket_path] [-l lhost:lport] [-r rhost:rport]\n");
    fprintf(stderr, "\t-u path to unix domain socket. default: %s\n",
            DEFAULT_UD_SOCKET);
    fprintf(stderr, "\t-l local host and port. default: %s:%s\n",
            DEFAULT_LHOST, DEFAULT_LPORT);
    fprintf(stderr, "\t-r remote host and port. default: %s:%s\n",
            DEFAULT_RHOST, DEFAULT_RPORT);

    return 1;
}