/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 * calling socket(2)) and those created by incoming connection request packets.
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 * specified an address that they are responsible for) and one for connected
 * sockets (sockets that have established a connection with another socket).
 * These tables are "global" in that all sockets on the system are placed
 * within them.  Note, though, that the bound table contains an extra entry
 * for a list of unbound sockets; SOCK_DGRAM sockets will always remain in
 * that list.  The bound table is used solely for lookup of sockets when
 * packets are received, and that's not necessary for SOCK_DGRAM sockets since
 * we create a datagram handle for each and need not perform a lookup.
 * Keeping SOCK_DGRAM sockets out of the bound hash buckets reduces the chance
 * of collisions when looking up SOCK_STREAM sockets and saves us from having
 * to check the socket type during hash table lookups.
 *
 * - Sockets created by user action will either be "client" sockets that
 * initiate a connection or "server" sockets that listen for connections; we
 * do not support simultaneous connects (two "client" sockets connecting).
 *
 * - "Server" sockets are referred to as listener sockets throughout this
 * implementation because they are in the TCP_LISTEN state.  When a
 * connection request is received (the second kind of socket mentioned above),
 * we create a new socket and refer to it as a pending socket.  These pending
 * sockets are placed on the pending connection list of the listener socket.
 * When future packets are received for the address the listener socket is
 * bound to, we check if the source of the packet is from one that has an
 * existing pending connection.  If so, we process the packet for the
 * pending socket.  When that socket reaches the connected state, it is
 * removed from the listener socket's pending list and enqueued in the
 * listener socket's accept queue.  Callers of accept(2) will accept connected
 * sockets from the listener socket's accept queue.  If the socket cannot be
 * accepted for some reason then it is marked rejected.  Once the connection
 * is accepted, it is owned by the user process and the responsibility for
 * cleanup falls with that user process.
 *
 * - It is possible that these pending sockets will never reach the connected
 * state; in fact, we may never receive another packet after the connection
 * request.  Because of this, we must schedule a cleanup function to run in
 * the future, after some amount of time passes where a connection should have
 * been established.  This function ensures that the socket is off all lists
 * so it cannot be retrieved, then drops all references to the socket so it is
 * cleaned up (sock_put() -> sk_free() -> our sk_destruct implementation).
 * Note that this function will also clean up rejected sockets, those that
 * reach the connected state but leave it before they have been accepted.
 *
 * - Lock ordering for pending or accept queue sockets is:
 *
 *     lock_sock(listener);
 *     lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
 *
 * Using explicit nested locking keeps lockdep happy since normally only one
 * lock of a given class may be taken at a time.
 *
 * - Sockets created by user action will be cleaned up when the user process
 * calls close(2), causing our release implementation to be called.  Our
 * release implementation will perform some cleanup then drop the last
 * reference so our sk_destruct implementation is invoked.  Our sk_destruct
 * implementation will perform additional cleanup that's common for both types
 * of sockets.
 *
 * - A socket's reference count is what ensures that the structure won't be
 * freed.  Each entry in a list (such as the "global" bound and connected
 * tables and the listener socket's pending list and accept queue) holds a
 * reference.  When we defer work until process context and pass a socket as
 * our argument, we must ensure the reference count is increased so that the
 * socket isn't freed before the function is run; the deferred function will
 * then drop the reference.
 *
 * - sk->sk_state uses the TCP state constants because they are widely used by
 * other address families and exposed to userspace tools like ss(8):
 *
 *   TCP_CLOSE - unconnected
 *   TCP_SYN_SENT - connecting
 *   TCP_ESTABLISHED - connected
 *   TCP_CLOSING - disconnecting
 *   TCP_LISTEN - listening
 */
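/* For illustration only -- a minimal sketch (not part of this driver) of how
 * userspace exercises the client side of this family.  The CID and port
 * below are hypothetical; real programs would use a known guest CID or one
 * of the VMADDR_CID_* constants from <linux/vm_sockets.h>:
 *
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = 3,		// hypothetical peer CID
 *		.svm_port = 1234,	// hypothetical service port
 *	};
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 */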
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* Protocol family. */
static struct proto vsock_proto = {
	.name = "AF_VSOCK",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct vsock_sock),
};

/* The default peer timeout indicates how long we will wait for a peer
 * response to a control message.
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

static const struct vsock_transport *transport;
static DEFINE_MUTEX(vsock_register_mutex);

/**** EXPORTS ****/

/* Get the ID of the local context.  This is transport dependent. */

int vm_sockets_get_local_cid(void)
{
	return transport->get_local_cid();
}
EXPORT_SYMBOL_GPL(vm_sockets_get_local_cid);
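/* For illustration -- userspace typically reaches the exported CID through
 * the misc device this module registers (see vsock_dev_do_ioctl() below); a
 * minimal sketch:
 *
 *	unsigned int cid;
 *	int fd = open("/dev/vsock", O_RDONLY);
 *	ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid);
 */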
/**** UTILS ****/

/* Each bound VSocket is stored in the bind hash table and each connected
 * VSocket is stored in the connected hash table.
 *
 * Unbound sockets are all put on the same list attached to the end of the
 * hash table (vsock_unbound_sockets).  Bound sockets are added to the hash
 * table in the bucket that their local address hashes to
 * (vsock_bound_sockets(addr) represents the list that addr hashes to).
 *
 * Specifically, we initialize the vsock_bind_table array to a size of
 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets.  The hash
 * function mods with VSOCK_HASH_SIZE to ensure this.
 */
#define MAX_PORT_RETRIES	24

#define VSOCK_HASH(addr)	((addr)->svm_port % VSOCK_HASH_SIZE)
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets	  (&vsock_bind_table[VSOCK_HASH_SIZE])

/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst)	\
	(((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
#define vsock_connected_sockets(src, dst)	\
	(&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk)	\
	vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)

struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
EXPORT_SYMBOL_GPL(vsock_bind_table);
struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
EXPORT_SYMBOL_GPL(vsock_connected_table);
DEFINE_SPINLOCK(vsock_table_lock);
EXPORT_SYMBOL_GPL(vsock_table_lock);
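/* A worked example of the layout above, assuming VSOCK_HASH_SIZE is 251 (its
 * value in af_vsock.h at the time of writing): a socket bound to port 253
 * lands in vsock_bind_table[253 % 251] == vsock_bind_table[2], while every
 * unbound socket sits in vsock_bind_table[251], the extra tail entry.
 */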
/* Autobind this socket to the local address if necessary. */
static int vsock_auto_bind(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);
	struct sockaddr_vm local_addr;

	if (vsock_addr_bound(&vsk->local_addr))
		return 0;
	vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	return __vsock_bind(sk, &local_addr);
}

static int __init vsock_init_tables(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
		INIT_LIST_HEAD(&vsock_bind_table[i]);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
		INIT_LIST_HEAD(&vsock_connected_table[i]);
	return 0;
}

static void __vsock_insert_bound(struct list_head *list,
				 struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->bound_table, list);
}

static void __vsock_insert_connected(struct list_head *list,
				     struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->connected_table, list);
}

static void __vsock_remove_bound(struct vsock_sock *vsk)
{
	list_del_init(&vsk->bound_table);
	sock_put(&vsk->sk);
}

static void __vsock_remove_connected(struct vsock_sock *vsk)
{
	list_del_init(&vsk->connected_table);
	sock_put(&vsk->sk);
}

static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
		if (addr->svm_port == vsk->local_addr.svm_port)
			return sk_vsock(vsk);

	return NULL;
}

static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
						  struct sockaddr_vm *dst)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
			    connected_table) {
		if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
		    dst->svm_port == vsk->local_addr.svm_port) {
			return sk_vsock(vsk);
		}
	}

	return NULL;
}

static void vsock_insert_unbound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_bound(vsock_unbound_sockets, vsk);
	spin_unlock_bh(&vsock_table_lock);
}

void vsock_insert_connected(struct vsock_sock *vsk)
{
	struct list_head *list = vsock_connected_sockets(
		&vsk->remote_addr, &vsk->local_addr);

	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_connected(list, vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_insert_connected);

void vsock_remove_bound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_remove_bound(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);

void vsock_remove_connected(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_remove_connected(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);

struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_bound_socket(addr);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_bound_socket);

struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
					 struct sockaddr_vm *dst)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_connected_socket(src, dst);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
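/* Note for transport authors (illustrative, not a new API): both lookup
 * functions above return the socket with an extra reference held, so a
 * typical receive path pairs the lookup with a sock_put():
 *
 *	sk = vsock_find_connected_socket(&src, &dst);
 *	if (sk) {
 *		... deliver the packet ...
 *		sock_put(sk);
 *	}
 */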
static bool vsock_in_bound_table(struct vsock_sock *vsk)
{
	bool ret;

	spin_lock_bh(&vsock_table_lock);
	ret = __vsock_in_bound_table(vsk);
	spin_unlock_bh(&vsock_table_lock);

	return ret;
}

static bool vsock_in_connected_table(struct vsock_sock *vsk)
{
	bool ret;

	spin_lock_bh(&vsock_table_lock);
	ret = __vsock_in_connected_table(vsk);
	spin_unlock_bh(&vsock_table_lock);

	return ret;
}

void vsock_remove_sock(struct vsock_sock *vsk)
{
	if (vsock_in_bound_table(vsk))
		vsock_remove_bound(vsk);

	if (vsock_in_connected_table(vsk))
		vsock_remove_connected(vsk);
}
EXPORT_SYMBOL_GPL(vsock_remove_sock);

void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
{
	int i;

	spin_lock_bh(&vsock_table_lock);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
		struct vsock_sock *vsk;

		list_for_each_entry(vsk, &vsock_connected_table[i],
				    connected_table)
			fn(sk_vsock(vsk));
	}

	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);

void vsock_add_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;

	vlistener = vsock_sk(listener);
	vpending = vsock_sk(pending);

	sock_hold(pending);
	sock_hold(listener);
	list_add_tail(&vpending->pending_links, &vlistener->pending_links);
}
EXPORT_SYMBOL_GPL(vsock_add_pending);

void vsock_remove_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vpending = vsock_sk(pending);

	list_del_init(&vpending->pending_links);
	sock_put(listener);
	sock_put(pending);
}
EXPORT_SYMBOL_GPL(vsock_remove_pending);

void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);
	vconnected = vsock_sk(connected);

	sock_hold(connected);
	sock_hold(listener);
	list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
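/* Sketch of the expected transport-side sequence (per the implementation
 * notes at the top of this file): on a connection request a transport adds
 * the new child with vsock_add_pending(); once the handshake completes it
 * moves the child over with vsock_remove_pending() plus
 * vsock_enqueue_accept(), and vsock_accept() below dequeues it for the user.
 */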
static struct sock *vsock_dequeue_accept(struct sock *listener)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);

	if (list_empty(&vlistener->accept_queue))
		return NULL;

	vconnected = list_entry(vlistener->accept_queue.next,
				struct vsock_sock, accept_queue);

	list_del_init(&vconnected->accept_queue);
	sock_put(listener);
	/* The caller will need a reference on the connected socket so we let
	 * it call sock_put().
	 */

	return sk_vsock(vconnected);
}

static bool vsock_is_accept_queue_empty(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	return list_empty(&vsk->accept_queue);
}

static bool vsock_is_pending(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	return !list_empty(&vsk->pending_links);
}

static int vsock_send_shutdown(struct sock *sk, int mode)
{
	return transport->shutdown(vsock_sk(sk), mode);
}

void vsock_pending_work(struct work_struct *work)
{
	struct sock *sk;
	struct sock *listener;
	struct vsock_sock *vsk;
	bool cleanup;

	vsk = container_of(work, struct vsock_sock, dwork.work);
	sk = sk_vsock(vsk);
	listener = vsk->listener;
	cleanup = true;

	lock_sock(listener);
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (vsock_is_pending(sk)) {
		vsock_remove_pending(listener, sk);

		listener->sk_ack_backlog--;
	} else if (!vsk->rejected) {
		/* We are not on the pending list and accept() did not reject
		 * us, so we must have been accepted by our user process.  We
		 * just need to drop our references to the sockets and be on
		 * our way.
		 */
		cleanup = false;
		goto out;
	}

	/* We need to remove ourselves from the global connected sockets list
	 * so incoming packets can't find this socket, and to reduce the
	 * reference count.
	 */
	if (vsock_in_connected_table(vsk))
		vsock_remove_connected(vsk);

	sk->sk_state = TCP_CLOSE;

out:
	release_sock(sk);
	release_sock(listener);
	if (cleanup)
		sock_put(sk);

	sock_put(sk);
	sock_put(listener);
}
EXPORT_SYMBOL_GPL(vsock_pending_work);
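/* How the deferred cleanup is expected to be armed (a sketch only, mirroring
 * the vsock_connect_timeout() arming in vsock_stream_connect() below; the
 * actual call site lives in the transport, and the one-second delay here is
 * illustrative):
 *
 *	vpending->listener = listener;
 *	sock_hold(listener);
 *	sock_hold(pending);
 *	INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work);
 *	schedule_delayed_work(&vpending->dwork, HZ);
 */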
/**** SOCKET OPERATIONS ****/

static int __vsock_bind_stream(struct vsock_sock *vsk,
			       struct sockaddr_vm *addr)
{
	static u32 port = LAST_RESERVED_PORT + 1;
	struct sockaddr_vm new_addr;

	vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);

	if (addr->svm_port == VMADDR_PORT_ANY) {
		bool found = false;
		unsigned int i;

		for (i = 0; i < MAX_PORT_RETRIES; i++) {
			if (port <= LAST_RESERVED_PORT)
				port = LAST_RESERVED_PORT + 1;

			new_addr.svm_port = port++;

			if (!__vsock_find_bound_socket(&new_addr)) {
				found = true;
				break;
			}
		}

		if (!found)
			return -EADDRNOTAVAIL;
	} else {
		/* If port is in reserved range, ensure caller
		 * has necessary privileges.
		 */
		if (addr->svm_port <= LAST_RESERVED_PORT &&
		    !capable(CAP_NET_BIND_SERVICE)) {
			return -EACCES;
		}

		if (__vsock_find_bound_socket(&new_addr))
			return -EADDRINUSE;
	}

	vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);

	/* Remove stream sockets from the unbound list and add them to the
	 * hash table for easy lookup by their address.  The unbound list is
	 * simply an extra entry at the end of the hash table, a trick used by
	 * AF_UNIX.
	 */
	__vsock_remove_bound(vsk);
	__vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);

	return 0;
}

static int __vsock_bind_dgram(struct vsock_sock *vsk,
			      struct sockaddr_vm *addr)
{
	return transport->dgram_bind(vsk, addr);
}

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	u32 cid;
	int retval;

	/* First ensure this socket isn't already bound. */
	if (vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	/* Now bind to the provided address or select appropriate values if
	 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY).  Note that,
	 * just as AF_INET prevents binding to a non-local IP address (in most
	 * cases), we only allow binding to the local CID.
	 */
	cid = transport->get_local_cid();
	if (addr->svm_cid != cid && addr->svm_cid != VMADDR_CID_ANY)
		return -EADDRNOTAVAIL;

	switch (sk->sk_socket->type) {
	case SOCK_STREAM:
		spin_lock_bh(&vsock_table_lock);
		retval = __vsock_bind_stream(vsk, addr);
		spin_unlock_bh(&vsock_table_lock);
		break;

	case SOCK_DGRAM:
		retval = __vsock_bind_dgram(vsk, addr);
		break;

	default:
		retval = -EINVAL;
		break;
	}

	return retval;
}

struct sock *__vsock_create(struct net *net,
			    struct socket *sock,
			    struct sock *parent,
			    gfp_t priority,
			    unsigned short type,
			    int kern)
{
	struct sock *sk;
	struct vsock_sock *psk;
	struct vsock_sock *vsk;

	sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);

	/* sk->sk_type is normally set in sock_init_data, but only if sock is
	 * non-NULL.  We make sure that our sockets always have a type by
	 * setting it here if needed.
	 */
	if (!sock)
		sk->sk_type = type;

	vsk = vsock_sk(sk);
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	sk->sk_destruct = vsock_sk_destruct;
	sk->sk_backlog_rcv = vsock_queue_rcv_skb;
	sock_reset_flag(sk, SOCK_DONE);

	INIT_LIST_HEAD(&vsk->bound_table);
	INIT_LIST_HEAD(&vsk->connected_table);
	vsk->listener = NULL;
	INIT_LIST_HEAD(&vsk->pending_links);
	INIT_LIST_HEAD(&vsk->accept_queue);
	vsk->rejected = false;
	vsk->sent_request = false;
	vsk->ignore_connecting_rst = false;
	vsk->peer_shutdown = 0;

	psk = parent ? vsock_sk(parent) : NULL;
	if (parent) {
		vsk->trusted = psk->trusted;
		vsk->owner = get_cred(psk->owner);
		vsk->connect_timeout = psk->connect_timeout;
	} else {
		vsk->trusted = capable(CAP_NET_ADMIN);
		vsk->owner = get_current_cred();
		vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
	}

	if (transport->init(vsk, psk) < 0) {
		sk_free(sk);
		return NULL;
	}

	if (sock)
		vsock_insert_unbound(vsk);

	return sk;
}
EXPORT_SYMBOL_GPL(__vsock_create);
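/* Illustrative only: a transport creating the "second kind" of socket (a
 * pending child for an incoming connection request) is expected to pass the
 * listener as the parent so credentials and timeouts are inherited, roughly:
 *
 *	pending = __vsock_create(sock_net(listener), NULL, listener,
 *				 GFP_KERNEL, listener->sk_type, 0);
 */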
static void __vsock_release(struct sock *sk)
{
	if (sk) {
		struct sk_buff *skb;
		struct sock *pending;
		struct vsock_sock *vsk;

		vsk = vsock_sk(sk);
		pending = NULL;	/* Compiler warning. */

		transport->release(vsk);

		lock_sock(sk);
		sock_orphan(sk);
		sk->sk_shutdown = SHUTDOWN_MASK;

		while ((skb = skb_dequeue(&sk->sk_receive_queue)))
			kfree_skb(skb);

		/* Clean up any sockets that never were accepted. */
		while ((pending = vsock_dequeue_accept(sk)) != NULL) {
			__vsock_release(pending);
			sock_put(pending);
		}

		release_sock(sk);
		sock_put(sk);
	}
}

static void vsock_sk_destruct(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	transport->destruct(vsk);

	/* When clearing these addresses, there's no need to set the family
	 * and possibly register the address family with the kernel.
	 */
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	put_cred(vsk->owner);
}

static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		kfree_skb(skb);

	return err;
}

s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
	return transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);

s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
	return transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);

static int vsock_release(struct socket *sock)
{
	__vsock_release(sock->sk);
	sock->sk = NULL;
	sock->state = SS_FREE;

	return 0;
}

static int
vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	int err;
	struct sock *sk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;

	if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
		return -EINVAL;

	lock_sock(sk);
	err = __vsock_bind(sk, vm_addr);
	release_sock(sk);

	return err;
}
static int vsock_getname(struct socket *sock,
			 struct sockaddr *addr, int peer)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	if (peer) {
		if (sock->state != SS_CONNECTED) {
			err = -ENOTCONN;
			goto out;
		}
		vm_addr = &vsk->remote_addr;
	} else {
		vm_addr = &vsk->local_addr;
	}

	if (!vm_addr) {
		err = -EINVAL;
		goto out;
	}

	/* sys_getsockname() and sys_getpeername() pass us a
	 * MAX_SOCK_ADDR-sized buffer and don't set addr_len.  Unfortunately
	 * that macro is defined in socket.c instead of .h, so we hardcode its
	 * value here.
	 */
	BUILD_BUG_ON(sizeof(*vm_addr) > 128);
	memcpy(addr, vm_addr, sizeof(*vm_addr));
	err = sizeof(*vm_addr);

out:
	release_sock(sk);
	return err;
}

static int vsock_shutdown(struct socket *sock, int mode)
{
	int err;
	struct sock *sk;

	/* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
	 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
	 * here like the other address families do.  Note also that the
	 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
	 * which is what we want.
	 */
	mode++;

	if ((mode & ~SHUTDOWN_MASK) || !mode)
		return -EINVAL;

	/* If this is a STREAM socket and it is not connected then bail out
	 * immediately.  If it is a DGRAM socket then we must first kick the
	 * socket so that it wakes up from any sleeping calls, for example
	 * recv(), and then afterwards return the error.
	 */

	sk = sock->sk;
	if (sock->state == SS_UNCONNECTED) {
		err = -ENOTCONN;
		if (sk->sk_type == SOCK_STREAM)
			return err;
	} else {
		sock->state = SS_DISCONNECTING;
		err = 0;
	}

	/* Receive and send shutdowns are treated alike. */
	mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
	if (mode) {
		lock_sock(sk);
		sk->sk_shutdown |= mode;
		sk->sk_state_change(sk);
		release_sock(sk);

		if (sk->sk_type == SOCK_STREAM) {
			sock_reset_flag(sk, SOCK_DONE);
			vsock_send_shutdown(sk, mode);
		}
	}

	return err;
}
static __poll_t vsock_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk;
	__poll_t mask;
	struct vsock_sock *vsk;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (sk->sk_err)
		/* Signify that there has been an error on this socket. */
		mask |= EPOLLERR;

	/* INET sockets treat local write shutdown and peer write shutdown as
	 * a case where EPOLLHUP is set.
	 */
	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
	    ((sk->sk_shutdown & SEND_SHUTDOWN) &&
	     (vsk->peer_shutdown & SEND_SHUTDOWN))) {
		mask |= EPOLLHUP;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN ||
	    vsk->peer_shutdown & SEND_SHUTDOWN) {
		mask |= EPOLLRDHUP;
	}

	if (sock->type == SOCK_DGRAM) {
		/* For datagram sockets we can read if there is something in
		 * the queue and write as long as the socket isn't shutdown
		 * for sending.
		 */
		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    (sk->sk_shutdown & RCV_SHUTDOWN)) {
			mask |= EPOLLIN | EPOLLRDNORM;
		}

		if (!(sk->sk_shutdown & SEND_SHUTDOWN))
			mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	} else if (sock->type == SOCK_STREAM) {
		lock_sock(sk);

		/* Listening sockets that have connections in their accept
		 * queue can be read.
		 */
		if (sk->sk_state == TCP_LISTEN &&
		    !vsock_is_accept_queue_empty(sk))
			mask |= EPOLLIN | EPOLLRDNORM;

		/* If there is something in the queue then we can read. */
		if (transport->stream_is_active(vsk) &&
		    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
			bool data_ready_now = false;
			int ret = transport->notify_poll_in(
					vsk, 1, &data_ready_now);
			if (ret < 0) {
				mask |= EPOLLERR;
			} else {
				if (data_ready_now)
					mask |= EPOLLIN | EPOLLRDNORM;
			}
		}

		/* Sockets whose connections have been closed, reset, or
		 * terminated should also be considered readable, and we check
		 * the shutdown flag for that.
		 */
		if (sk->sk_shutdown & RCV_SHUTDOWN ||
		    vsk->peer_shutdown & SEND_SHUTDOWN) {
			mask |= EPOLLIN | EPOLLRDNORM;
		}

		/* Connected sockets that can produce data can be written. */
		if (sk->sk_state == TCP_ESTABLISHED) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
				bool space_avail_now = false;
				int ret = transport->notify_poll_out(
						vsk, 1, &space_avail_now);
				if (ret < 0) {
					mask |= EPOLLERR;
				} else {
					if (space_avail_now)
						/* Remove EPOLLWRBAND since
						 * INET sockets are not
						 * setting it.
						 */
						mask |= EPOLLOUT | EPOLLWRNORM;
				}
			}
		}

		/* Simulate INET socket poll behavior, which sets
		 * EPOLLOUT | EPOLLWRNORM when the peer is closed and there is
		 * nothing to read, but local send is not shutdown.
		 */
		if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
				mask |= EPOLLOUT | EPOLLWRNORM;
		}

		release_sock(sk);
	}

	return mask;
}
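/* Illustrative userspace pairing for the poll logic above: after a
 * non-blocking connect(2) returns EINPROGRESS, waiting for writability tells
 * the caller when the handshake finished (or failed):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *	int err = 0;
 *	socklen_t len = sizeof(err);
 *
 *	poll(&pfd, 1, timeout_ms);
 *	getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);  // 0 on success
 */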
static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* For now, MSG_DONTWAIT is always assumed... */
	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	err = vsock_auto_bind(vsk);
	if (err)
		goto out;

	/* If the provided message contains an address, use that.  Otherwise
	 * fall back on the socket's remote handle (if it has been connected).
	 */
	if (msg->msg_name &&
	    vsock_addr_cast(msg->msg_name, msg->msg_namelen,
			    &remote_addr) == 0) {
		/* Ensure this address is of the right type and is a valid
		 * destination.
		 */

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		if (!vsock_addr_bound(remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else if (sock->state == SS_CONNECTED) {
		remote_addr = &vsk->remote_addr;

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		/* XXX Should connect() or this function ensure remote_addr is
		 * bound?
		 */
		if (!vsock_addr_bound(&vsk->remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else {
		err = -EINVAL;
		goto out;
	}

	if (!transport->dgram_allow(remote_addr->svm_cid,
				    remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	err = transport->dgram_enqueue(vsk, remote_addr, msg, len);

out:
	release_sock(sk);
	return err;
}

static int vsock_dgram_connect(struct socket *sock,
			       struct sockaddr *addr, int addr_len, int flags)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	err = vsock_addr_cast(addr, addr_len, &remote_addr);
	if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
		lock_sock(sk);
		vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
				VMADDR_PORT_ANY);
		sock->state = SS_UNCONNECTED;
		release_sock(sk);
		return 0;
	} else if (err != 0)
		return -EINVAL;

	lock_sock(sk);

	err = vsock_auto_bind(vsk);
	if (err)
		goto out;

	if (!transport->dgram_allow(remote_addr->svm_cid,
				    remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
	sock->state = SS_CONNECTED;

out:
	release_sock(sk);
	return err;
}

static int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
			       size_t len, int flags)
{
	return transport->dgram_dequeue(vsock_sk(sock->sk), msg, len, flags);
}

static const struct proto_ops vsock_dgram_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_dgram_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = vsock_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = vsock_dgram_sendmsg,
	.recvmsg = vsock_dgram_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
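/* Illustrative datagram usage (userspace; only some transports implement
 * SOCK_DGRAM, see dgram_allow above; the CID and port are hypothetical):
 *
 *	int fd = socket(AF_VSOCK, SOCK_DGRAM, 0);
 *	struct sockaddr_vm dst = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = 3,
 *		.svm_port = 9999,
 *	};
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */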
static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
{
	if (!transport->cancel_pkt)
		return -EOPNOTSUPP;

	return transport->cancel_pkt(vsk);
}

static void vsock_connect_timeout(struct work_struct *work)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	int cancel = 0;

	vsk = container_of(work, struct vsock_sock, dwork.work);
	sk = sk_vsock(vsk);

	lock_sock(sk);
	if (sk->sk_state == TCP_SYN_SENT &&
	    (sk->sk_shutdown != SHUTDOWN_MASK)) {
		sk->sk_state = TCP_CLOSE;
		sk->sk_err = ETIMEDOUT;
		sk->sk_error_report(sk);
		cancel = 1;
	}
	release_sock(sk);
	if (cancel)
		vsock_transport_cancel_pkt(vsk);

	sock_put(sk);
}

static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
				int addr_len, int flags)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;
	long timeout;
	DEFINE_WAIT(wait);

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	/* XXX AF_UNSPEC should make us disconnect like AF_INET. */
	switch (sock->state) {
	case SS_CONNECTED:
		err = -EISCONN;
		goto out;
	case SS_DISCONNECTING:
		err = -EINVAL;
		goto out;
	case SS_CONNECTING:
		/* This continues on so we can move sock into the SS_CONNECTED
		 * state once the connection has completed (at which point err
		 * will be set to zero also).  Otherwise, we will either wait
		 * for the connection or return -EALREADY should this be a
		 * non-blocking call.
		 */
		err = -EALREADY;
		break;
	default:
		if ((sk->sk_state == TCP_LISTEN) ||
		    vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
			err = -EINVAL;
			goto out;
		}

		/* The hypervisor and well-known contexts do not have socket
		 * endpoints.
		 */
		if (!transport->stream_allow(remote_addr->svm_cid,
					     remote_addr->svm_port)) {
			err = -ENETUNREACH;
			goto out;
		}

		/* Set the remote address that we are connecting to. */
		memcpy(&vsk->remote_addr, remote_addr,
		       sizeof(vsk->remote_addr));

		err = vsock_auto_bind(vsk);
		if (err)
			goto out;

		sk->sk_state = TCP_SYN_SENT;

		err = transport->connect(vsk);
		if (err < 0)
			goto out;

		/* Mark sock as connecting and set the error code to in
		 * progress in case this is a non-blocking connect.
		 */
		sock->state = SS_CONNECTING;
		err = -EINPROGRESS;
	}

	/* The receive path will handle all communication until we are able to
	 * enter the connected state.  Here we wait for the connection to be
	 * completed or a notification of an error.
	 */
	timeout = vsk->connect_timeout;
	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) {
		if (flags & O_NONBLOCK) {
			/* If we're not going to block, we schedule a timeout
			 * function to generate a timeout on the connection
			 * attempt, in case the peer doesn't respond in a
			 * timely manner.  We hold on to the socket until the
			 * timeout fires.
			 */
			sock_hold(sk);
			INIT_DELAYED_WORK(&vsk->dwork,
					  vsock_connect_timeout);
			schedule_delayed_work(&vsk->dwork, timeout);

			/* Skip ahead to preserve error code set above. */
			goto out_wait;
		}

		release_sock(sk);
		timeout = schedule_timeout(timeout);
		lock_sock(sk);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeout);
			sk->sk_state = TCP_CLOSE;
			sock->state = SS_UNCONNECTED;
			vsock_transport_cancel_pkt(vsk);
			goto out_wait;
		} else if (timeout == 0) {
			err = -ETIMEDOUT;
			sk->sk_state = TCP_CLOSE;
			sock->state = SS_UNCONNECTED;
			vsock_transport_cancel_pkt(vsk);
			goto out_wait;
		}

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}

	if (sk->sk_err) {
		err = -sk->sk_err;
		sk->sk_state = TCP_CLOSE;
		sock->state = SS_UNCONNECTED;
	} else {
		err = 0;
	}

out_wait:
	finish_wait(sk_sleep(sk), &wait);
out:
	release_sock(sk);
	return err;
}
static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
			bool kern)
{
	struct sock *listener;
	int err;
	struct sock *connected;
	struct vsock_sock *vconnected;
	long timeout;
	DEFINE_WAIT(wait);

	err = 0;
	listener = sock->sk;

	lock_sock(listener);

	if (sock->type != SOCK_STREAM) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (listener->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto out;
	}

	/* Wait for children sockets to appear; these are the new sockets
	 * created upon connection establishment.
	 */
	timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);
	prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);

	while ((connected = vsock_dequeue_accept(listener)) == NULL &&
	       listener->sk_err == 0) {
		release_sock(listener);
		timeout = schedule_timeout(timeout);
		finish_wait(sk_sleep(listener), &wait);
		lock_sock(listener);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeout);
			goto out;
		} else if (timeout == 0) {
			err = -EAGAIN;
			goto out;
		}

		prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
	}
	finish_wait(sk_sleep(listener), &wait);

	if (listener->sk_err)
		err = -listener->sk_err;

	if (connected) {
		listener->sk_ack_backlog--;

		lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
		vconnected = vsock_sk(connected);

		/* If the listener socket has received an error, then we should
		 * reject this socket and return.  Note that we simply mark the
		 * socket rejected, drop our reference, and let the cleanup
		 * function handle the cleanup; the fact that we found it in
		 * the listener's accept queue guarantees that the cleanup
		 * function hasn't run yet.
		 */
		if (err) {
			vconnected->rejected = true;
		} else {
			newsock->state = SS_CONNECTED;
			sock_graft(connected, newsock);
		}

		release_sock(connected);
		sock_put(connected);
	}

out:
	release_sock(listener);
	return err;
}

static int vsock_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;

	sk = sock->sk;

	lock_sock(sk);

	if (sock->type != SOCK_STREAM) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (sock->state != SS_UNCONNECTED) {
		err = -EINVAL;
		goto out;
	}

	vsk = vsock_sk(sk);

	if (!vsock_addr_bound(&vsk->local_addr)) {
		err = -EINVAL;
		goto out;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_state = TCP_LISTEN;

	err = 0;

out:
	release_sock(sk);
	return err;
}
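/* Illustrative server-side counterpart to the client sketch near the top of
 * this file (port number hypothetical; VMADDR_CID_ANY binds to the local
 * CID):
 *
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = VMADDR_CID_ANY,
 *		.svm_port = 1234,
 *	};
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 8);
 *	int conn = accept(fd, NULL, NULL);
 */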
static int vsock_stream_setsockopt(struct socket *sock,
				   int level,
				   int optname,
				   char __user *optval,
				   unsigned int optlen)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	u64 val;

	if (level != AF_VSOCK)
		return -ENOPROTOOPT;

#define COPY_IN(_v)							\
	do {								\
		if (optlen < sizeof(_v)) {				\
			err = -EINVAL;					\
			goto exit;					\
		}							\
		if (copy_from_user(&_v, optval, sizeof(_v)) != 0) {	\
			err = -EFAULT;					\
			goto exit;					\
		}							\
	} while (0)

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	switch (optname) {
	case SO_VM_SOCKETS_BUFFER_SIZE:
		COPY_IN(val);
		transport->set_buffer_size(vsk, val);
		break;

	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
		COPY_IN(val);
		transport->set_max_buffer_size(vsk, val);
		break;

	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
		COPY_IN(val);
		transport->set_min_buffer_size(vsk, val);
		break;

	case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
		struct timeval tv;

		COPY_IN(tv);
		if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
		    tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
			vsk->connect_timeout = tv.tv_sec * HZ +
			    DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ));
			if (vsk->connect_timeout == 0)
				vsk->connect_timeout =
				    VSOCK_DEFAULT_CONNECT_TIMEOUT;

		} else {
			err = -ERANGE;
		}
		break;
	}

	default:
		err = -ENOPROTOOPT;
		break;
	}

#undef COPY_IN

exit:
	release_sock(sk);
	return err;
}

static int vsock_stream_getsockopt(struct socket *sock,
				   int level, int optname,
				   char __user *optval,
				   int __user *optlen)
{
	int err;
	int len;
	struct sock *sk;
	struct vsock_sock *vsk;
	u64 val;

	if (level != AF_VSOCK)
		return -ENOPROTOOPT;

	err = get_user(len, optlen);
	if (err != 0)
		return err;

#define COPY_OUT(_v)						\
	do {							\
		if (len < sizeof(_v))				\
			return -EINVAL;				\
								\
		len = sizeof(_v);				\
		if (copy_to_user(optval, &_v, len) != 0)	\
			return -EFAULT;				\
								\
	} while (0)

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	switch (optname) {
	case SO_VM_SOCKETS_BUFFER_SIZE:
		val = transport->get_buffer_size(vsk);
		COPY_OUT(val);
		break;

	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
		val = transport->get_max_buffer_size(vsk);
		COPY_OUT(val);
		break;

	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
		val = transport->get_min_buffer_size(vsk);
		COPY_OUT(val);
		break;

	case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
		struct timeval tv;

		tv.tv_sec = vsk->connect_timeout / HZ;
		tv.tv_usec =
		    (vsk->connect_timeout -
		     tv.tv_sec * HZ) * (1000000 / HZ);
		COPY_OUT(tv);
		break;
	}
	default:
		return -ENOPROTOOPT;
	}

	err = put_user(len, optlen);
	if (err != 0)
		return -EFAULT;

#undef COPY_OUT

	return 0;
}
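/* Illustrative use of the AF_VSOCK-level options above, e.g. stretching the
 * connect timeout to five seconds (note the level is AF_VSOCK, not
 * SOL_SOCKET, per the check in both functions):
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_CONNECT_TIMEOUT,
 *		   &tv, sizeof(tv));
 */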
static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
				size_t len)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	ssize_t total_written;
	long timeout;
	int err;
	struct vsock_transport_send_notify_data send_data;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	total_written = 0;
	err = 0;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	/* Callers should not provide a destination with stream sockets. */
	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out;
	}

	/* Send data only if neither side has shut down this direction. */
	if (sk->sk_shutdown & SEND_SHUTDOWN ||
	    vsk->peer_shutdown & RCV_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state != TCP_ESTABLISHED ||
	    !vsock_addr_bound(&vsk->local_addr)) {
		err = -ENOTCONN;
		goto out;
	}

	if (!vsock_addr_bound(&vsk->remote_addr)) {
		err = -EDESTADDRREQ;
		goto out;
	}

	/* Wait for room in the produce queue to enqueue our user's data. */
	timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	err = transport->notify_send_init(vsk, &send_data);
	if (err < 0)
		goto out;

	while (total_written < len) {
		ssize_t written;

		add_wait_queue(sk_sleep(sk), &wait);
		while (vsock_stream_has_space(vsk) == 0 &&
		       sk->sk_err == 0 &&
		       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
		       !(vsk->peer_shutdown & RCV_SHUTDOWN)) {

			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}

			err = transport->notify_send_pre_block(vsk, &send_data);
			if (err < 0) {
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}

			release_sock(sk);
			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
			lock_sock(sk);
			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			} else if (timeout == 0) {
				err = -EAGAIN;
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}
		}
		remove_wait_queue(sk_sleep(sk), &wait);

		/* These checks occur both as part of and after the loop
		 * conditional since we need to check before and after
		 * sleeping.
		 */
		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		} else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
			   (vsk->peer_shutdown & RCV_SHUTDOWN)) {
			err = -EPIPE;
			goto out_err;
		}

		err = transport->notify_send_pre_enqueue(vsk, &send_data);
		if (err < 0)
			goto out_err;

		/* Note that enqueue will only write as many bytes as are free
		 * in the produce queue, so we don't need to ensure len is
		 * smaller than the queue size.  It is the caller's
		 * responsibility to check how many bytes we were able to send.
		 */

		written = transport->stream_enqueue(
				vsk, msg,
				len - total_written);
		if (written < 0) {
			err = -ENOMEM;
			goto out_err;
		}

		total_written += written;

		err = transport->notify_send_post_enqueue(
				vsk, written, &send_data);
		if (err < 0)
			goto out_err;

	}

out_err:
	if (total_written > 0)
		err = total_written;
out:
	release_sock(sk);
	return err;
}
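/* Because even a blocking send can return short (for example when a timeout
 * or signal interrupts the wait above), a careful caller loops; an
 * illustrative userspace sketch:
 *
 *	size_t off = 0;
 *	while (off < len) {
 *		ssize_t n = send(fd, buf + off, len - off, 0);
 *		if (n < 0)
 *			break;	// inspect errno
 *		off += n;
 *	}
 */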
static int
vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		     int flags)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	int err;
	size_t target;
	ssize_t copied;
	long timeout;
	struct vsock_transport_recv_notify_data recv_data;

	DEFINE_WAIT(wait);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	if (sk->sk_state != TCP_ESTABLISHED) {
		/* Recvmsg is supposed to return 0 if a peer performs an
		 * orderly shutdown.  Differentiate between that case and when
		 * a peer has not connected or a local shutdown occurred with
		 * the SOCK_DONE flag.
		 */
		if (sock_flag(sk, SOCK_DONE))
			err = 0;
		else
			err = -ENOTCONN;

		goto out;
	}

	if (flags & MSG_OOB) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* We don't check the peer_shutdown flag here since the peer may
	 * actually have shut down, but there can still be data in the queue
	 * that a local socket can receive.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		err = 0;
		goto out;
	}

	/* It is valid on Linux to pass in a zero-length receive buffer.  This
	 * is not an error.  We may as well bail out now.
	 */
	if (!len) {
		err = 0;
		goto out;
	}

	/* We must not copy less than target bytes into the user's buffer
	 * before returning successfully, so we wait for the consume queue to
	 * have that much data to consume before dequeueing.  Note that this
	 * makes it impossible to handle cases where target is greater than
	 * the queue size.
	 */
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	if (target >= transport->stream_rcvhiwat(vsk)) {
		err = -ENOMEM;
		goto out;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	copied = 0;

	err = transport->notify_recv_init(vsk, target, &recv_data);
	if (err < 0)
		goto out;

	while (1) {
		s64 ready;

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		ready = vsock_stream_has_data(vsk);

		if (ready == 0) {
			if (sk->sk_err != 0 ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    (vsk->peer_shutdown & SEND_SHUTDOWN)) {
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				finish_wait(sk_sleep(sk), &wait);
				break;
			}

			err = transport->notify_recv_pre_block(
					vsk, target, &recv_data);
			if (err < 0) {
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				finish_wait(sk_sleep(sk), &wait);
				break;
			} else if (timeout == 0) {
				err = -EAGAIN;
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
		} else {
			ssize_t read;

			finish_wait(sk_sleep(sk), &wait);

			if (ready < 0) {
				/* Invalid queue pair content.  XXX This should
				 * be changed to a connection reset in a later
				 * change.
				 */

				err = -ENOMEM;
				goto out;
			}

			err = transport->notify_recv_pre_dequeue(
					vsk, target, &recv_data);
			if (err < 0)
				break;

			read = transport->stream_dequeue(
					vsk, msg,
					len - copied, flags);
			if (read < 0) {
				err = -ENOMEM;
				break;
			}

			copied += read;

			err = transport->notify_recv_post_dequeue(
					vsk, target, read,
					!(flags & MSG_PEEK), &recv_data);
			if (err < 0)
				goto out;

			if (read >= target || flags & MSG_PEEK)
				break;

			target -= read;
		}
	}

	if (sk->sk_err)
		err = -sk->sk_err;
	else if (sk->sk_shutdown & RCV_SHUTDOWN)
		err = 0;

	if (copied > 0)
		err = copied;

out:
	release_sock(sk);
	return err;
}

static const struct proto_ops vsock_stream_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_stream_connect,
	.socketpair = sock_no_socketpair,
	.accept = vsock_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = sock_no_ioctl,
	.listen = vsock_listen,
	.shutdown = vsock_shutdown,
	.setsockopt = vsock_stream_setsockopt,
	.getsockopt = vsock_stream_getsockopt,
	.sendmsg = vsock_stream_sendmsg,
	.recvmsg = vsock_stream_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
static int vsock_create(struct net *net, struct socket *sock,
			int protocol, int kern)
{
	if (!sock)
		return -EINVAL;

	if (protocol && protocol != PF_VSOCK)
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &vsock_dgram_ops;
		break;
	case SOCK_STREAM:
		sock->ops = &vsock_stream_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sock->state = SS_UNCONNECTED;

	return __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern) ?
		0 : -ENOMEM;
}

static const struct net_proto_family vsock_family_ops = {
	.family = AF_VSOCK,
	.create = vsock_create,
	.owner = THIS_MODULE,
};

static long vsock_dev_do_ioctl(struct file *filp,
			       unsigned int cmd, void __user *ptr)
{
	u32 __user *p = ptr;
	int retval = 0;

	switch (cmd) {
	case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
		if (put_user(transport->get_local_cid(), p) != 0)
			retval = -EFAULT;
		break;

	default:
		pr_err("Unknown ioctl %d\n", cmd);
		retval = -EINVAL;
	}

	return retval;
}

static long vsock_dev_ioctl(struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long vsock_dev_compat_ioctl(struct file *filp,
				   unsigned int cmd, unsigned long arg)
{
	return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
}
#endif

static const struct file_operations vsock_device_ops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vsock_dev_compat_ioctl,
#endif
	.open		= nonseekable_open,
};

static struct miscdevice vsock_device = {
	.name		= "vsock",
	.fops		= &vsock_device_ops,
};

int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
{
	int err = mutex_lock_interruptible(&vsock_register_mutex);

	if (err)
		return err;

	if (transport) {
		err = -EBUSY;
		goto err_busy;
	}

	/* Transport must be the owner of the protocol so that it can't
	 * unload while there are open sockets.
	 */
	vsock_proto.owner = owner;
	transport = t;

	vsock_device.minor = MISC_DYNAMIC_MINOR;
	err = misc_register(&vsock_device);
	if (err) {
		pr_err("Failed to register misc device\n");
		goto err_reset_transport;
	}

	err = proto_register(&vsock_proto, 1);	/* we want our slab */
	if (err) {
		pr_err("Cannot register vsock protocol\n");
		goto err_deregister_misc;
	}

	err = sock_register(&vsock_family_ops);
	if (err) {
		pr_err("could not register af_vsock (%d) address family: %d\n",
		       AF_VSOCK, err);
		goto err_unregister_proto;
	}

	mutex_unlock(&vsock_register_mutex);
	return 0;

err_unregister_proto:
	proto_unregister(&vsock_proto);
err_deregister_misc:
	misc_deregister(&vsock_device);
err_reset_transport:
	transport = NULL;
err_busy:
	mutex_unlock(&vsock_register_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__vsock_core_init);
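/* Illustrative transport-module pairing for the init/exit entry points here;
 * the vsock_core_init() wrapper (a static inline in af_vsock.h that passes
 * THIS_MODULE) is the usual way in, and "my_transport" is hypothetical:
 *
 *	static const struct vsock_transport my_transport = { ... };
 *
 *	static int __init my_transport_init(void)
 *	{
 *		return vsock_core_init(&my_transport);
 *	}
 *
 *	static void __exit my_transport_exit(void)
 *	{
 *		vsock_core_exit();
 *	}
 */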
void vsock_core_exit(void)
{
	mutex_lock(&vsock_register_mutex);

	misc_deregister(&vsock_device);
	sock_unregister(AF_VSOCK);
	proto_unregister(&vsock_proto);

	/* We do not want the assignment below re-ordered. */
	mb();
	transport = NULL;

	mutex_unlock(&vsock_register_mutex);
}
EXPORT_SYMBOL_GPL(vsock_core_exit);

const struct vsock_transport *vsock_core_get_transport(void)
{
	/* vsock_register_mutex is not taken since only the transport uses
	 * this function, and only while registered.
	 */
	return transport;
}
EXPORT_SYMBOL_GPL(vsock_core_get_transport);

module_init(vsock_init_tables);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
MODULE_VERSION("1.0.2.0-k");
MODULE_LICENSE("GPL v2");