// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 */

/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 *   calling socket(2)) and those created by incoming connection request
 *   packets.
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 *   specified an address that they are responsible for) and one for connected
 *   sockets (sockets that have established a connection with another socket).
 *   These tables are "global" in that all sockets on the system are placed
 *   within them. Note, though, that the bound table contains an extra entry
 *   for a list of unbound sockets, and SOCK_DGRAM sockets will always remain
 *   in that list. The bound table is used solely for lookup of sockets when
 *   packets are received, and that's not necessary for SOCK_DGRAM sockets
 *   since we create a datagram handle for each and need not perform a lookup.
 *   Keeping SOCK_DGRAM sockets out of the bound hash buckets will reduce the
 *   chance of collisions when looking for SOCK_STREAM sockets and prevent us
 *   from having to check the socket type in the hash table lookups.
 *
 * - Sockets created by user action will either be "client" sockets that
 *   initiate a connection or "server" sockets that listen for connections; we
 *   do not support simultaneous connects (two "client" sockets connecting).
 *
 * - "Server" sockets are referred to as listener sockets throughout this
 *   implementation because they are in the TCP_LISTEN state. When a
 *   connection request is received (the second kind of socket mentioned
 *   above), we create a new socket and refer to it as a pending socket. These
 *   pending sockets are placed on the pending connection list of the listener
 *   socket. When future packets are received for the address the listener
 *   socket is bound to, we check if the source of the packet is from one that
 *   has an existing pending connection. If it does, we process the packet for
 *   the pending socket. When that socket reaches the connected state, it is
 *   removed from the listener socket's pending list and enqueued in the
 *   listener socket's accept queue. Callers of accept(2) will accept connected
 *   sockets from the listener socket's accept queue. If the socket cannot be
 *   accepted for some reason then it is marked rejected. Once the connection
 *   is accepted, it is owned by the user process and the responsibility for
 *   cleanup falls with that user process.
 *
 * - It is possible that these pending sockets will never reach the connected
 *   state; in fact, we may never receive another packet after the connection
 *   request. Because of this, we must schedule a cleanup function to run in
 *   the future, after some amount of time passes where a connection should
 *   have been established. This function ensures that the socket is off all
 *   lists so it cannot be retrieved, then drops all references to the socket
 *   so it is cleaned up (sock_put() -> sk_free() -> our sk_destruct
 *   implementation). Note this function will also clean up rejected sockets,
 *   those that reach the connected state but leave it before they have been
 *   accepted.
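 *
 *   As an illustration (a sketch, not code from this file): a transport
 *   typically arms that cleanup when it creates the pending socket, along the
 *   lines of
 *
 *     vpending->listener = sk;
 *     sock_hold(sk);
 *     schedule_delayed_work(&vpending->pending_work, HZ);
 *
 *   so that vsock_pending_work() runs later if the handshake never completes.
 *   The timeout used here (HZ, i.e. one second) is a transport-specific
 *   choice.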
 *
 * - Lock ordering for pending or accept queue sockets is:
 *
 *     lock_sock(listener);
 *     lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
 *
 *   Using explicit nested locking keeps lockdep happy since normally only one
 *   lock of a given class may be taken at a time.
 *
 * - Sockets created by user action will be cleaned up when the user process
 *   calls close(2), causing our release implementation to be called. Our
 *   release implementation will perform some cleanup then drop the last
 *   reference so our sk_destruct implementation is invoked. Our sk_destruct
 *   implementation will perform additional cleanup that's common for both
 *   types of sockets.
 *
 * - A socket's reference count is what ensures that the structure won't be
 *   freed. Each entry in a list (such as the "global" bound and connected
 *   tables and the listener socket's pending list and connected queue)
 *   ensures a reference. When we defer work until process context and pass a
 *   socket as our argument, we must ensure the reference count is increased
 *   to ensure the socket isn't freed before the function is run; the deferred
 *   function will then drop the reference.
 *
 * - sk->sk_state uses the TCP state constants because they are widely used by
 *   other address families and exposed to userspace tools like ss(8):
 *
 *   TCP_CLOSE - unconnected
 *   TCP_SYN_SENT - connecting
 *   TCP_ESTABLISHED - connected
 *   TCP_CLOSING - disconnecting
 *   TCP_LISTEN - listening
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* Protocol family. */
static struct proto vsock_proto = {
        .name = "AF_VSOCK",
        .owner = THIS_MODULE,
        .obj_size = sizeof(struct vsock_sock),
};

/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
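 *
 * For example, with the 2 * HZ value defined just below this is two seconds'
 * worth of jiffies regardless of CONFIG_HZ (e.g. 2000 jiffies at HZ=1000, 500
 * at HZ=250).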
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

#define VSOCK_DEFAULT_BUFFER_SIZE     (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MAX_SIZE (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MIN_SIZE 128

/* Transport used for host->guest communication */
static const struct vsock_transport *transport_h2g;
/* Transport used for guest->host communication */
static const struct vsock_transport *transport_g2h;
/* Transport used for DGRAM communication */
static const struct vsock_transport *transport_dgram;
/* Transport used for local communication */
static const struct vsock_transport *transport_local;
static DEFINE_MUTEX(vsock_register_mutex);

/**** UTILS ****/

/* Each bound VSocket is stored in the bind hash table and each connected
 * VSocket is stored in the connected hash table.
 *
 * Unbound sockets are all put on the same list attached to the end of the hash
 * table (vsock_unbound_sockets). Bound sockets are added to the hash table in
 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
 * represents the list that addr hashes to).
 *
 * Specifically, we initialize the vsock_bind_table array to a size of
 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function
 * mods with VSOCK_HASH_SIZE to ensure this.
 */
#define MAX_PORT_RETRIES 24

#define VSOCK_HASH(addr) ((addr)->svm_port % VSOCK_HASH_SIZE)
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets (&vsock_bind_table[VSOCK_HASH_SIZE])

/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst) \
        (((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
#define vsock_connected_sockets(src, dst) \
        (&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk) \
        vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)

struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
EXPORT_SYMBOL_GPL(vsock_bind_table);
struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
EXPORT_SYMBOL_GPL(vsock_connected_table);
DEFINE_SPINLOCK(vsock_table_lock);
EXPORT_SYMBOL_GPL(vsock_table_lock);

/* Autobind this socket to the local address if necessary.
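 *
 * For example, a sendmsg() or connect() on a socket that was never bind(2)-ed
 * comes through here first and is bound to (VMADDR_CID_ANY, VMADDR_PORT_ANY);
 * __vsock_bind() then picks a free ephemeral port for it.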
*/ 181 static int vsock_auto_bind(struct vsock_sock *vsk) 182 { 183 struct sock *sk = sk_vsock(vsk); 184 struct sockaddr_vm local_addr; 185 186 if (vsock_addr_bound(&vsk->local_addr)) 187 return 0; 188 vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); 189 return __vsock_bind(sk, &local_addr); 190 } 191 192 static void vsock_init_tables(void) 193 { 194 int i; 195 196 for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++) 197 INIT_LIST_HEAD(&vsock_bind_table[i]); 198 199 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) 200 INIT_LIST_HEAD(&vsock_connected_table[i]); 201 } 202 203 static void __vsock_insert_bound(struct list_head *list, 204 struct vsock_sock *vsk) 205 { 206 sock_hold(&vsk->sk); 207 list_add(&vsk->bound_table, list); 208 } 209 210 static void __vsock_insert_connected(struct list_head *list, 211 struct vsock_sock *vsk) 212 { 213 sock_hold(&vsk->sk); 214 list_add(&vsk->connected_table, list); 215 } 216 217 static void __vsock_remove_bound(struct vsock_sock *vsk) 218 { 219 list_del_init(&vsk->bound_table); 220 sock_put(&vsk->sk); 221 } 222 223 static void __vsock_remove_connected(struct vsock_sock *vsk) 224 { 225 list_del_init(&vsk->connected_table); 226 sock_put(&vsk->sk); 227 } 228 229 static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr) 230 { 231 struct vsock_sock *vsk; 232 233 list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) { 234 if (vsock_addr_equals_addr(addr, &vsk->local_addr)) 235 return sk_vsock(vsk); 236 237 if (addr->svm_port == vsk->local_addr.svm_port && 238 (vsk->local_addr.svm_cid == VMADDR_CID_ANY || 239 addr->svm_cid == VMADDR_CID_ANY)) 240 return sk_vsock(vsk); 241 } 242 243 return NULL; 244 } 245 246 static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src, 247 struct sockaddr_vm *dst) 248 { 249 struct vsock_sock *vsk; 250 251 list_for_each_entry(vsk, vsock_connected_sockets(src, dst), 252 connected_table) { 253 if (vsock_addr_equals_addr(src, &vsk->remote_addr) && 254 dst->svm_port == vsk->local_addr.svm_port) { 255 return sk_vsock(vsk); 256 } 257 } 258 259 return NULL; 260 } 261 262 static void vsock_insert_unbound(struct vsock_sock *vsk) 263 { 264 spin_lock_bh(&vsock_table_lock); 265 __vsock_insert_bound(vsock_unbound_sockets, vsk); 266 spin_unlock_bh(&vsock_table_lock); 267 } 268 269 void vsock_insert_connected(struct vsock_sock *vsk) 270 { 271 struct list_head *list = vsock_connected_sockets( 272 &vsk->remote_addr, &vsk->local_addr); 273 274 spin_lock_bh(&vsock_table_lock); 275 __vsock_insert_connected(list, vsk); 276 spin_unlock_bh(&vsock_table_lock); 277 } 278 EXPORT_SYMBOL_GPL(vsock_insert_connected); 279 280 void vsock_remove_bound(struct vsock_sock *vsk) 281 { 282 spin_lock_bh(&vsock_table_lock); 283 if (__vsock_in_bound_table(vsk)) 284 __vsock_remove_bound(vsk); 285 spin_unlock_bh(&vsock_table_lock); 286 } 287 EXPORT_SYMBOL_GPL(vsock_remove_bound); 288 289 void vsock_remove_connected(struct vsock_sock *vsk) 290 { 291 spin_lock_bh(&vsock_table_lock); 292 if (__vsock_in_connected_table(vsk)) 293 __vsock_remove_connected(vsk); 294 spin_unlock_bh(&vsock_table_lock); 295 } 296 EXPORT_SYMBOL_GPL(vsock_remove_connected); 297 298 struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr) 299 { 300 struct sock *sk; 301 302 spin_lock_bh(&vsock_table_lock); 303 sk = __vsock_find_bound_socket(addr); 304 if (sk) 305 sock_hold(sk); 306 307 spin_unlock_bh(&vsock_table_lock); 308 309 return sk; 310 } 311 EXPORT_SYMBOL_GPL(vsock_find_bound_socket); 312 313 struct sock 
*vsock_find_connected_socket(struct sockaddr_vm *src, 314 struct sockaddr_vm *dst) 315 { 316 struct sock *sk; 317 318 spin_lock_bh(&vsock_table_lock); 319 sk = __vsock_find_connected_socket(src, dst); 320 if (sk) 321 sock_hold(sk); 322 323 spin_unlock_bh(&vsock_table_lock); 324 325 return sk; 326 } 327 EXPORT_SYMBOL_GPL(vsock_find_connected_socket); 328 329 void vsock_remove_sock(struct vsock_sock *vsk) 330 { 331 vsock_remove_bound(vsk); 332 vsock_remove_connected(vsk); 333 } 334 EXPORT_SYMBOL_GPL(vsock_remove_sock); 335 336 void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)) 337 { 338 int i; 339 340 spin_lock_bh(&vsock_table_lock); 341 342 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { 343 struct vsock_sock *vsk; 344 list_for_each_entry(vsk, &vsock_connected_table[i], 345 connected_table) 346 fn(sk_vsock(vsk)); 347 } 348 349 spin_unlock_bh(&vsock_table_lock); 350 } 351 EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket); 352 353 void vsock_add_pending(struct sock *listener, struct sock *pending) 354 { 355 struct vsock_sock *vlistener; 356 struct vsock_sock *vpending; 357 358 vlistener = vsock_sk(listener); 359 vpending = vsock_sk(pending); 360 361 sock_hold(pending); 362 sock_hold(listener); 363 list_add_tail(&vpending->pending_links, &vlistener->pending_links); 364 } 365 EXPORT_SYMBOL_GPL(vsock_add_pending); 366 367 void vsock_remove_pending(struct sock *listener, struct sock *pending) 368 { 369 struct vsock_sock *vpending = vsock_sk(pending); 370 371 list_del_init(&vpending->pending_links); 372 sock_put(listener); 373 sock_put(pending); 374 } 375 EXPORT_SYMBOL_GPL(vsock_remove_pending); 376 377 void vsock_enqueue_accept(struct sock *listener, struct sock *connected) 378 { 379 struct vsock_sock *vlistener; 380 struct vsock_sock *vconnected; 381 382 vlistener = vsock_sk(listener); 383 vconnected = vsock_sk(connected); 384 385 sock_hold(connected); 386 sock_hold(listener); 387 list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue); 388 } 389 EXPORT_SYMBOL_GPL(vsock_enqueue_accept); 390 391 static bool vsock_use_local_transport(unsigned int remote_cid) 392 { 393 if (!transport_local) 394 return false; 395 396 if (remote_cid == VMADDR_CID_LOCAL) 397 return true; 398 399 if (transport_g2h) { 400 return remote_cid == transport_g2h->get_local_cid(); 401 } else { 402 return remote_cid == VMADDR_CID_HOST; 403 } 404 } 405 406 static void vsock_deassign_transport(struct vsock_sock *vsk) 407 { 408 if (!vsk->transport) 409 return; 410 411 vsk->transport->destruct(vsk); 412 module_put(vsk->transport->module); 413 vsk->transport = NULL; 414 } 415 416 /* Assign a transport to a socket and call the .init transport callback. 417 * 418 * Note: for stream socket this must be called when vsk->remote_addr is set 419 * (e.g. during the connect() or when a connection request on a listener 420 * socket is received). 
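 * (A socket that calls connect(2) again towards a different remote CID can
 * therefore switch transports; in that case the old transport is released and
 * destructed below before the new one is initialized.)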
421 * The vsk->remote_addr is used to decide which transport to use: 422 * - remote CID == VMADDR_CID_LOCAL or g2h->local_cid or VMADDR_CID_HOST if 423 * g2h is not loaded, will use local transport; 424 * - remote CID <= VMADDR_CID_HOST will use guest->host transport; 425 * - remote CID > VMADDR_CID_HOST will use host->guest transport; 426 */ 427 int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) 428 { 429 const struct vsock_transport *new_transport; 430 struct sock *sk = sk_vsock(vsk); 431 unsigned int remote_cid = vsk->remote_addr.svm_cid; 432 int ret; 433 434 switch (sk->sk_type) { 435 case SOCK_DGRAM: 436 new_transport = transport_dgram; 437 break; 438 case SOCK_STREAM: 439 if (vsock_use_local_transport(remote_cid)) 440 new_transport = transport_local; 441 else if (remote_cid <= VMADDR_CID_HOST) 442 new_transport = transport_g2h; 443 else 444 new_transport = transport_h2g; 445 break; 446 default: 447 return -ESOCKTNOSUPPORT; 448 } 449 450 if (vsk->transport) { 451 if (vsk->transport == new_transport) 452 return 0; 453 454 vsk->transport->release(vsk); 455 vsock_deassign_transport(vsk); 456 } 457 458 /* We increase the module refcnt to prevent the transport unloading 459 * while there are open sockets assigned to it. 460 */ 461 if (!new_transport || !try_module_get(new_transport->module)) 462 return -ENODEV; 463 464 ret = new_transport->init(vsk, psk); 465 if (ret) { 466 module_put(new_transport->module); 467 return ret; 468 } 469 470 vsk->transport = new_transport; 471 472 return 0; 473 } 474 EXPORT_SYMBOL_GPL(vsock_assign_transport); 475 476 bool vsock_find_cid(unsigned int cid) 477 { 478 if (transport_g2h && cid == transport_g2h->get_local_cid()) 479 return true; 480 481 if (transport_h2g && cid == VMADDR_CID_HOST) 482 return true; 483 484 if (transport_local && cid == VMADDR_CID_LOCAL) 485 return true; 486 487 return false; 488 } 489 EXPORT_SYMBOL_GPL(vsock_find_cid); 490 491 static struct sock *vsock_dequeue_accept(struct sock *listener) 492 { 493 struct vsock_sock *vlistener; 494 struct vsock_sock *vconnected; 495 496 vlistener = vsock_sk(listener); 497 498 if (list_empty(&vlistener->accept_queue)) 499 return NULL; 500 501 vconnected = list_entry(vlistener->accept_queue.next, 502 struct vsock_sock, accept_queue); 503 504 list_del_init(&vconnected->accept_queue); 505 sock_put(listener); 506 /* The caller will need a reference on the connected socket so we let 507 * it call sock_put(). 
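 * (vsock_accept() does exactly that: it dequeues the child here, grafts it
 * onto the new socket with sock_graft(), and then drops the reference with
 * sock_put() once it is done.)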
508 */ 509 510 return sk_vsock(vconnected); 511 } 512 513 static bool vsock_is_accept_queue_empty(struct sock *sk) 514 { 515 struct vsock_sock *vsk = vsock_sk(sk); 516 return list_empty(&vsk->accept_queue); 517 } 518 519 static bool vsock_is_pending(struct sock *sk) 520 { 521 struct vsock_sock *vsk = vsock_sk(sk); 522 return !list_empty(&vsk->pending_links); 523 } 524 525 static int vsock_send_shutdown(struct sock *sk, int mode) 526 { 527 struct vsock_sock *vsk = vsock_sk(sk); 528 529 if (!vsk->transport) 530 return -ENODEV; 531 532 return vsk->transport->shutdown(vsk, mode); 533 } 534 535 static void vsock_pending_work(struct work_struct *work) 536 { 537 struct sock *sk; 538 struct sock *listener; 539 struct vsock_sock *vsk; 540 bool cleanup; 541 542 vsk = container_of(work, struct vsock_sock, pending_work.work); 543 sk = sk_vsock(vsk); 544 listener = vsk->listener; 545 cleanup = true; 546 547 lock_sock(listener); 548 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); 549 550 if (vsock_is_pending(sk)) { 551 vsock_remove_pending(listener, sk); 552 553 sk_acceptq_removed(listener); 554 } else if (!vsk->rejected) { 555 /* We are not on the pending list and accept() did not reject 556 * us, so we must have been accepted by our user process. We 557 * just need to drop our references to the sockets and be on 558 * our way. 559 */ 560 cleanup = false; 561 goto out; 562 } 563 564 /* We need to remove ourself from the global connected sockets list so 565 * incoming packets can't find this socket, and to reduce the reference 566 * count. 567 */ 568 vsock_remove_connected(vsk); 569 570 sk->sk_state = TCP_CLOSE; 571 572 out: 573 release_sock(sk); 574 release_sock(listener); 575 if (cleanup) 576 sock_put(sk); 577 578 sock_put(sk); 579 sock_put(listener); 580 } 581 582 /**** SOCKET OPERATIONS ****/ 583 584 static int __vsock_bind_stream(struct vsock_sock *vsk, 585 struct sockaddr_vm *addr) 586 { 587 static u32 port; 588 struct sockaddr_vm new_addr; 589 590 if (!port) 591 port = LAST_RESERVED_PORT + 1 + 592 prandom_u32_max(U32_MAX - LAST_RESERVED_PORT); 593 594 vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port); 595 596 if (addr->svm_port == VMADDR_PORT_ANY) { 597 bool found = false; 598 unsigned int i; 599 600 for (i = 0; i < MAX_PORT_RETRIES; i++) { 601 if (port <= LAST_RESERVED_PORT) 602 port = LAST_RESERVED_PORT + 1; 603 604 new_addr.svm_port = port++; 605 606 if (!__vsock_find_bound_socket(&new_addr)) { 607 found = true; 608 break; 609 } 610 } 611 612 if (!found) 613 return -EADDRNOTAVAIL; 614 } else { 615 /* If port is in reserved range, ensure caller 616 * has necessary privileges. 617 */ 618 if (addr->svm_port <= LAST_RESERVED_PORT && 619 !capable(CAP_NET_BIND_SERVICE)) { 620 return -EACCES; 621 } 622 623 if (__vsock_find_bound_socket(&new_addr)) 624 return -EADDRINUSE; 625 } 626 627 vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port); 628 629 /* Remove stream sockets from the unbound list and add them to the hash 630 * table for easy lookup by its address. The unbound list is simply an 631 * extra entry at the end of the hash table, a trick used by AF_UNIX. 
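 *
 * For example, a stream socket bound to port P ends up in bucket
 * vsock_bind_table[P % VSOCK_HASH_SIZE], while every unbound (and every
 * SOCK_DGRAM) socket stays on vsock_bind_table[VSOCK_HASH_SIZE], the extra
 * entry past the last bucket.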
632 */ 633 __vsock_remove_bound(vsk); 634 __vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk); 635 636 return 0; 637 } 638 639 static int __vsock_bind_dgram(struct vsock_sock *vsk, 640 struct sockaddr_vm *addr) 641 { 642 return vsk->transport->dgram_bind(vsk, addr); 643 } 644 645 static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr) 646 { 647 struct vsock_sock *vsk = vsock_sk(sk); 648 int retval; 649 650 /* First ensure this socket isn't already bound. */ 651 if (vsock_addr_bound(&vsk->local_addr)) 652 return -EINVAL; 653 654 /* Now bind to the provided address or select appropriate values if 655 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that 656 * like AF_INET prevents binding to a non-local IP address (in most 657 * cases), we only allow binding to a local CID. 658 */ 659 if (addr->svm_cid != VMADDR_CID_ANY && !vsock_find_cid(addr->svm_cid)) 660 return -EADDRNOTAVAIL; 661 662 switch (sk->sk_socket->type) { 663 case SOCK_STREAM: 664 spin_lock_bh(&vsock_table_lock); 665 retval = __vsock_bind_stream(vsk, addr); 666 spin_unlock_bh(&vsock_table_lock); 667 break; 668 669 case SOCK_DGRAM: 670 retval = __vsock_bind_dgram(vsk, addr); 671 break; 672 673 default: 674 retval = -EINVAL; 675 break; 676 } 677 678 return retval; 679 } 680 681 static void vsock_connect_timeout(struct work_struct *work); 682 683 static struct sock *__vsock_create(struct net *net, 684 struct socket *sock, 685 struct sock *parent, 686 gfp_t priority, 687 unsigned short type, 688 int kern) 689 { 690 struct sock *sk; 691 struct vsock_sock *psk; 692 struct vsock_sock *vsk; 693 694 sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern); 695 if (!sk) 696 return NULL; 697 698 sock_init_data(sock, sk); 699 700 /* sk->sk_type is normally set in sock_init_data, but only if sock is 701 * non-NULL. We make sure that our sockets always have a type by 702 * setting it here if needed. 703 */ 704 if (!sock) 705 sk->sk_type = type; 706 707 vsk = vsock_sk(sk); 708 vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); 709 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); 710 711 sk->sk_destruct = vsock_sk_destruct; 712 sk->sk_backlog_rcv = vsock_queue_rcv_skb; 713 sock_reset_flag(sk, SOCK_DONE); 714 715 INIT_LIST_HEAD(&vsk->bound_table); 716 INIT_LIST_HEAD(&vsk->connected_table); 717 vsk->listener = NULL; 718 INIT_LIST_HEAD(&vsk->pending_links); 719 INIT_LIST_HEAD(&vsk->accept_queue); 720 vsk->rejected = false; 721 vsk->sent_request = false; 722 vsk->ignore_connecting_rst = false; 723 vsk->peer_shutdown = 0; 724 INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout); 725 INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work); 726 727 psk = parent ? 
vsock_sk(parent) : NULL; 728 if (parent) { 729 vsk->trusted = psk->trusted; 730 vsk->owner = get_cred(psk->owner); 731 vsk->connect_timeout = psk->connect_timeout; 732 vsk->buffer_size = psk->buffer_size; 733 vsk->buffer_min_size = psk->buffer_min_size; 734 vsk->buffer_max_size = psk->buffer_max_size; 735 } else { 736 vsk->trusted = capable(CAP_NET_ADMIN); 737 vsk->owner = get_current_cred(); 738 vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT; 739 vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE; 740 vsk->buffer_min_size = VSOCK_DEFAULT_BUFFER_MIN_SIZE; 741 vsk->buffer_max_size = VSOCK_DEFAULT_BUFFER_MAX_SIZE; 742 } 743 744 return sk; 745 } 746 747 static void __vsock_release(struct sock *sk, int level) 748 { 749 if (sk) { 750 struct sock *pending; 751 struct vsock_sock *vsk; 752 753 vsk = vsock_sk(sk); 754 pending = NULL; /* Compiler warning. */ 755 756 /* The release call is supposed to use lock_sock_nested() 757 * rather than lock_sock(), if a sock lock should be acquired. 758 */ 759 if (vsk->transport) 760 vsk->transport->release(vsk); 761 else if (sk->sk_type == SOCK_STREAM) 762 vsock_remove_sock(vsk); 763 764 /* When "level" is SINGLE_DEPTH_NESTING, use the nested 765 * version to avoid the warning "possible recursive locking 766 * detected". When "level" is 0, lock_sock_nested(sk, level) 767 * is the same as lock_sock(sk). 768 */ 769 lock_sock_nested(sk, level); 770 sock_orphan(sk); 771 sk->sk_shutdown = SHUTDOWN_MASK; 772 773 skb_queue_purge(&sk->sk_receive_queue); 774 775 /* Clean up any sockets that never were accepted. */ 776 while ((pending = vsock_dequeue_accept(sk)) != NULL) { 777 __vsock_release(pending, SINGLE_DEPTH_NESTING); 778 sock_put(pending); 779 } 780 781 release_sock(sk); 782 sock_put(sk); 783 } 784 } 785 786 static void vsock_sk_destruct(struct sock *sk) 787 { 788 struct vsock_sock *vsk = vsock_sk(sk); 789 790 vsock_deassign_transport(vsk); 791 792 /* When clearing these addresses, there's no need to set the family and 793 * possibly register the address family with the kernel. 
794 */ 795 vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); 796 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); 797 798 put_cred(vsk->owner); 799 } 800 801 static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 802 { 803 int err; 804 805 err = sock_queue_rcv_skb(sk, skb); 806 if (err) 807 kfree_skb(skb); 808 809 return err; 810 } 811 812 struct sock *vsock_create_connected(struct sock *parent) 813 { 814 return __vsock_create(sock_net(parent), NULL, parent, GFP_KERNEL, 815 parent->sk_type, 0); 816 } 817 EXPORT_SYMBOL_GPL(vsock_create_connected); 818 819 s64 vsock_stream_has_data(struct vsock_sock *vsk) 820 { 821 return vsk->transport->stream_has_data(vsk); 822 } 823 EXPORT_SYMBOL_GPL(vsock_stream_has_data); 824 825 s64 vsock_stream_has_space(struct vsock_sock *vsk) 826 { 827 return vsk->transport->stream_has_space(vsk); 828 } 829 EXPORT_SYMBOL_GPL(vsock_stream_has_space); 830 831 static int vsock_release(struct socket *sock) 832 { 833 __vsock_release(sock->sk, 0); 834 sock->sk = NULL; 835 sock->state = SS_FREE; 836 837 return 0; 838 } 839 840 static int 841 vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) 842 { 843 int err; 844 struct sock *sk; 845 struct sockaddr_vm *vm_addr; 846 847 sk = sock->sk; 848 849 if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0) 850 return -EINVAL; 851 852 lock_sock(sk); 853 err = __vsock_bind(sk, vm_addr); 854 release_sock(sk); 855 856 return err; 857 } 858 859 static int vsock_getname(struct socket *sock, 860 struct sockaddr *addr, int peer) 861 { 862 int err; 863 struct sock *sk; 864 struct vsock_sock *vsk; 865 struct sockaddr_vm *vm_addr; 866 867 sk = sock->sk; 868 vsk = vsock_sk(sk); 869 err = 0; 870 871 lock_sock(sk); 872 873 if (peer) { 874 if (sock->state != SS_CONNECTED) { 875 err = -ENOTCONN; 876 goto out; 877 } 878 vm_addr = &vsk->remote_addr; 879 } else { 880 vm_addr = &vsk->local_addr; 881 } 882 883 if (!vm_addr) { 884 err = -EINVAL; 885 goto out; 886 } 887 888 /* sys_getsockname() and sys_getpeername() pass us a 889 * MAX_SOCK_ADDR-sized buffer and don't set addr_len. Unfortunately 890 * that macro is defined in socket.c instead of .h, so we hardcode its 891 * value here. 892 */ 893 BUILD_BUG_ON(sizeof(*vm_addr) > 128); 894 memcpy(addr, vm_addr, sizeof(*vm_addr)); 895 err = sizeof(*vm_addr); 896 897 out: 898 release_sock(sk); 899 return err; 900 } 901 902 static int vsock_shutdown(struct socket *sock, int mode) 903 { 904 int err; 905 struct sock *sk; 906 907 /* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses 908 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode 909 * here like the other address families do. Note also that the 910 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3), 911 * which is what we want. 912 */ 913 mode++; 914 915 if ((mode & ~SHUTDOWN_MASK) || !mode) 916 return -EINVAL; 917 918 /* If this is a STREAM socket and it is not connected then bail out 919 * immediately. If it is a DGRAM socket then we must first kick the 920 * socket so that it wakes up from any sleeping calls, for example 921 * recv(), and then afterwards return the error. 922 */ 923 924 sk = sock->sk; 925 if (sock->state == SS_UNCONNECTED) { 926 err = -ENOTCONN; 927 if (sk->sk_type == SOCK_STREAM) 928 return err; 929 } else { 930 sock->state = SS_DISCONNECTING; 931 err = 0; 932 } 933 934 /* Receive and send shutdowns are treated alike. 
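 * After the mode++ above the mapping is SHUT_RD (0) -> RCV_SHUTDOWN (1),
 * SHUT_WR (1) -> SEND_SHUTDOWN (2) and SHUT_RDWR (2) -> RCV_SHUTDOWN |
 * SEND_SHUTDOWN (3), so masking with (RCV_SHUTDOWN | SEND_SHUTDOWN) below
 * keeps exactly the bits we care about.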
*/ 935 mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN); 936 if (mode) { 937 lock_sock(sk); 938 sk->sk_shutdown |= mode; 939 sk->sk_state_change(sk); 940 release_sock(sk); 941 942 if (sk->sk_type == SOCK_STREAM) { 943 sock_reset_flag(sk, SOCK_DONE); 944 vsock_send_shutdown(sk, mode); 945 } 946 } 947 948 return err; 949 } 950 951 static __poll_t vsock_poll(struct file *file, struct socket *sock, 952 poll_table *wait) 953 { 954 struct sock *sk; 955 __poll_t mask; 956 struct vsock_sock *vsk; 957 958 sk = sock->sk; 959 vsk = vsock_sk(sk); 960 961 poll_wait(file, sk_sleep(sk), wait); 962 mask = 0; 963 964 if (sk->sk_err) 965 /* Signify that there has been an error on this socket. */ 966 mask |= EPOLLERR; 967 968 /* INET sockets treat local write shutdown and peer write shutdown as a 969 * case of EPOLLHUP set. 970 */ 971 if ((sk->sk_shutdown == SHUTDOWN_MASK) || 972 ((sk->sk_shutdown & SEND_SHUTDOWN) && 973 (vsk->peer_shutdown & SEND_SHUTDOWN))) { 974 mask |= EPOLLHUP; 975 } 976 977 if (sk->sk_shutdown & RCV_SHUTDOWN || 978 vsk->peer_shutdown & SEND_SHUTDOWN) { 979 mask |= EPOLLRDHUP; 980 } 981 982 if (sock->type == SOCK_DGRAM) { 983 /* For datagram sockets we can read if there is something in 984 * the queue and write as long as the socket isn't shutdown for 985 * sending. 986 */ 987 if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || 988 (sk->sk_shutdown & RCV_SHUTDOWN)) { 989 mask |= EPOLLIN | EPOLLRDNORM; 990 } 991 992 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 993 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 994 995 } else if (sock->type == SOCK_STREAM) { 996 const struct vsock_transport *transport = vsk->transport; 997 lock_sock(sk); 998 999 /* Listening sockets that have connections in their accept 1000 * queue can be read. 1001 */ 1002 if (sk->sk_state == TCP_LISTEN 1003 && !vsock_is_accept_queue_empty(sk)) 1004 mask |= EPOLLIN | EPOLLRDNORM; 1005 1006 /* If there is something in the queue then we can read. */ 1007 if (transport && transport->stream_is_active(vsk) && 1008 !(sk->sk_shutdown & RCV_SHUTDOWN)) { 1009 bool data_ready_now = false; 1010 int ret = transport->notify_poll_in( 1011 vsk, 1, &data_ready_now); 1012 if (ret < 0) { 1013 mask |= EPOLLERR; 1014 } else { 1015 if (data_ready_now) 1016 mask |= EPOLLIN | EPOLLRDNORM; 1017 1018 } 1019 } 1020 1021 /* Sockets whose connections have been closed, reset, or 1022 * terminated should also be considered read, and we check the 1023 * shutdown flag for that. 1024 */ 1025 if (sk->sk_shutdown & RCV_SHUTDOWN || 1026 vsk->peer_shutdown & SEND_SHUTDOWN) { 1027 mask |= EPOLLIN | EPOLLRDNORM; 1028 } 1029 1030 /* Connected sockets that can produce data can be written. */ 1031 if (sk->sk_state == TCP_ESTABLISHED) { 1032 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { 1033 bool space_avail_now = false; 1034 int ret = transport->notify_poll_out( 1035 vsk, 1, &space_avail_now); 1036 if (ret < 0) { 1037 mask |= EPOLLERR; 1038 } else { 1039 if (space_avail_now) 1040 /* Remove EPOLLWRBAND since INET 1041 * sockets are not setting it. 1042 */ 1043 mask |= EPOLLOUT | EPOLLWRNORM; 1044 1045 } 1046 } 1047 } 1048 1049 /* Simulate INET socket poll behaviors, which sets 1050 * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read, 1051 * but local send is not shutdown. 
1052 */ 1053 if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) { 1054 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 1055 mask |= EPOLLOUT | EPOLLWRNORM; 1056 1057 } 1058 1059 release_sock(sk); 1060 } 1061 1062 return mask; 1063 } 1064 1065 static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg, 1066 size_t len) 1067 { 1068 int err; 1069 struct sock *sk; 1070 struct vsock_sock *vsk; 1071 struct sockaddr_vm *remote_addr; 1072 const struct vsock_transport *transport; 1073 1074 if (msg->msg_flags & MSG_OOB) 1075 return -EOPNOTSUPP; 1076 1077 /* For now, MSG_DONTWAIT is always assumed... */ 1078 err = 0; 1079 sk = sock->sk; 1080 vsk = vsock_sk(sk); 1081 transport = vsk->transport; 1082 1083 lock_sock(sk); 1084 1085 err = vsock_auto_bind(vsk); 1086 if (err) 1087 goto out; 1088 1089 1090 /* If the provided message contains an address, use that. Otherwise 1091 * fall back on the socket's remote handle (if it has been connected). 1092 */ 1093 if (msg->msg_name && 1094 vsock_addr_cast(msg->msg_name, msg->msg_namelen, 1095 &remote_addr) == 0) { 1096 /* Ensure this address is of the right type and is a valid 1097 * destination. 1098 */ 1099 1100 if (remote_addr->svm_cid == VMADDR_CID_ANY) 1101 remote_addr->svm_cid = transport->get_local_cid(); 1102 1103 if (!vsock_addr_bound(remote_addr)) { 1104 err = -EINVAL; 1105 goto out; 1106 } 1107 } else if (sock->state == SS_CONNECTED) { 1108 remote_addr = &vsk->remote_addr; 1109 1110 if (remote_addr->svm_cid == VMADDR_CID_ANY) 1111 remote_addr->svm_cid = transport->get_local_cid(); 1112 1113 /* XXX Should connect() or this function ensure remote_addr is 1114 * bound? 1115 */ 1116 if (!vsock_addr_bound(&vsk->remote_addr)) { 1117 err = -EINVAL; 1118 goto out; 1119 } 1120 } else { 1121 err = -EINVAL; 1122 goto out; 1123 } 1124 1125 if (!transport->dgram_allow(remote_addr->svm_cid, 1126 remote_addr->svm_port)) { 1127 err = -EINVAL; 1128 goto out; 1129 } 1130 1131 err = transport->dgram_enqueue(vsk, remote_addr, msg, len); 1132 1133 out: 1134 release_sock(sk); 1135 return err; 1136 } 1137 1138 static int vsock_dgram_connect(struct socket *sock, 1139 struct sockaddr *addr, int addr_len, int flags) 1140 { 1141 int err; 1142 struct sock *sk; 1143 struct vsock_sock *vsk; 1144 struct sockaddr_vm *remote_addr; 1145 1146 sk = sock->sk; 1147 vsk = vsock_sk(sk); 1148 1149 err = vsock_addr_cast(addr, addr_len, &remote_addr); 1150 if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) { 1151 lock_sock(sk); 1152 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, 1153 VMADDR_PORT_ANY); 1154 sock->state = SS_UNCONNECTED; 1155 release_sock(sk); 1156 return 0; 1157 } else if (err != 0) 1158 return -EINVAL; 1159 1160 lock_sock(sk); 1161 1162 err = vsock_auto_bind(vsk); 1163 if (err) 1164 goto out; 1165 1166 if (!vsk->transport->dgram_allow(remote_addr->svm_cid, 1167 remote_addr->svm_port)) { 1168 err = -EINVAL; 1169 goto out; 1170 } 1171 1172 memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr)); 1173 sock->state = SS_CONNECTED; 1174 1175 out: 1176 release_sock(sk); 1177 return err; 1178 } 1179 1180 static int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, 1181 size_t len, int flags) 1182 { 1183 struct vsock_sock *vsk = vsock_sk(sock->sk); 1184 1185 return vsk->transport->dgram_dequeue(vsk, msg, len, flags); 1186 } 1187 1188 static const struct proto_ops vsock_dgram_ops = { 1189 .family = PF_VSOCK, 1190 .owner = THIS_MODULE, 1191 .release = vsock_release, 1192 .bind = vsock_bind, 1193 .connect = vsock_dgram_connect, 1194 
.socketpair = sock_no_socketpair, 1195 .accept = sock_no_accept, 1196 .getname = vsock_getname, 1197 .poll = vsock_poll, 1198 .ioctl = sock_no_ioctl, 1199 .listen = sock_no_listen, 1200 .shutdown = vsock_shutdown, 1201 .setsockopt = sock_no_setsockopt, 1202 .getsockopt = sock_no_getsockopt, 1203 .sendmsg = vsock_dgram_sendmsg, 1204 .recvmsg = vsock_dgram_recvmsg, 1205 .mmap = sock_no_mmap, 1206 .sendpage = sock_no_sendpage, 1207 }; 1208 1209 static int vsock_transport_cancel_pkt(struct vsock_sock *vsk) 1210 { 1211 const struct vsock_transport *transport = vsk->transport; 1212 1213 if (!transport->cancel_pkt) 1214 return -EOPNOTSUPP; 1215 1216 return transport->cancel_pkt(vsk); 1217 } 1218 1219 static void vsock_connect_timeout(struct work_struct *work) 1220 { 1221 struct sock *sk; 1222 struct vsock_sock *vsk; 1223 int cancel = 0; 1224 1225 vsk = container_of(work, struct vsock_sock, connect_work.work); 1226 sk = sk_vsock(vsk); 1227 1228 lock_sock(sk); 1229 if (sk->sk_state == TCP_SYN_SENT && 1230 (sk->sk_shutdown != SHUTDOWN_MASK)) { 1231 sk->sk_state = TCP_CLOSE; 1232 sk->sk_err = ETIMEDOUT; 1233 sk->sk_error_report(sk); 1234 cancel = 1; 1235 } 1236 release_sock(sk); 1237 if (cancel) 1238 vsock_transport_cancel_pkt(vsk); 1239 1240 sock_put(sk); 1241 } 1242 1243 static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr, 1244 int addr_len, int flags) 1245 { 1246 int err; 1247 struct sock *sk; 1248 struct vsock_sock *vsk; 1249 const struct vsock_transport *transport; 1250 struct sockaddr_vm *remote_addr; 1251 long timeout; 1252 DEFINE_WAIT(wait); 1253 1254 err = 0; 1255 sk = sock->sk; 1256 vsk = vsock_sk(sk); 1257 1258 lock_sock(sk); 1259 1260 /* XXX AF_UNSPEC should make us disconnect like AF_INET. */ 1261 switch (sock->state) { 1262 case SS_CONNECTED: 1263 err = -EISCONN; 1264 goto out; 1265 case SS_DISCONNECTING: 1266 err = -EINVAL; 1267 goto out; 1268 case SS_CONNECTING: 1269 /* This continues on so we can move sock into the SS_CONNECTED 1270 * state once the connection has completed (at which point err 1271 * will be set to zero also). Otherwise, we will either wait 1272 * for the connection or return -EALREADY should this be a 1273 * non-blocking call. 1274 */ 1275 err = -EALREADY; 1276 break; 1277 default: 1278 if ((sk->sk_state == TCP_LISTEN) || 1279 vsock_addr_cast(addr, addr_len, &remote_addr) != 0) { 1280 err = -EINVAL; 1281 goto out; 1282 } 1283 1284 /* Set the remote address that we are connecting to. */ 1285 memcpy(&vsk->remote_addr, remote_addr, 1286 sizeof(vsk->remote_addr)); 1287 1288 err = vsock_assign_transport(vsk, NULL); 1289 if (err) 1290 goto out; 1291 1292 transport = vsk->transport; 1293 1294 /* The hypervisor and well-known contexts do not have socket 1295 * endpoints. 1296 */ 1297 if (!transport || 1298 !transport->stream_allow(remote_addr->svm_cid, 1299 remote_addr->svm_port)) { 1300 err = -ENETUNREACH; 1301 goto out; 1302 } 1303 1304 err = vsock_auto_bind(vsk); 1305 if (err) 1306 goto out; 1307 1308 sk->sk_state = TCP_SYN_SENT; 1309 1310 err = transport->connect(vsk); 1311 if (err < 0) 1312 goto out; 1313 1314 /* Mark sock as connecting and set the error code to in 1315 * progress in case this is a non-blocking connect. 1316 */ 1317 sock->state = SS_CONNECTING; 1318 err = -EINPROGRESS; 1319 } 1320 1321 /* The receive path will handle all communication until we are able to 1322 * enter the connected state. Here we wait for the connection to be 1323 * completed or a notification of an error. 
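 *
 * (From user space a non-blocking connect therefore follows the usual TCP
 * pattern; a sketch, not code from this file:
 *
 *     connect(fd, (struct sockaddr *)&addr, sizeof(addr)); // -1, EINPROGRESS
 *     poll(&(struct pollfd){ .fd = fd, .events = POLLOUT }, 1, -1);
 *     getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &errlen); // 0 on success
 *
 * where addr holds the peer's CID and port.)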
1324 */ 1325 timeout = vsk->connect_timeout; 1326 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1327 1328 while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) { 1329 if (flags & O_NONBLOCK) { 1330 /* If we're not going to block, we schedule a timeout 1331 * function to generate a timeout on the connection 1332 * attempt, in case the peer doesn't respond in a 1333 * timely manner. We hold on to the socket until the 1334 * timeout fires. 1335 */ 1336 sock_hold(sk); 1337 schedule_delayed_work(&vsk->connect_work, timeout); 1338 1339 /* Skip ahead to preserve error code set above. */ 1340 goto out_wait; 1341 } 1342 1343 release_sock(sk); 1344 timeout = schedule_timeout(timeout); 1345 lock_sock(sk); 1346 1347 if (signal_pending(current)) { 1348 err = sock_intr_errno(timeout); 1349 sk->sk_state = TCP_CLOSE; 1350 sock->state = SS_UNCONNECTED; 1351 vsock_transport_cancel_pkt(vsk); 1352 goto out_wait; 1353 } else if (timeout == 0) { 1354 err = -ETIMEDOUT; 1355 sk->sk_state = TCP_CLOSE; 1356 sock->state = SS_UNCONNECTED; 1357 vsock_transport_cancel_pkt(vsk); 1358 goto out_wait; 1359 } 1360 1361 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1362 } 1363 1364 if (sk->sk_err) { 1365 err = -sk->sk_err; 1366 sk->sk_state = TCP_CLOSE; 1367 sock->state = SS_UNCONNECTED; 1368 } else { 1369 err = 0; 1370 } 1371 1372 out_wait: 1373 finish_wait(sk_sleep(sk), &wait); 1374 out: 1375 release_sock(sk); 1376 return err; 1377 } 1378 1379 static int vsock_accept(struct socket *sock, struct socket *newsock, int flags, 1380 bool kern) 1381 { 1382 struct sock *listener; 1383 int err; 1384 struct sock *connected; 1385 struct vsock_sock *vconnected; 1386 long timeout; 1387 DEFINE_WAIT(wait); 1388 1389 err = 0; 1390 listener = sock->sk; 1391 1392 lock_sock(listener); 1393 1394 if (sock->type != SOCK_STREAM) { 1395 err = -EOPNOTSUPP; 1396 goto out; 1397 } 1398 1399 if (listener->sk_state != TCP_LISTEN) { 1400 err = -EINVAL; 1401 goto out; 1402 } 1403 1404 /* Wait for children sockets to appear; these are the new sockets 1405 * created upon connection establishment. 1406 */ 1407 timeout = sock_sndtimeo(listener, flags & O_NONBLOCK); 1408 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); 1409 1410 while ((connected = vsock_dequeue_accept(listener)) == NULL && 1411 listener->sk_err == 0) { 1412 release_sock(listener); 1413 timeout = schedule_timeout(timeout); 1414 finish_wait(sk_sleep(listener), &wait); 1415 lock_sock(listener); 1416 1417 if (signal_pending(current)) { 1418 err = sock_intr_errno(timeout); 1419 goto out; 1420 } else if (timeout == 0) { 1421 err = -EAGAIN; 1422 goto out; 1423 } 1424 1425 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); 1426 } 1427 finish_wait(sk_sleep(listener), &wait); 1428 1429 if (listener->sk_err) 1430 err = -listener->sk_err; 1431 1432 if (connected) { 1433 sk_acceptq_removed(listener); 1434 1435 lock_sock_nested(connected, SINGLE_DEPTH_NESTING); 1436 vconnected = vsock_sk(connected); 1437 1438 /* If the listener socket has received an error, then we should 1439 * reject this socket and return. Note that we simply mark the 1440 * socket rejected, drop our reference, and let the cleanup 1441 * function handle the cleanup; the fact that we found it in 1442 * the listener's accept queue guarantees that the cleanup 1443 * function hasn't run yet. 
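 * (The cleanup function in question is vsock_pending_work() above, which also
 * tears down sockets marked rejected, as described at the top of this file.)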
1444 */ 1445 if (err) { 1446 vconnected->rejected = true; 1447 } else { 1448 newsock->state = SS_CONNECTED; 1449 sock_graft(connected, newsock); 1450 } 1451 1452 release_sock(connected); 1453 sock_put(connected); 1454 } 1455 1456 out: 1457 release_sock(listener); 1458 return err; 1459 } 1460 1461 static int vsock_listen(struct socket *sock, int backlog) 1462 { 1463 int err; 1464 struct sock *sk; 1465 struct vsock_sock *vsk; 1466 1467 sk = sock->sk; 1468 1469 lock_sock(sk); 1470 1471 if (sock->type != SOCK_STREAM) { 1472 err = -EOPNOTSUPP; 1473 goto out; 1474 } 1475 1476 if (sock->state != SS_UNCONNECTED) { 1477 err = -EINVAL; 1478 goto out; 1479 } 1480 1481 vsk = vsock_sk(sk); 1482 1483 if (!vsock_addr_bound(&vsk->local_addr)) { 1484 err = -EINVAL; 1485 goto out; 1486 } 1487 1488 sk->sk_max_ack_backlog = backlog; 1489 sk->sk_state = TCP_LISTEN; 1490 1491 err = 0; 1492 1493 out: 1494 release_sock(sk); 1495 return err; 1496 } 1497 1498 static void vsock_update_buffer_size(struct vsock_sock *vsk, 1499 const struct vsock_transport *transport, 1500 u64 val) 1501 { 1502 if (val > vsk->buffer_max_size) 1503 val = vsk->buffer_max_size; 1504 1505 if (val < vsk->buffer_min_size) 1506 val = vsk->buffer_min_size; 1507 1508 if (val != vsk->buffer_size && 1509 transport && transport->notify_buffer_size) 1510 transport->notify_buffer_size(vsk, &val); 1511 1512 vsk->buffer_size = val; 1513 } 1514 1515 static int vsock_stream_setsockopt(struct socket *sock, 1516 int level, 1517 int optname, 1518 char __user *optval, 1519 unsigned int optlen) 1520 { 1521 int err; 1522 struct sock *sk; 1523 struct vsock_sock *vsk; 1524 const struct vsock_transport *transport; 1525 u64 val; 1526 1527 if (level != AF_VSOCK) 1528 return -ENOPROTOOPT; 1529 1530 #define COPY_IN(_v) \ 1531 do { \ 1532 if (optlen < sizeof(_v)) { \ 1533 err = -EINVAL; \ 1534 goto exit; \ 1535 } \ 1536 if (copy_from_user(&_v, optval, sizeof(_v)) != 0) { \ 1537 err = -EFAULT; \ 1538 goto exit; \ 1539 } \ 1540 } while (0) 1541 1542 err = 0; 1543 sk = sock->sk; 1544 vsk = vsock_sk(sk); 1545 transport = vsk->transport; 1546 1547 lock_sock(sk); 1548 1549 switch (optname) { 1550 case SO_VM_SOCKETS_BUFFER_SIZE: 1551 COPY_IN(val); 1552 vsock_update_buffer_size(vsk, transport, val); 1553 break; 1554 1555 case SO_VM_SOCKETS_BUFFER_MAX_SIZE: 1556 COPY_IN(val); 1557 vsk->buffer_max_size = val; 1558 vsock_update_buffer_size(vsk, transport, vsk->buffer_size); 1559 break; 1560 1561 case SO_VM_SOCKETS_BUFFER_MIN_SIZE: 1562 COPY_IN(val); 1563 vsk->buffer_min_size = val; 1564 vsock_update_buffer_size(vsk, transport, vsk->buffer_size); 1565 break; 1566 1567 case SO_VM_SOCKETS_CONNECT_TIMEOUT: { 1568 struct __kernel_old_timeval tv; 1569 COPY_IN(tv); 1570 if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC && 1571 tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) { 1572 vsk->connect_timeout = tv.tv_sec * HZ + 1573 DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ)); 1574 if (vsk->connect_timeout == 0) 1575 vsk->connect_timeout = 1576 VSOCK_DEFAULT_CONNECT_TIMEOUT; 1577 1578 } else { 1579 err = -ERANGE; 1580 } 1581 break; 1582 } 1583 1584 default: 1585 err = -ENOPROTOOPT; 1586 break; 1587 } 1588 1589 #undef COPY_IN 1590 1591 exit: 1592 release_sock(sk); 1593 return err; 1594 } 1595 1596 static int vsock_stream_getsockopt(struct socket *sock, 1597 int level, int optname, 1598 char __user *optval, 1599 int __user *optlen) 1600 { 1601 int err; 1602 int len; 1603 struct sock *sk; 1604 struct vsock_sock *vsk; 1605 u64 val; 1606 1607 if (level != AF_VSOCK) 1608 return -ENOPROTOOPT; 1609 1610 err = 
get_user(len, optlen); 1611 if (err != 0) 1612 return err; 1613 1614 #define COPY_OUT(_v) \ 1615 do { \ 1616 if (len < sizeof(_v)) \ 1617 return -EINVAL; \ 1618 \ 1619 len = sizeof(_v); \ 1620 if (copy_to_user(optval, &_v, len) != 0) \ 1621 return -EFAULT; \ 1622 \ 1623 } while (0) 1624 1625 err = 0; 1626 sk = sock->sk; 1627 vsk = vsock_sk(sk); 1628 1629 switch (optname) { 1630 case SO_VM_SOCKETS_BUFFER_SIZE: 1631 val = vsk->buffer_size; 1632 COPY_OUT(val); 1633 break; 1634 1635 case SO_VM_SOCKETS_BUFFER_MAX_SIZE: 1636 val = vsk->buffer_max_size; 1637 COPY_OUT(val); 1638 break; 1639 1640 case SO_VM_SOCKETS_BUFFER_MIN_SIZE: 1641 val = vsk->buffer_min_size; 1642 COPY_OUT(val); 1643 break; 1644 1645 case SO_VM_SOCKETS_CONNECT_TIMEOUT: { 1646 struct __kernel_old_timeval tv; 1647 tv.tv_sec = vsk->connect_timeout / HZ; 1648 tv.tv_usec = 1649 (vsk->connect_timeout - 1650 tv.tv_sec * HZ) * (1000000 / HZ); 1651 COPY_OUT(tv); 1652 break; 1653 } 1654 default: 1655 return -ENOPROTOOPT; 1656 } 1657 1658 err = put_user(len, optlen); 1659 if (err != 0) 1660 return -EFAULT; 1661 1662 #undef COPY_OUT 1663 1664 return 0; 1665 } 1666 1667 static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg, 1668 size_t len) 1669 { 1670 struct sock *sk; 1671 struct vsock_sock *vsk; 1672 const struct vsock_transport *transport; 1673 ssize_t total_written; 1674 long timeout; 1675 int err; 1676 struct vsock_transport_send_notify_data send_data; 1677 DEFINE_WAIT_FUNC(wait, woken_wake_function); 1678 1679 sk = sock->sk; 1680 vsk = vsock_sk(sk); 1681 transport = vsk->transport; 1682 total_written = 0; 1683 err = 0; 1684 1685 if (msg->msg_flags & MSG_OOB) 1686 return -EOPNOTSUPP; 1687 1688 lock_sock(sk); 1689 1690 /* Callers should not provide a destination with stream sockets. */ 1691 if (msg->msg_namelen) { 1692 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP; 1693 goto out; 1694 } 1695 1696 /* Send data only if both sides are not shutdown in the direction. */ 1697 if (sk->sk_shutdown & SEND_SHUTDOWN || 1698 vsk->peer_shutdown & RCV_SHUTDOWN) { 1699 err = -EPIPE; 1700 goto out; 1701 } 1702 1703 if (!transport || sk->sk_state != TCP_ESTABLISHED || 1704 !vsock_addr_bound(&vsk->local_addr)) { 1705 err = -ENOTCONN; 1706 goto out; 1707 } 1708 1709 if (!vsock_addr_bound(&vsk->remote_addr)) { 1710 err = -EDESTADDRREQ; 1711 goto out; 1712 } 1713 1714 /* Wait for room in the produce queue to enqueue our user's data. */ 1715 timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1716 1717 err = transport->notify_send_init(vsk, &send_data); 1718 if (err < 0) 1719 goto out; 1720 1721 while (total_written < len) { 1722 ssize_t written; 1723 1724 add_wait_queue(sk_sleep(sk), &wait); 1725 while (vsock_stream_has_space(vsk) == 0 && 1726 sk->sk_err == 0 && 1727 !(sk->sk_shutdown & SEND_SHUTDOWN) && 1728 !(vsk->peer_shutdown & RCV_SHUTDOWN)) { 1729 1730 /* Don't wait for non-blocking sockets. 
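 * (timeout is zero here when the caller passed MSG_DONTWAIT; see the
 * sock_sndtimeo() call above.)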
*/ 1731 if (timeout == 0) { 1732 err = -EAGAIN; 1733 remove_wait_queue(sk_sleep(sk), &wait); 1734 goto out_err; 1735 } 1736 1737 err = transport->notify_send_pre_block(vsk, &send_data); 1738 if (err < 0) { 1739 remove_wait_queue(sk_sleep(sk), &wait); 1740 goto out_err; 1741 } 1742 1743 release_sock(sk); 1744 timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout); 1745 lock_sock(sk); 1746 if (signal_pending(current)) { 1747 err = sock_intr_errno(timeout); 1748 remove_wait_queue(sk_sleep(sk), &wait); 1749 goto out_err; 1750 } else if (timeout == 0) { 1751 err = -EAGAIN; 1752 remove_wait_queue(sk_sleep(sk), &wait); 1753 goto out_err; 1754 } 1755 } 1756 remove_wait_queue(sk_sleep(sk), &wait); 1757 1758 /* These checks occur both as part of and after the loop 1759 * conditional since we need to check before and after 1760 * sleeping. 1761 */ 1762 if (sk->sk_err) { 1763 err = -sk->sk_err; 1764 goto out_err; 1765 } else if ((sk->sk_shutdown & SEND_SHUTDOWN) || 1766 (vsk->peer_shutdown & RCV_SHUTDOWN)) { 1767 err = -EPIPE; 1768 goto out_err; 1769 } 1770 1771 err = transport->notify_send_pre_enqueue(vsk, &send_data); 1772 if (err < 0) 1773 goto out_err; 1774 1775 /* Note that enqueue will only write as many bytes as are free 1776 * in the produce queue, so we don't need to ensure len is 1777 * smaller than the queue size. It is the caller's 1778 * responsibility to check how many bytes we were able to send. 1779 */ 1780 1781 written = transport->stream_enqueue( 1782 vsk, msg, 1783 len - total_written); 1784 if (written < 0) { 1785 err = -ENOMEM; 1786 goto out_err; 1787 } 1788 1789 total_written += written; 1790 1791 err = transport->notify_send_post_enqueue( 1792 vsk, written, &send_data); 1793 if (err < 0) 1794 goto out_err; 1795 1796 } 1797 1798 out_err: 1799 if (total_written > 0) 1800 err = total_written; 1801 out: 1802 release_sock(sk); 1803 return err; 1804 } 1805 1806 1807 static int 1808 vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 1809 int flags) 1810 { 1811 struct sock *sk; 1812 struct vsock_sock *vsk; 1813 const struct vsock_transport *transport; 1814 int err; 1815 size_t target; 1816 ssize_t copied; 1817 long timeout; 1818 struct vsock_transport_recv_notify_data recv_data; 1819 1820 DEFINE_WAIT(wait); 1821 1822 sk = sock->sk; 1823 vsk = vsock_sk(sk); 1824 transport = vsk->transport; 1825 err = 0; 1826 1827 lock_sock(sk); 1828 1829 if (!transport || sk->sk_state != TCP_ESTABLISHED) { 1830 /* Recvmsg is supposed to return 0 if a peer performs an 1831 * orderly shutdown. Differentiate between that case and when a 1832 * peer has not connected or a local shutdown occured with the 1833 * SOCK_DONE flag. 1834 */ 1835 if (sock_flag(sk, SOCK_DONE)) 1836 err = 0; 1837 else 1838 err = -ENOTCONN; 1839 1840 goto out; 1841 } 1842 1843 if (flags & MSG_OOB) { 1844 err = -EOPNOTSUPP; 1845 goto out; 1846 } 1847 1848 /* We don't check peer_shutdown flag here since peer may actually shut 1849 * down, but there can be data in the queue that a local socket can 1850 * receive. 1851 */ 1852 if (sk->sk_shutdown & RCV_SHUTDOWN) { 1853 err = 0; 1854 goto out; 1855 } 1856 1857 /* It is valid on Linux to pass in a zero-length receive buffer. This 1858 * is not an error. We may as well bail out now. 1859 */ 1860 if (!len) { 1861 err = 0; 1862 goto out; 1863 } 1864 1865 /* We must not copy less than target bytes into the user's buffer 1866 * before returning successfully, so we wait for the consume queue to 1867 * have that much data to consume before dequeueing. 
Note that this 1868 * makes it impossible to handle cases where target is greater than the 1869 * queue size. 1870 */ 1871 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 1872 if (target >= transport->stream_rcvhiwat(vsk)) { 1873 err = -ENOMEM; 1874 goto out; 1875 } 1876 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1877 copied = 0; 1878 1879 err = transport->notify_recv_init(vsk, target, &recv_data); 1880 if (err < 0) 1881 goto out; 1882 1883 1884 while (1) { 1885 s64 ready; 1886 1887 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1888 ready = vsock_stream_has_data(vsk); 1889 1890 if (ready == 0) { 1891 if (sk->sk_err != 0 || 1892 (sk->sk_shutdown & RCV_SHUTDOWN) || 1893 (vsk->peer_shutdown & SEND_SHUTDOWN)) { 1894 finish_wait(sk_sleep(sk), &wait); 1895 break; 1896 } 1897 /* Don't wait for non-blocking sockets. */ 1898 if (timeout == 0) { 1899 err = -EAGAIN; 1900 finish_wait(sk_sleep(sk), &wait); 1901 break; 1902 } 1903 1904 err = transport->notify_recv_pre_block( 1905 vsk, target, &recv_data); 1906 if (err < 0) { 1907 finish_wait(sk_sleep(sk), &wait); 1908 break; 1909 } 1910 release_sock(sk); 1911 timeout = schedule_timeout(timeout); 1912 lock_sock(sk); 1913 1914 if (signal_pending(current)) { 1915 err = sock_intr_errno(timeout); 1916 finish_wait(sk_sleep(sk), &wait); 1917 break; 1918 } else if (timeout == 0) { 1919 err = -EAGAIN; 1920 finish_wait(sk_sleep(sk), &wait); 1921 break; 1922 } 1923 } else { 1924 ssize_t read; 1925 1926 finish_wait(sk_sleep(sk), &wait); 1927 1928 if (ready < 0) { 1929 /* Invalid queue pair content. XXX This should 1930 * be changed to a connection reset in a later 1931 * change. 1932 */ 1933 1934 err = -ENOMEM; 1935 goto out; 1936 } 1937 1938 err = transport->notify_recv_pre_dequeue( 1939 vsk, target, &recv_data); 1940 if (err < 0) 1941 break; 1942 1943 read = transport->stream_dequeue( 1944 vsk, msg, 1945 len - copied, flags); 1946 if (read < 0) { 1947 err = -ENOMEM; 1948 break; 1949 } 1950 1951 copied += read; 1952 1953 err = transport->notify_recv_post_dequeue( 1954 vsk, target, read, 1955 !(flags & MSG_PEEK), &recv_data); 1956 if (err < 0) 1957 goto out; 1958 1959 if (read >= target || flags & MSG_PEEK) 1960 break; 1961 1962 target -= read; 1963 } 1964 } 1965 1966 if (sk->sk_err) 1967 err = -sk->sk_err; 1968 else if (sk->sk_shutdown & RCV_SHUTDOWN) 1969 err = 0; 1970 1971 if (copied > 0) 1972 err = copied; 1973 1974 out: 1975 release_sock(sk); 1976 return err; 1977 } 1978 1979 static const struct proto_ops vsock_stream_ops = { 1980 .family = PF_VSOCK, 1981 .owner = THIS_MODULE, 1982 .release = vsock_release, 1983 .bind = vsock_bind, 1984 .connect = vsock_stream_connect, 1985 .socketpair = sock_no_socketpair, 1986 .accept = vsock_accept, 1987 .getname = vsock_getname, 1988 .poll = vsock_poll, 1989 .ioctl = sock_no_ioctl, 1990 .listen = vsock_listen, 1991 .shutdown = vsock_shutdown, 1992 .setsockopt = vsock_stream_setsockopt, 1993 .getsockopt = vsock_stream_getsockopt, 1994 .sendmsg = vsock_stream_sendmsg, 1995 .recvmsg = vsock_stream_recvmsg, 1996 .mmap = sock_no_mmap, 1997 .sendpage = sock_no_sendpage, 1998 }; 1999 2000 static int vsock_create(struct net *net, struct socket *sock, 2001 int protocol, int kern) 2002 { 2003 struct vsock_sock *vsk; 2004 struct sock *sk; 2005 int ret; 2006 2007 if (!sock) 2008 return -EINVAL; 2009 2010 if (protocol && protocol != PF_VSOCK) 2011 return -EPROTONOSUPPORT; 2012 2013 switch (sock->type) { 2014 case SOCK_DGRAM: 2015 sock->ops = &vsock_dgram_ops; 2016 break; 2017 case SOCK_STREAM: 2018 sock->ops = 
&vsock_stream_ops; 2019 break; 2020 default: 2021 return -ESOCKTNOSUPPORT; 2022 } 2023 2024 sock->state = SS_UNCONNECTED; 2025 2026 sk = __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern); 2027 if (!sk) 2028 return -ENOMEM; 2029 2030 vsk = vsock_sk(sk); 2031 2032 if (sock->type == SOCK_DGRAM) { 2033 ret = vsock_assign_transport(vsk, NULL); 2034 if (ret < 0) { 2035 sock_put(sk); 2036 return ret; 2037 } 2038 } 2039 2040 vsock_insert_unbound(vsk); 2041 2042 return 0; 2043 } 2044 2045 static const struct net_proto_family vsock_family_ops = { 2046 .family = AF_VSOCK, 2047 .create = vsock_create, 2048 .owner = THIS_MODULE, 2049 }; 2050 2051 static long vsock_dev_do_ioctl(struct file *filp, 2052 unsigned int cmd, void __user *ptr) 2053 { 2054 u32 __user *p = ptr; 2055 u32 cid = VMADDR_CID_ANY; 2056 int retval = 0; 2057 2058 switch (cmd) { 2059 case IOCTL_VM_SOCKETS_GET_LOCAL_CID: 2060 /* To be compatible with the VMCI behavior, we prioritize the 2061 * guest CID instead of well-know host CID (VMADDR_CID_HOST). 2062 */ 2063 if (transport_g2h) 2064 cid = transport_g2h->get_local_cid(); 2065 else if (transport_h2g) 2066 cid = transport_h2g->get_local_cid(); 2067 2068 if (put_user(cid, p) != 0) 2069 retval = -EFAULT; 2070 break; 2071 2072 default: 2073 pr_err("Unknown ioctl %d\n", cmd); 2074 retval = -EINVAL; 2075 } 2076 2077 return retval; 2078 } 2079 2080 static long vsock_dev_ioctl(struct file *filp, 2081 unsigned int cmd, unsigned long arg) 2082 { 2083 return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg); 2084 } 2085 2086 #ifdef CONFIG_COMPAT 2087 static long vsock_dev_compat_ioctl(struct file *filp, 2088 unsigned int cmd, unsigned long arg) 2089 { 2090 return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg)); 2091 } 2092 #endif 2093 2094 static const struct file_operations vsock_device_ops = { 2095 .owner = THIS_MODULE, 2096 .unlocked_ioctl = vsock_dev_ioctl, 2097 #ifdef CONFIG_COMPAT 2098 .compat_ioctl = vsock_dev_compat_ioctl, 2099 #endif 2100 .open = nonseekable_open, 2101 }; 2102 2103 static struct miscdevice vsock_device = { 2104 .name = "vsock", 2105 .fops = &vsock_device_ops, 2106 }; 2107 2108 static int __init vsock_init(void) 2109 { 2110 int err = 0; 2111 2112 vsock_init_tables(); 2113 2114 vsock_proto.owner = THIS_MODULE; 2115 vsock_device.minor = MISC_DYNAMIC_MINOR; 2116 err = misc_register(&vsock_device); 2117 if (err) { 2118 pr_err("Failed to register misc device\n"); 2119 goto err_reset_transport; 2120 } 2121 2122 err = proto_register(&vsock_proto, 1); /* we want our slab */ 2123 if (err) { 2124 pr_err("Cannot register vsock protocol\n"); 2125 goto err_deregister_misc; 2126 } 2127 2128 err = sock_register(&vsock_family_ops); 2129 if (err) { 2130 pr_err("could not register af_vsock (%d) address family: %d\n", 2131 AF_VSOCK, err); 2132 goto err_unregister_proto; 2133 } 2134 2135 return 0; 2136 2137 err_unregister_proto: 2138 proto_unregister(&vsock_proto); 2139 err_deregister_misc: 2140 misc_deregister(&vsock_device); 2141 err_reset_transport: 2142 return err; 2143 } 2144 2145 static void __exit vsock_exit(void) 2146 { 2147 misc_deregister(&vsock_device); 2148 sock_unregister(AF_VSOCK); 2149 proto_unregister(&vsock_proto); 2150 } 2151 2152 const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk) 2153 { 2154 return vsk->transport; 2155 } 2156 EXPORT_SYMBOL_GPL(vsock_core_get_transport); 2157 2158 int vsock_core_register(const struct vsock_transport *t, int features) 2159 { 2160 const struct vsock_transport *t_h2g, *t_g2h, *t_dgram, *t_local; 2161 int err = 
mutex_lock_interruptible(&vsock_register_mutex);

        if (err)
                return err;

        t_h2g = transport_h2g;
        t_g2h = transport_g2h;
        t_dgram = transport_dgram;
        t_local = transport_local;

        if (features & VSOCK_TRANSPORT_F_H2G) {
                if (t_h2g) {
                        err = -EBUSY;
                        goto err_busy;
                }
                t_h2g = t;
        }

        if (features & VSOCK_TRANSPORT_F_G2H) {
                if (t_g2h) {
                        err = -EBUSY;
                        goto err_busy;
                }
                t_g2h = t;
        }

        if (features & VSOCK_TRANSPORT_F_DGRAM) {
                if (t_dgram) {
                        err = -EBUSY;
                        goto err_busy;
                }
                t_dgram = t;
        }

        if (features & VSOCK_TRANSPORT_F_LOCAL) {
                if (t_local) {
                        err = -EBUSY;
                        goto err_busy;
                }
                t_local = t;
        }

        transport_h2g = t_h2g;
        transport_g2h = t_g2h;
        transport_dgram = t_dgram;
        transport_local = t_local;

err_busy:
        mutex_unlock(&vsock_register_mutex);
        return err;
}
EXPORT_SYMBOL_GPL(vsock_core_register);

void vsock_core_unregister(const struct vsock_transport *t)
{
        mutex_lock(&vsock_register_mutex);

        if (transport_h2g == t)
                transport_h2g = NULL;

        if (transport_g2h == t)
                transport_g2h = NULL;

        if (transport_dgram == t)
                transport_dgram = NULL;

        if (transport_local == t)
                transport_local = NULL;

        mutex_unlock(&vsock_register_mutex);
}
EXPORT_SYMBOL_GPL(vsock_core_unregister);

module_init(vsock_init);
module_exit(vsock_exit);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
MODULE_VERSION("1.0.2.0-k");
MODULE_LICENSE("GPL v2");
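
/* Example usage (illustrative only, not part of the module):
 *
 * A transport registers itself with one or more feature flags; a hypothetical
 * host-side transport would do roughly
 *
 *     vsock_core_register(&my_transport, VSOCK_TRANSPORT_F_H2G);
 *
 * and undo it with vsock_core_unregister(&my_transport) on module unload.
 *
 * From user space, a minimal stream server/client pair looks like the
 * following sketch (error handling omitted; port 1234 and peer_cid are
 * placeholders):
 *
 *     // server
 *     int s = socket(AF_VSOCK, SOCK_STREAM, 0);
 *     struct sockaddr_vm addr = {
 *             .svm_family = AF_VSOCK,
 *             .svm_cid    = VMADDR_CID_ANY,
 *             .svm_port   = 1234,
 *     };
 *     bind(s, (struct sockaddr *)&addr, sizeof(addr));
 *     listen(s, 1);
 *     int c = accept(s, NULL, NULL);
 *
 *     // client
 *     int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *     struct sockaddr_vm peer = {
 *             .svm_family = AF_VSOCK,
 *             .svm_cid    = peer_cid,
 *             .svm_port   = 1234,
 *     };
 *     connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 *
 * Binding to a port at or below LAST_RESERVED_PORT requires
 * CAP_NET_BIND_SERVICE, as enforced in __vsock_bind_stream() above.
 */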