/* AF_RXRPC implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/key-type.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#define CREATE_TRACE_POINTS
#include "ar-internal.h"

MODULE_DESCRIPTION("RxRPC network protocol");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_RXRPC);

unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
module_param_named(debug, rxrpc_debug, uint, 0644);
MODULE_PARM_DESC(debug, "RxRPC debugging mask");

static struct proto rxrpc_proto;
static const struct proto_ops rxrpc_rpc_ops;

/* current debugging ID */
atomic_t rxrpc_debug_id;
EXPORT_SYMBOL(rxrpc_debug_id);

/* count of skbs currently in use */
atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;

struct workqueue_struct *rxrpc_workqueue;

static void rxrpc_sock_destructor(struct sock *);

/*
 * see if an RxRPC socket is currently writable
 * - writable while the memory charged to pending transmissions stays below
 *   the socket's send buffer limit
 */
static inline int rxrpc_writable(struct sock *sk)
{
	return refcount_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
}

/*
 * wait for write bufferage to become available
 * - installed as ->sk_write_space(); wakes any sleeper on the socket wait
 *   queue and raises async (SIGIO/poll) space notification once writable
 */
static void rxrpc_write_space(struct sock *sk)
{
	_enter("%p", sk);
	rcu_read_lock();
	if (rxrpc_writable(sk)) {
		/* sk->sk_wq is RCU-managed, hence the read-side lock */
		struct socket_wq *wq = rcu_dereference(sk->sk_wq);

		if (skwq_has_sleeper(wq))
			wake_up_interruptible(&wq->wait);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/*
 * validate an RxRPC address
 * - checks the rxrpc family/type fields and the embedded transport address,
 *   then zeroes the unused tail of the transport address so that address
 *   comparisons elsewhere can be done with memcmp()
 */
static int rxrpc_validate_address(struct rxrpc_sock *rx,
				  struct sockaddr_rxrpc *srx,
				  int len)
{
	unsigned int tail;

	if (len < sizeof(struct sockaddr_rxrpc))
		return -EINVAL;

	if (srx->srx_family != AF_RXRPC)
		return -EAFNOSUPPORT;

	if (srx->transport_type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	len -= offsetof(struct sockaddr_rxrpc, transport);
	if (srx->transport_len < sizeof(sa_family_t) ||
	    srx->transport_len > len)
		return -EINVAL;

	/* the transport address family must match the one the socket was
	 * created with */
	if (srx->transport.family != rx->family)
		return -EAFNOSUPPORT;

	switch (srx->transport.family) {
	case AF_INET:
		if (srx->transport_len < sizeof(struct sockaddr_in))
			return -EINVAL;
		tail = offsetof(struct sockaddr_rxrpc, transport.sin.__pad);
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		if (srx->transport_len < sizeof(struct sockaddr_in6))
			return -EINVAL;
		tail = offsetof(struct sockaddr_rxrpc, transport) +
			sizeof(struct sockaddr_in6);
		break;
#endif

	default:
		return -EAFNOSUPPORT;
	}

	/* NOTE(review): 'tail' is an offset from the start of *srx, whereas
	 * 'len' had offsetof(transport) subtracted above — the two values
	 * appear to be measured from different bases; confirm the intended
	 * zeroing window against the upstream history.
	 */
	if (tail < len)
		memset((void *)srx + tail, 0, len - tail);
	_debug("INET: %pISp", &srx->transport);
	return 0;
}

/*
 * bind a local address to an RxRPC socket
 * - the first bind attaches the socket to a local endpoint; if a service ID
 *   was supplied, the socket is also published as that endpoint's service
 * - a second bind on a service socket may add exactly one extra service ID
 *   on an otherwise identical address
 */
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	u16 service_id = srx->srx_service;
	int ret;

	_enter("%p,%p,%d", rx, saddr, len);

	ret = rxrpc_validate_address(rx, srx, len);
	if (ret < 0)
		goto error;

	/* the socket state machine is protected by the socket lock */
	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		rx->srx = *srx;
		local = rxrpc_lookup_local(sock_net(&rx->sk), &rx->srx);
		if (IS_ERR(local)) {
			ret = PTR_ERR(local);
			goto error_unlock;
		}

		if (service_id) {
			/* publish as the endpoint's service socket, unless
			 * one is already installed */
			write_lock(&local->services_lock);
			if (rcu_access_pointer(local->service))
				goto service_in_use;
			rx->local = local;
			rcu_assign_pointer(local->service, rx);
			write_unlock(&local->services_lock);

			rx->sk.sk_state = RXRPC_SERVER_BOUND;
		} else {
			rx->local = local;
			rx->sk.sk_state = RXRPC_CLIENT_BOUND;
		}
		break;

	case RXRPC_SERVER_BOUND:
		/* second bind: must carry a new, different service ID but be
		 * otherwise byte-identical to the first address */
		ret = -EINVAL;
		if (service_id == 0)
			goto error_unlock;
		ret = -EADDRINUSE;
		if (service_id == rx->srx.srx_service)
			goto error_unlock;
		ret = -EINVAL;
		srx->srx_service = rx->srx.srx_service;
		if (memcmp(srx, &rx->srx, sizeof(*srx)) != 0)
			goto error_unlock;
		rx->second_service = service_id;
		rx->sk.sk_state = RXRPC_SERVER_BOUND2;
		break;

	default:
		ret = -EINVAL;
		goto error_unlock;
	}

	release_sock(&rx->sk);
	_leave(" = 0");
	return 0;

service_in_use:
	write_unlock(&local->services_lock);
	rxrpc_put_local(local);
	ret = -EADDRINUSE;
error_unlock:
	release_sock(&rx->sk);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * set the number of pending calls permitted on a listening socket
 * - a backlog of INT_MAX selects the rxrpc_max_backlog sysctl maximum
 * - a backlog of 0 on a listening socket disables listening and discards
 *   the preallocated service calls
 */
static int rxrpc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	unsigned int max, old;
	int ret;

	_enter("%p,%d", rx, backlog);

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		ret = -EADDRNOTAVAIL;
		break;
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_BOUND2:
		ASSERT(rx->local != NULL);
		max = READ_ONCE(rxrpc_max_backlog);
		ret = -EINVAL;
		if (backlog == INT_MAX)
			backlog = max;
		else if (backlog < 0 || backlog > max)
			break;
		old = sk->sk_max_ack_backlog;
		sk->sk_max_ack_backlog = backlog;
		ret = rxrpc_service_prealloc(rx, GFP_KERNEL);
		if (ret == 0)
			rx->sk.sk_state = RXRPC_SERVER_LISTENING;
		else
			/* preallocation failed: restore the old backlog */
			sk->sk_max_ack_backlog = old;
		break;
	case RXRPC_SERVER_LISTENING:
		if (backlog == 0) {
			rx->sk.sk_state = RXRPC_SERVER_LISTEN_DISABLED;
			sk->sk_max_ack_backlog = 0;
			rxrpc_discard_prealloc(rx);
			ret = 0;
			break;
		}
		/* Fall through */
	default:
		ret = -EBUSY;
		break;
	}

	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
 * @sock: The socket on which to make the call
 * @srx: The address of the peer to contact
 * @key: The security context to use (defaults to socket setting)
 * @user_call_ID: The ID to use
 * @tx_total_len: Total length of data to transmit during the call (or -1)
 * @gfp: The allocation constraints
 * @notify_rx: Where to send notifications instead of socket queue
 * @upgrade: Request service upgrade for call
 * @debug_id: The debug ID for tracing to be assigned to the call
 *
 * Allow a kernel service to begin a call on the nominated socket.  This just
 * sets up all the internal tracking structures and allocates connection and
 * call IDs as appropriate.  The call to be used is returned.
 *
 * The default socket destination address and security may be overridden by
 * supplying @srx and @key.
 */
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
					   struct sockaddr_rxrpc *srx,
					   struct key *key,
					   unsigned long user_call_ID,
					   s64 tx_total_len,
					   gfp_t gfp,
					   rxrpc_notify_rx_t notify_rx,
					   bool upgrade,
					   unsigned int debug_id)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_call_params p;
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter(",,%x,%lx", key_serial(key), user_call_ID);

	ret = rxrpc_validate_address(rx, srx, sizeof(*srx));
	if (ret < 0)
		return ERR_PTR(ret);

	lock_sock(&rx->sk);

	/* fall back to the socket's key, and treat a key with no payload as
	 * no security at all */
	if (!key)
		key = rx->key;
	if (key && !key->payload.data[0])
		key = NULL; /* a no-security key */

	memset(&p, 0, sizeof(p));
	p.user_call_ID = user_call_ID;
	p.tx_total_len = tx_total_len;

	memset(&cp, 0, sizeof(cp));
	cp.local = rx->local;
	cp.key = key;
	cp.security_level = rx->min_sec_level;
	cp.exclusive = false;
	cp.upgrade = upgrade;
	cp.service_id = srx->srx_service;
	call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp, debug_id);
	/* The socket has been unlocked. */
	if (!IS_ERR(call)) {
		call->notify_rx = notify_rx;
		/* the new call came back with user_mutex held; drop it for
		 * the caller */
		mutex_unlock(&call->user_mutex);
	}

	rxrpc_put_peer(cp.peer);
	_leave(" = %p", call);
	return call;
}
EXPORT_SYMBOL(rxrpc_kernel_begin_call);

/*
 * Dummy function used to stop the notifier talking to recvmsg().
 */
static void rxrpc_dummy_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
				  unsigned long call_user_ID)
{
}

/**
 * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
 * @sock: The socket the call is on
 * @call: The call to end
 *
 * Allow a kernel service to end a call it was using.  The call must be
 * complete before this is called (the call should be aborted if necessary).
 */
void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
{
	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));

	mutex_lock(&call->user_mutex);
	rxrpc_release_call(rxrpc_sk(sock->sk), call);

	/* Make sure we're not going to call back into a kernel service */
	if (call->notify_rx) {
		/* swap in the dummy notifier under notify_lock so a
		 * concurrent notification can't race with the change */
		spin_lock_bh(&call->notify_lock);
		call->notify_rx = rxrpc_dummy_notify_rx;
		spin_unlock_bh(&call->notify_lock);
	}

	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put_kernel);
}
EXPORT_SYMBOL(rxrpc_kernel_end_call);

/**
 * rxrpc_kernel_check_life - Check to see whether a call is still alive
 * @sock: The socket the call is on
 * @call: The call to check
 *
 * Allow a kernel service to find out whether a call is still alive - ie. we're
 * getting ACKs from the server.  Returns a number representing the life state
 * which can be compared to that returned by a previous call.
 *
 * If this is a client call, ping ACKs will be sent to the server to find out
 * whether it's still responsive and whether the call is still alive on the
 * server.
 */
u32 rxrpc_kernel_check_life(struct socket *sock, struct rxrpc_call *call)
{
	return call->acks_latest;
}
EXPORT_SYMBOL(rxrpc_kernel_check_life);

/**
 * rxrpc_kernel_check_call - Check a call's state
 * @sock: The socket the call is on
 * @call: The call to check
 * @_compl: Where to store the completion state
 * @_abort_code: Where to store any abort code
 *
 * Allow a kernel service to query the state of a call and find out the manner
 * of its termination if it has completed.  Returns -EINPROGRESS if the call is
 * still going, 0 if the call finished successfully, -ECONNABORTED if the call
 * was aborted and an appropriate error if the call failed in some other way.
 */
int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
			    enum rxrpc_call_completion *_compl, u32 *_abort_code)
{
	if (call->state != RXRPC_CALL_COMPLETE)
		return -EINPROGRESS;
	/* order the read of the state above before the completion details
	 * below; the pairing write barrier is on the completing side (not
	 * visible in this file)
	 */
	smp_rmb();
	*_compl = call->completion;
	*_abort_code = call->abort_code;
	return call->error;
}
EXPORT_SYMBOL(rxrpc_kernel_check_call);

/**
 * rxrpc_kernel_retry_call - Allow a kernel service to retry a call
 * @sock: The socket the call is on
 * @call: The call to retry
 * @srx: The address of the peer to contact
 * @key: The security context to use (defaults to socket setting)
 *
 * Allow a kernel service to try resending a client call that failed due to a
 * network error to a new address.  The Tx queue is maintained intact, thereby
 * relieving the need to re-encrypt any request data that has already been
 * buffered.
 */
int rxrpc_kernel_retry_call(struct socket *sock, struct rxrpc_call *call,
			    struct sockaddr_rxrpc *srx, struct key *key)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));

	/* same key-defaulting rules as rxrpc_kernel_begin_call() */
	if (!key)
		key = rx->key;
	if (key && !key->payload.data[0])
		key = NULL; /* a no-security key */

	memset(&cp, 0, sizeof(cp));
	cp.local = rx->local;
	cp.key = key;
	cp.security_level = 0;
	cp.exclusive = false;
	cp.service_id = srx->srx_service;

	mutex_lock(&call->user_mutex);

	ret = rxrpc_prepare_call_for_retry(rx, call);
	if (ret == 0)
		ret = rxrpc_retry_client_call(rx, call, &cp, srx, GFP_KERNEL);

	mutex_unlock(&call->user_mutex);
	rxrpc_put_peer(cp.peer);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_retry_call);

/**
 * rxrpc_kernel_new_call_notification - Get notifications of new calls
 * @sock: The socket to intercept received messages on
 * @notify_new_call: Function to be called when new calls appear
 * @discard_new_call: Function to discard preallocated calls
 *
 * Allow a kernel service to be given notifications about new calls.
 */
void rxrpc_kernel_new_call_notification(
	struct socket *sock,
	rxrpc_notify_new_call_t notify_new_call,
	rxrpc_discard_new_call_t discard_new_call)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);

	rx->notify_new_call = notify_new_call;
	rx->discard_new_call = discard_new_call;
}
EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);

/*
 * connect an RxRPC socket
 * - this just targets it at a specific destination; no actual connection
 *   negotiation takes place
 */
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
			 int addr_len, int flags)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)addr;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

	ret = rxrpc_validate_address(rx, srx, addr_len);
	if (ret < 0) {
		_leave(" = %d [bad addr]", ret);
		return ret;
	}

	lock_sock(&rx->sk);

	ret = -EISCONN;
	if (test_bit(RXRPC_SOCK_CONNECTED, &rx->flags))
		goto error;

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
		/* Fall through */
	case RXRPC_CLIENT_UNBOUND:
	case RXRPC_CLIENT_BOUND:
		break;
	default:
		ret = -EBUSY;
		goto error;
	}

	/* record the default destination used by sendmsg() when no address
	 * is supplied */
	rx->connect_srx = *srx;
	set_bit(RXRPC_SOCK_CONNECTED, &rx->flags);
	ret = 0;

error:
	release_sock(&rx->sk);
	return ret;
}

/*
 * send a message through an RxRPC socket
 * - in a client this does a number of things:
 *   - finds/sets up a connection for the security specified (if any)
 *   - initiates a call (ID in control data)
 *   - ends the request phase of a call (if MSG_MORE is not set)
 *   - sends a call data packet
 *   - may
     send an abort (abort code in control data)
 */
static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter(",{%d},,%zu", rx->sk.sk_state, len);

	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (m->msg_name) {
		ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
		if (ret < 0) {
			_leave(" = %d [bad addr]", ret);
			return ret;
		}
	}

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		/* implicitly bind the socket to a wildcard local address of
		 * the socket's transport family on first sendmsg() */
		rx->srx.srx_family = AF_RXRPC;
		rx->srx.srx_service = 0;
		rx->srx.transport_type = SOCK_DGRAM;
		rx->srx.transport.family = rx->family;
		switch (rx->family) {
		case AF_INET:
			rx->srx.transport_len = sizeof(struct sockaddr_in);
			break;
#ifdef CONFIG_AF_RXRPC_IPV6
		case AF_INET6:
			rx->srx.transport_len = sizeof(struct sockaddr_in6);
			break;
#endif
		default:
			ret = -EAFNOSUPPORT;
			goto error_unlock;
		}
		local = rxrpc_lookup_local(sock_net(sock->sk), &rx->srx);
		if (IS_ERR(local)) {
			ret = PTR_ERR(local);
			goto error_unlock;
		}

		rx->local = local;
		rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
		/* Fall through */

	case RXRPC_CLIENT_UNBOUND:
	case RXRPC_CLIENT_BOUND:
		/* with no explicit destination, use the address recorded by
		 * connect() */
		if (!m->msg_name &&
		    test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) {
			m->msg_name = &rx->connect_srx;
			m->msg_namelen = sizeof(rx->connect_srx);
		}
		/* Fall through */
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_LISTENING:
		ret = rxrpc_do_sendmsg(rx, m, len);
		/* The socket has been unlocked */
		goto out;
	default:
		ret = -EINVAL;
		goto error_unlock;
	}

error_unlock:
	release_sock(&rx->sk);
out:
	_leave(" = %d", ret);
	return ret;
}

/*
 * set RxRPC socket options
 */
static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	unsigned int min_sec_level;
	u16 service_upgrade[2];
	int ret;

	_enter(",%d,%d,,%d", level, optname, optlen);

	lock_sock(&rx->sk);
	ret = -EOPNOTSUPP;

	/* NOTE(review): if level != SOL_RXRPC, or optname hits the default
	 * case, control falls through to the success label below and 0 is
	 * returned rather than the -EOPNOTSUPP set above — confirm this is
	 * the intended behaviour.
	 */
	if (level == SOL_RXRPC) {
		switch (optname) {
		case RXRPC_EXCLUSIVE_CONNECTION:
			ret = -EINVAL;
			if (optlen != 0)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			rx->exclusive = true;
			goto success;

		case RXRPC_SECURITY_KEY:
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = rxrpc_request_key(rx, optval, optlen);
			goto error;

		case RXRPC_SECURITY_KEYRING:
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = rxrpc_server_keyring(rx, optval, optlen);
			goto error;

		case RXRPC_MIN_SECURITY_LEVEL:
			ret = -EINVAL;
			if (optlen != sizeof(unsigned int))
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = get_user(min_sec_level,
				       (unsigned int __user *) optval);
			if (ret < 0)
				goto error;
			ret = -EINVAL;
			if (min_sec_level > RXRPC_SECURITY_MAX)
				goto error;
			rx->min_sec_level = min_sec_level;
			goto success;

		case RXRPC_UPGRADEABLE_SERVICE:
			ret = -EINVAL;
			if (optlen != sizeof(service_upgrade) ||
			    rx->service_upgrade.from != 0)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_SERVER_BOUND2)
				goto error;
			ret = -EFAULT;
			if (copy_from_user(service_upgrade, optval,
					   sizeof(service_upgrade)) != 0)
				goto error;
			ret = -EINVAL;
			/* the pair must name the socket's two service IDs,
			 * in either order */
			if ((service_upgrade[0] != rx->srx.srx_service ||
			     service_upgrade[1] != rx->second_service) &&
			    (service_upgrade[0] != rx->second_service ||
			     service_upgrade[1] != rx->srx.srx_service))
				goto error;
			rx->service_upgrade.from = service_upgrade[0];
			rx->service_upgrade.to = service_upgrade[1];
			goto success;

		default:
			break;
		}
	}

success:
	ret = 0;
error:
	release_sock(&rx->sk);
	return ret;
}

/*
 * Get socket options.
 */
static int rxrpc_getsockopt(struct socket *sock, int level, int optname,
			    char __user *optval, int __user *_optlen)
{
	int optlen;

	if (level != SOL_RXRPC)
		return -EOPNOTSUPP;

	if (get_user(optlen, _optlen))
		return -EFAULT;

	switch (optname) {
	case RXRPC_SUPPORTED_CMSG:
		/* report the highest control-message type this kernel
		 * understands */
		if (optlen < sizeof(int))
			return -ETOOSMALL;
		if (put_user(RXRPC__SUPPORTED - 1, (int __user *)optval) ||
		    put_user(sizeof(int), _optlen))
			return -EFAULT;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}

/*
 * permit an RxRPC socket to be polled
 */
static __poll_t rxrpc_poll_mask(struct socket *sock, __poll_t events)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	__poll_t mask = 0;

	/* the socket is readable if there are any messages waiting on the Rx
	 * queue */
	if (!list_empty(&rx->recvmsg_q))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* the socket is writable if there is space to add new data to the
	 * socket; there is no guarantee that any particular call in progress
	 * on the socket may have space in the Tx ACK window */
	if (rxrpc_writable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

/*
 * create an RxRPC socket
 */
static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
			int kern)
{
	struct rxrpc_net *rxnet;
	struct rxrpc_sock *rx;
	struct sock *sk;

	_enter("%p,%d", sock, protocol);

	/* we support transport protocol UDP/UDP6 only */
	/* NOTE(review): when CONFIG_AF_RXRPC_IPV6 is disabled, IS_ENABLED()
	 * is 0 and this condition can never be true, so no protocol value is
	 * rejected here — confirm that is the intended behaviour.
	 */
	if (protocol != PF_INET &&
	    IS_ENABLED(CONFIG_AF_RXRPC_IPV6) && protocol != PF_INET6)
		return -EPROTONOSUPPORT;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	sock->ops = &rxrpc_rpc_ops;
	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sock_set_flag(sk, SOCK_RCU_FREE);
	sk->sk_state = RXRPC_UNBOUND;
	sk->sk_write_space = rxrpc_write_space;
	sk->sk_max_ack_backlog = 0;
	sk->sk_destruct = rxrpc_sock_destructor;

	rx = rxrpc_sk(sk);
	rx->family = protocol;
	rx->calls = RB_ROOT;

	spin_lock_init(&rx->incoming_lock);
	INIT_LIST_HEAD(&rx->sock_calls);
	INIT_LIST_HEAD(&rx->to_be_accepted);
	INIT_LIST_HEAD(&rx->recvmsg_q);
	rwlock_init(&rx->recvmsg_lock);
	rwlock_init(&rx->call_lock);
	memset(&rx->srx, 0, sizeof(rx->srx));

	/* kick the per-net peer keepalive timer */
	rxnet = rxrpc_net(sock_net(&rx->sk));
	timer_reduce(&rxnet->peer_keepalive_timer, jiffies + 1);

	_leave(" = 0 [%p]", rx);
	return 0;
}

/*
 * Kill all the calls on a socket and shut it down.
 */
static int rxrpc_shutdown(struct socket *sock, int flags)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	int ret = 0;

	_enter("%p,%d", sk, flags);

	if (flags != SHUT_RDWR)
		return -EOPNOTSUPP;
	if (sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	lock_sock(sk);

	/* the receive-queue lock guards the sk_state transition here, as in
	 * rxrpc_release_sock() */
	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (sk->sk_state < RXRPC_CLOSE) {
		sk->sk_state = RXRPC_CLOSE;
		sk->sk_shutdown = SHUTDOWN_MASK;
	} else {
		ret = -ESHUTDOWN;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	rxrpc_discard_prealloc(rx);

	release_sock(sk);
	return ret;
}

/*
 * RxRPC socket destructor
 * - installed as ->sk_destruct; sanity-checks that the socket has been
 *   fully drained and detached before it is freed
 */
static void rxrpc_sock_destructor(struct sock *sk)
{
	_enter("%p", sk);

	rxrpc_purge_queue(&sk->sk_receive_queue);

	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive rxrpc socket: %p\n", sk);
		return;
	}
}

/*
 * release an RxRPC socket
 */
static int rxrpc_release_sock(struct sock *sk)
{
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

	_enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));

	/* declare the socket closed for business */
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	/* We want to kill off all connections from a service socket
	 * as fast as possible because we can't share these; client
	 * sockets, on the other hand, can share an endpoint.
	 */
	switch (sk->sk_state) {
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_BOUND2:
	case RXRPC_SERVER_LISTENING:
	case RXRPC_SERVER_LISTEN_DISABLED:
		rx->local->service_closed = true;
		break;
	}

	spin_lock_bh(&sk->sk_receive_queue.lock);
	sk->sk_state = RXRPC_CLOSE;
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	/* unpublish this socket from the local endpoint's service slot */
	if (rx->local && rcu_access_pointer(rx->local->service) == rx) {
		write_lock(&rx->local->services_lock);
		rcu_assign_pointer(rx->local->service, NULL);
		write_unlock(&rx->local->services_lock);
	}

	/* try to flush out this socket */
	rxrpc_discard_prealloc(rx);
	rxrpc_release_calls_on_socket(rx);
	flush_workqueue(rxrpc_workqueue);
	rxrpc_purge_queue(&sk->sk_receive_queue);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	rxrpc_queue_work(&rxnet->client_conn_reaper);

	rxrpc_put_local(rx->local);
	rx->local = NULL;
	key_put(rx->key);
	rx->key = NULL;
	key_put(rx->securities);
	rx->securities = NULL;
	sock_put(sk);

	_leave(" = 0");
	return 0;
}

/*
 * release an RxRPC BSD socket on close() or equivalent
 */
static int rxrpc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	_enter("%p{%p}", sock, sk);

	if (!sk)
		return 0;

	sock->sk = NULL;

	return rxrpc_release_sock(sk);
}

/*
 * RxRPC network protocol
 */
static const struct proto_ops rxrpc_rpc_ops = {
	.family		= PF_RXRPC,
	.owner		= THIS_MODULE,
	.release	= rxrpc_release,
	.bind		= rxrpc_bind,
	.connect	= rxrpc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll_mask	= rxrpc_poll_mask,
	.ioctl		= sock_no_ioctl,
	.listen		= rxrpc_listen,
	.shutdown	= rxrpc_shutdown,
	.setsockopt	= rxrpc_setsockopt,
	.getsockopt	= rxrpc_getsockopt,
	.sendmsg	= rxrpc_sendmsg,
	.recvmsg	= rxrpc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct proto rxrpc_proto = {
	.name		= "RXRPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct rxrpc_sock),
	.max_header	= sizeof(struct rxrpc_wire_header),
};

static const struct net_proto_family rxrpc_family_ops = {
	.family	= PF_RXRPC,
	.create = rxrpc_create,
	.owner	= THIS_MODULE,
};

/*
 * initialise and register the RxRPC protocol
 * - on failure, the error labels unwind in exactly the reverse order of the
 *   registrations above them
 */
static int __init af_rxrpc_init(void)
{
	int ret = -1;
	unsigned int tmp;

	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));

	/* seed the client connection ID cursor with a random non-zero value
	 * in the low 30 bits */
	get_random_bytes(&tmp, sizeof(tmp));
	tmp &= 0x3fffffff;
	if (tmp == 0)
		tmp = 1;
	idr_set_cursor(&rxrpc_client_conn_ids, tmp);

	ret = -ENOMEM;
	rxrpc_call_jar = kmem_cache_create(
		"rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!rxrpc_call_jar) {
		pr_notice("Failed to allocate call jar\n");
		goto error_call_jar;
	}

	rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
	if (!rxrpc_workqueue) {
		pr_notice("Failed to allocate work queue\n");
		goto error_work_queue;
	}

	ret = rxrpc_init_security();
	if (ret < 0) {
		pr_crit("Cannot initialise security\n");
		goto error_security;
	}

	ret = register_pernet_subsys(&rxrpc_net_ops);
	if (ret)
		goto error_pernet;

	ret = proto_register(&rxrpc_proto, 1);
	if (ret < 0) {
		pr_crit("Cannot register protocol\n");
		goto error_proto;
	}

	ret = sock_register(&rxrpc_family_ops);
	if (ret < 0) {
		pr_crit("Cannot register socket family\n");
		goto error_sock;
	}

	ret = register_key_type(&key_type_rxrpc);
	if (ret < 0) {
		pr_crit("Cannot register client key type\n");
		goto error_key_type;
	}

	ret = register_key_type(&key_type_rxrpc_s);
	if (ret < 0) {
		pr_crit("Cannot register server key type\n");
		goto error_key_type_s;
	}

	ret = rxrpc_sysctl_init();
	if (ret < 0) {
		pr_crit("Cannot register sysctls\n");
		goto error_sysctls;
	}

	return 0;

error_sysctls:
	unregister_key_type(&key_type_rxrpc_s);
error_key_type_s:
	unregister_key_type(&key_type_rxrpc);
error_key_type:
	sock_unregister(PF_RXRPC);
error_sock:
	proto_unregister(&rxrpc_proto);
error_proto:
	unregister_pernet_subsys(&rxrpc_net_ops);
error_pernet:
	rxrpc_exit_security();
error_security:
	destroy_workqueue(rxrpc_workqueue);
error_work_queue:
	kmem_cache_destroy(rxrpc_call_jar);
error_call_jar:
	return ret;
}

/*
 * unregister the RxRPC protocol
 * - mirrors af_rxrpc_init() in reverse
 */
static void __exit af_rxrpc_exit(void)
{
	_enter("");
	rxrpc_sysctl_exit();
	unregister_key_type(&key_type_rxrpc_s);
	unregister_key_type(&key_type_rxrpc);
	sock_unregister(PF_RXRPC);
	proto_unregister(&rxrpc_proto);
	unregister_pernet_subsys(&rxrpc_net_ops);
	ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
	ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();
	rxrpc_destroy_client_conn_ids();

	destroy_workqueue(rxrpc_workqueue);
	rxrpc_exit_security();
	kmem_cache_destroy(rxrpc_call_jar);
	_leave("");
}

module_init(af_rxrpc_init);
module_exit(af_rxrpc_exit);