/* AF_RXRPC implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/key-type.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#define CREATE_TRACE_POINTS
#include "ar-internal.h"

MODULE_DESCRIPTION("RxRPC network protocol");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_RXRPC);

unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
module_param_named(debug, rxrpc_debug, uint, 0644);
MODULE_PARM_DESC(debug, "RxRPC debugging mask");

static struct proto rxrpc_proto;
static const struct proto_ops rxrpc_rpc_ops;

/* current debugging ID */
atomic_t rxrpc_debug_id;
EXPORT_SYMBOL(rxrpc_debug_id);

/* count of skbs currently in use */
atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;

struct workqueue_struct *rxrpc_workqueue;

static void rxrpc_sock_destructor(struct sock *);

/*
 * see if an RxRPC socket is currently writable
 */
static inline int rxrpc_writable(struct sock *sk)
{
	return refcount_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
}

/*
 * wait for write bufferage to become available
 */
static void rxrpc_write_space(struct sock *sk)
{
	_enter("%p", sk);
	rcu_read_lock();
	if (rxrpc_writable(sk)) {
		struct socket_wq *wq = rcu_dereference(sk->sk_wq);

		if (skwq_has_sleeper(wq))
			wake_up_interruptible(&wq->wait);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/*
 * validate an RxRPC address
 */
static int rxrpc_validate_address(struct rxrpc_sock *rx,
				  struct sockaddr_rxrpc *srx,
				  int len)
{
	unsigned int tail;

	if (len < sizeof(struct sockaddr_rxrpc))
		return -EINVAL;

	if (srx->srx_family != AF_RXRPC)
		return -EAFNOSUPPORT;

	if (srx->transport_type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	len -= offsetof(struct sockaddr_rxrpc, transport);
	if (srx->transport_len < sizeof(sa_family_t) ||
	    srx->transport_len > len)
		return -EINVAL;

	if (srx->transport.family != rx->family &&
	    srx->transport.family == AF_INET && rx->family != AF_INET6)
		return -EAFNOSUPPORT;

	switch (srx->transport.family) {
	case AF_INET:
		if (srx->transport_len < sizeof(struct sockaddr_in))
			return -EINVAL;
		tail = offsetof(struct sockaddr_rxrpc, transport.sin.__pad);
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		if (srx->transport_len < sizeof(struct sockaddr_in6))
			return -EINVAL;
		tail = offsetof(struct sockaddr_rxrpc, transport) +
			sizeof(struct sockaddr_in6);
		break;
#endif

	default:
		return -EAFNOSUPPORT;
	}

	if (tail < len)
		memset((void *)srx + tail, 0, len - tail);
	_debug("INET: %pISp", &srx->transport);
	return 0;
}
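
/*
 * For illustration only: the shape of sockaddr_rxrpc that the validator
 * above accepts for an IPv4 transport.  The service ID and port are made-up
 * values, not anything defined by this file.
 *
 *	struct sockaddr_rxrpc srx = {
 *		.srx_family		= AF_RXRPC,
 *		.srx_service		= 1234,		// hypothetical service ID
 *		.transport_type		= SOCK_DGRAM,
 *		.transport_len		= sizeof(srx.transport.sin),
 *		.transport.sin.sin_family = AF_INET,
 *		.transport.sin.sin_port	= htons(7001),	// hypothetical port
 *	};
 */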

/*
 * bind a local address to an RxRPC socket
 */
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	u16 service_id = srx->srx_service;
	int ret;

	_enter("%p,%p,%d", rx, saddr, len);

	ret = rxrpc_validate_address(rx, srx, len);
	if (ret < 0)
		goto error;

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		rx->srx = *srx;
		local = rxrpc_lookup_local(sock_net(&rx->sk), &rx->srx);
		if (IS_ERR(local)) {
			ret = PTR_ERR(local);
			goto error_unlock;
		}

		if (service_id) {
			write_lock(&local->services_lock);
			if (rcu_access_pointer(local->service))
				goto service_in_use;
			rx->local = local;
			rcu_assign_pointer(local->service, rx);
			write_unlock(&local->services_lock);

			rx->sk.sk_state = RXRPC_SERVER_BOUND;
		} else {
			rx->local = local;
			rx->sk.sk_state = RXRPC_CLIENT_BOUND;
		}
		break;

	case RXRPC_SERVER_BOUND:
		ret = -EINVAL;
		if (service_id == 0)
			goto error_unlock;
		ret = -EADDRINUSE;
		if (service_id == rx->srx.srx_service)
			goto error_unlock;
		ret = -EINVAL;
		srx->srx_service = rx->srx.srx_service;
		if (memcmp(srx, &rx->srx, sizeof(*srx)) != 0)
			goto error_unlock;
		rx->second_service = service_id;
		rx->sk.sk_state = RXRPC_SERVER_BOUND2;
		break;

	default:
		ret = -EINVAL;
		goto error_unlock;
	}

	release_sock(&rx->sk);
	_leave(" = 0");
	return 0;

service_in_use:
	write_unlock(&local->services_lock);
	rxrpc_put_local(local);
	ret = -EADDRINUSE;
error_unlock:
	release_sock(&rx->sk);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * set the number of pending calls permitted on a listening socket
 */
static int rxrpc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	unsigned int max, old;
	int ret;

	_enter("%p,%d", rx, backlog);

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		ret = -EADDRNOTAVAIL;
		break;
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_BOUND2:
		ASSERT(rx->local != NULL);
		max = READ_ONCE(rxrpc_max_backlog);
		ret = -EINVAL;
		if (backlog == INT_MAX)
			backlog = max;
		else if (backlog < 0 || backlog > max)
			break;
		old = sk->sk_max_ack_backlog;
		sk->sk_max_ack_backlog = backlog;
		ret = rxrpc_service_prealloc(rx, GFP_KERNEL);
		if (ret == 0)
			rx->sk.sk_state = RXRPC_SERVER_LISTENING;
		else
			sk->sk_max_ack_backlog = old;
		break;
	case RXRPC_SERVER_LISTENING:
		if (backlog == 0) {
			rx->sk.sk_state = RXRPC_SERVER_LISTEN_DISABLED;
			sk->sk_max_ack_backlog = 0;
			rxrpc_discard_prealloc(rx);
			ret = 0;
			break;
		}
		/* Fall through */
	default:
		ret = -EBUSY;
		break;
	}

	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ret;
}
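
/*
 * For illustration only: roughly how a kernel service brings up a service
 * socket using the operations above (compare afs_open_socket() in fs/afs/).
 * Error handling is omitted and the srx is assumed to be filled in as in
 * the earlier example.
 *
 *	struct socket *sock;
 *
 *	sock_create_kern(net, AF_RXRPC, SOCK_DGRAM, PF_INET, &sock);
 *	kernel_bind(sock, (struct sockaddr *)&srx, sizeof(srx));
 *	kernel_listen(sock, INT_MAX);	// INT_MAX means "use rxrpc_max_backlog"
 */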

/**
 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
 * @sock: The socket on which to make the call
 * @srx: The address of the peer to contact
 * @key: The security context to use (defaults to socket setting)
 * @user_call_ID: The ID to use
 * @tx_total_len: Total length of data to transmit during the call (or -1)
 * @gfp: The allocation constraints
 * @notify_rx: Where to send notifications instead of socket queue
 * @upgrade: Request service upgrade for call
 * @debug_id: The debug ID for tracing to be assigned to the call
 *
 * Allow a kernel service to begin a call on the nominated socket.  This just
 * sets up all the internal tracking structures and allocates connection and
 * call IDs as appropriate.  The call to be used is returned.
 *
 * The default socket destination address and security may be overridden by
 * supplying @srx and @key.
 */
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
					   struct sockaddr_rxrpc *srx,
					   struct key *key,
					   unsigned long user_call_ID,
					   s64 tx_total_len,
					   gfp_t gfp,
					   rxrpc_notify_rx_t notify_rx,
					   bool upgrade,
					   unsigned int debug_id)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_call_params p;
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter(",,%x,%lx", key_serial(key), user_call_ID);

	ret = rxrpc_validate_address(rx, srx, sizeof(*srx));
	if (ret < 0)
		return ERR_PTR(ret);

	lock_sock(&rx->sk);

	if (!key)
		key = rx->key;
	if (key && !key->payload.data[0])
		key = NULL; /* a no-security key */

	memset(&p, 0, sizeof(p));
	p.user_call_ID = user_call_ID;
	p.tx_total_len = tx_total_len;

	memset(&cp, 0, sizeof(cp));
	cp.local		= rx->local;
	cp.key			= key;
	cp.security_level	= rx->min_sec_level;
	cp.exclusive		= false;
	cp.upgrade		= upgrade;
	cp.service_id		= srx->srx_service;
	call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp, debug_id);
	/* The socket has been unlocked. */
	if (!IS_ERR(call)) {
		call->notify_rx = notify_rx;
		mutex_unlock(&call->user_mutex);
	}

	rxrpc_put_peer(cp.peer);
	_leave(" = %p", call);
	return call;
}
EXPORT_SYMBOL(rxrpc_kernel_begin_call);
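
/*
 * For illustration only: a kernel service might start a client call along
 * these lines (compare afs_make_call() in fs/afs/rxrpc.c).  The notifier
 * my_notify_rx, the request pointer and its length are hypothetical.
 *
 *	struct rxrpc_call *call;
 *
 *	call = rxrpc_kernel_begin_call(sock, &srx, NULL,
 *				       (unsigned long)my_req, my_req_len,
 *				       GFP_NOFS, my_notify_rx, false,
 *				       atomic_inc_return(&rxrpc_debug_id));
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 */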

/*
 * Dummy function used to stop the notifier talking to recvmsg().
 */
static void rxrpc_dummy_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
				  unsigned long call_user_ID)
{
}

/**
 * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
 * @sock: The socket the call is on
 * @call: The call to end
 *
 * Allow a kernel service to end a call it was using.  The call must be
 * complete before this is called (the call should be aborted if necessary).
 */
void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
{
	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));

	mutex_lock(&call->user_mutex);
	rxrpc_release_call(rxrpc_sk(sock->sk), call);

	/* Make sure we're not going to call back into a kernel service */
	if (call->notify_rx) {
		spin_lock_bh(&call->notify_lock);
		call->notify_rx = rxrpc_dummy_notify_rx;
		spin_unlock_bh(&call->notify_lock);
	}

	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put_kernel);
}
EXPORT_SYMBOL(rxrpc_kernel_end_call);

/**
 * rxrpc_kernel_check_life - Check to see whether a call is still alive
 * @sock: The socket the call is on
 * @call: The call to check
 *
 * Allow a kernel service to find out whether a call is still alive - ie. we're
 * getting ACKs from the server.  Returns a number representing the life state
 * which can be compared to that returned by a previous call.
 *
 * If the life state stalls, rxrpc_kernel_probe_life() should be called and
 * then 2*RTT waited.
 */
u32 rxrpc_kernel_check_life(const struct socket *sock,
			    const struct rxrpc_call *call)
{
	return call->acks_latest;
}
EXPORT_SYMBOL(rxrpc_kernel_check_life);

/**
 * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive
 * @sock: The socket the call is on
 * @call: The call to check
 *
 * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to
 * find out whether a call is still alive by pinging it.  This should cause the
 * life state to be bumped in about 2*RTT.
 *
 * This must be called in TASK_RUNNING state on pain of might_sleep() objecting.
 */
void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
{
	rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
			  rxrpc_propose_ack_ping_for_check_life);
	rxrpc_send_ack_packet(call, true, NULL);
}
EXPORT_SYMBOL(rxrpc_kernel_probe_life);

/**
 * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
 * @sock: The socket the call is on
 * @call: The call to query
 *
 * Allow a kernel service to retrieve the epoch value from a service call to
 * see if the client at the other end rebooted.
 */
u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call)
{
	return call->conn->proto.epoch;
}
EXPORT_SYMBOL(rxrpc_kernel_get_epoch);

/**
 * rxrpc_kernel_check_call - Check a call's state
 * @sock: The socket the call is on
 * @call: The call to check
 * @_compl: Where to store the completion state
 * @_abort_code: Where to store any abort code
 *
 * Allow a kernel service to query the state of a call and find out the manner
 * of its termination if it has completed.  Returns -EINPROGRESS if the call is
 * still going, 0 if the call finished successfully, -ECONNABORTED if the call
 * was aborted and an appropriate error if the call failed in some other way.
 */
int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
			    enum rxrpc_call_completion *_compl, u32 *_abort_code)
{
	if (call->state != RXRPC_CALL_COMPLETE)
		return -EINPROGRESS;
	smp_rmb();
	*_compl = call->completion;
	*_abort_code = call->abort_code;
	return call->error;
}
EXPORT_SYMBOL(rxrpc_kernel_check_call);
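
/*
 * For illustration only: a kernel service blocked on a call might combine
 * rxrpc_kernel_check_life() and rxrpc_kernel_probe_life() to detect and
 * probe a stalled peer; this sketch is loosely modelled on the AFS
 * call-waiting loop.
 *
 *	u32 life, last_life = rxrpc_kernel_check_life(sock, call);
 *
 *	for (;;) {
 *		// ... sleep until progress, signal or timeout ...
 *		life = rxrpc_kernel_check_life(sock, call);
 *		if (life == last_life)
 *			rxrpc_kernel_probe_life(sock, call); // then wait ~2*RTT
 *		last_life = life;
 *	}
 */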

/**
 * rxrpc_kernel_retry_call - Allow a kernel service to retry a call
 * @sock: The socket the call is on
 * @call: The call to retry
 * @srx: The address of the peer to contact
 * @key: The security context to use (defaults to socket setting)
 *
 * Allow a kernel service to try resending a client call that failed due to a
 * network error to a new address.  The Tx queue is maintained intact, thereby
 * relieving the need to re-encrypt any request data that has already been
 * buffered.
 */
int rxrpc_kernel_retry_call(struct socket *sock, struct rxrpc_call *call,
			    struct sockaddr_rxrpc *srx, struct key *key)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));

	if (!key)
		key = rx->key;
	if (key && !key->payload.data[0])
		key = NULL; /* a no-security key */

	memset(&cp, 0, sizeof(cp));
	cp.local		= rx->local;
	cp.key			= key;
	cp.security_level	= 0;
	cp.exclusive		= false;
	cp.service_id		= srx->srx_service;

	mutex_lock(&call->user_mutex);

	ret = rxrpc_prepare_call_for_retry(rx, call);
	if (ret == 0)
		ret = rxrpc_retry_client_call(rx, call, &cp, srx, GFP_KERNEL);

	mutex_unlock(&call->user_mutex);
	rxrpc_put_peer(cp.peer);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_retry_call);

/**
 * rxrpc_kernel_new_call_notification - Get notifications of new calls
 * @sock: The socket to intercept received messages on
 * @notify_new_call: Function to be called when new calls appear
 * @discard_new_call: Function to discard preallocated calls
 *
 * Allow a kernel service to be given notifications about new calls.
 */
void rxrpc_kernel_new_call_notification(
	struct socket *sock,
	rxrpc_notify_new_call_t notify_new_call,
	rxrpc_discard_new_call_t discard_new_call)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);

	rx->notify_new_call = notify_new_call;
	rx->discard_new_call = discard_new_call;
}
EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);
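
/*
 * For illustration only: a kernel service acting as a server would normally
 * register its handlers immediately after creating the socket and before
 * listening.  The handler names are hypothetical.
 *
 *	rxrpc_kernel_new_call_notification(sock, my_rx_new_call,
 *					   my_rx_discard_new_call);
 */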

/*
 * connect an RxRPC socket
 * - this just targets it at a specific destination; no actual connection
 *   negotiation takes place
 */
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
			 int addr_len, int flags)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)addr;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

	ret = rxrpc_validate_address(rx, srx, addr_len);
	if (ret < 0) {
		_leave(" = %d [bad addr]", ret);
		return ret;
	}

	lock_sock(&rx->sk);

	ret = -EISCONN;
	if (test_bit(RXRPC_SOCK_CONNECTED, &rx->flags))
		goto error;

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
		/* Fall through */
	case RXRPC_CLIENT_UNBOUND:
	case RXRPC_CLIENT_BOUND:
		break;
	default:
		ret = -EBUSY;
		goto error;
	}

	rx->connect_srx = *srx;
	set_bit(RXRPC_SOCK_CONNECTED, &rx->flags);
	ret = 0;

error:
	release_sock(&rx->sk);
	return ret;
}

/*
 * send a message through an RxRPC socket
 * - in a client this does a number of things:
 *   - finds/sets up a connection for the security specified (if any)
 *   - initiates a call (ID in control data)
 *   - ends the request phase of a call (if MSG_MORE is not set)
 *   - sends a call data packet
 *   - may send an abort (abort code in control data)
 */
static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter(",{%d},,%zu", rx->sk.sk_state, len);

	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (m->msg_name) {
		ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
		if (ret < 0) {
			_leave(" = %d [bad addr]", ret);
			return ret;
		}
	}

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		rx->srx.srx_family = AF_RXRPC;
		rx->srx.srx_service = 0;
		rx->srx.transport_type = SOCK_DGRAM;
		rx->srx.transport.family = rx->family;
		switch (rx->family) {
		case AF_INET:
			rx->srx.transport_len = sizeof(struct sockaddr_in);
			break;
#ifdef CONFIG_AF_RXRPC_IPV6
		case AF_INET6:
			rx->srx.transport_len = sizeof(struct sockaddr_in6);
			break;
#endif
		default:
			ret = -EAFNOSUPPORT;
			goto error_unlock;
		}
		local = rxrpc_lookup_local(sock_net(sock->sk), &rx->srx);
		if (IS_ERR(local)) {
			ret = PTR_ERR(local);
			goto error_unlock;
		}

		rx->local = local;
		rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
		/* Fall through */

	case RXRPC_CLIENT_UNBOUND:
	case RXRPC_CLIENT_BOUND:
		if (!m->msg_name &&
		    test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) {
			m->msg_name = &rx->connect_srx;
			m->msg_namelen = sizeof(rx->connect_srx);
		}
		/* Fall through */
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_LISTENING:
		ret = rxrpc_do_sendmsg(rx, m, len);
		/* The socket has been unlocked */
		goto out;
	default:
		ret = -EINVAL;
		goto error_unlock;
	}

error_unlock:
	release_sock(&rx->sk);
out:
	_leave(" = %d", ret);
	return ret;
}
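
/*
 * For illustration only: from userspace, a new client call is initiated by
 * sendmsg() carrying an RXRPC_USER_CALL_ID control message (see
 * Documentation/networking/rxrpc.txt).  The tag value is a placeholder.
 *
 *	unsigned long user_id = 0x1;		// caller's tag for the call
 *	char ctrl[CMSG_SPACE(sizeof(user_id))];
 *	struct msghdr msg = {
 *		.msg_name	= &srx,
 *		.msg_namelen	= sizeof(srx),
 *		.msg_control	= ctrl,
 *		.msg_controllen	= sizeof(ctrl),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(user_id));
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type  = RXRPC_USER_CALL_ID;
 *	memcpy(CMSG_DATA(cmsg), &user_id, sizeof(user_id));
 *	// then sendmsg(fd, &msg, 0), with msg_iov pointing at the request data
 */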

/*
 * set RxRPC socket options
 */
static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	unsigned int min_sec_level;
	u16 service_upgrade[2];
	int ret;

	_enter(",%d,%d,,%d", level, optname, optlen);

	lock_sock(&rx->sk);
	ret = -EOPNOTSUPP;

	if (level == SOL_RXRPC) {
		switch (optname) {
		case RXRPC_EXCLUSIVE_CONNECTION:
			ret = -EINVAL;
			if (optlen != 0)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			rx->exclusive = true;
			goto success;

		case RXRPC_SECURITY_KEY:
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = rxrpc_request_key(rx, optval, optlen);
			goto error;

		case RXRPC_SECURITY_KEYRING:
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = rxrpc_server_keyring(rx, optval, optlen);
			goto error;

		case RXRPC_MIN_SECURITY_LEVEL:
			ret = -EINVAL;
			if (optlen != sizeof(unsigned int))
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = get_user(min_sec_level,
				       (unsigned int __user *) optval);
			if (ret < 0)
				goto error;
			ret = -EINVAL;
			if (min_sec_level > RXRPC_SECURITY_MAX)
				goto error;
			rx->min_sec_level = min_sec_level;
			goto success;

		case RXRPC_UPGRADEABLE_SERVICE:
			ret = -EINVAL;
			if (optlen != sizeof(service_upgrade) ||
			    rx->service_upgrade.from != 0)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_SERVER_BOUND2)
				goto error;
			ret = -EFAULT;
			if (copy_from_user(service_upgrade, optval,
					   sizeof(service_upgrade)) != 0)
				goto error;
			ret = -EINVAL;
			if ((service_upgrade[0] != rx->srx.srx_service ||
			     service_upgrade[1] != rx->second_service) &&
			    (service_upgrade[0] != rx->second_service ||
			     service_upgrade[1] != rx->srx.srx_service))
				goto error;
			rx->service_upgrade.from = service_upgrade[0];
			rx->service_upgrade.to = service_upgrade[1];
			goto success;

		default:
			break;
		}
	}

success:
	ret = 0;
error:
	release_sock(&rx->sk);
	return ret;
}
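
/*
 * For illustration only: a userspace client that wants wire encryption would
 * raise the minimum security level while the socket is still unbound:
 *
 *	unsigned int lvl = RXRPC_SECURITY_ENCRYPT;
 *
 *	setsockopt(fd, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL, &lvl, sizeof(lvl));
 */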

/*
 * Get socket options.
 */
static int rxrpc_getsockopt(struct socket *sock, int level, int optname,
			    char __user *optval, int __user *_optlen)
{
	int optlen;

	if (level != SOL_RXRPC)
		return -EOPNOTSUPP;

	if (get_user(optlen, _optlen))
		return -EFAULT;

	switch (optname) {
	case RXRPC_SUPPORTED_CMSG:
		if (optlen < sizeof(int))
			return -ETOOSMALL;
		if (put_user(RXRPC__SUPPORTED - 1, (int __user *)optval) ||
		    put_user(sizeof(int), _optlen))
			return -EFAULT;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}

/*
 * permit an RxRPC socket to be polled
 */
static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* the socket is readable if there are any messages waiting on the Rx
	 * queue */
	if (!list_empty(&rx->recvmsg_q))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* the socket is writable if there is space to add new data to the
	 * socket; there is no guarantee that any particular call in progress
	 * on the socket may have space in the Tx ACK window */
	if (rxrpc_writable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

/*
 * create an RxRPC socket
 */
static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
			int kern)
{
	struct rxrpc_net *rxnet;
	struct rxrpc_sock *rx;
	struct sock *sk;

	_enter("%p,%d", sock, protocol);

	/* we support transport protocol UDP/UDP6 only */
	if (protocol != PF_INET &&
	    !(IS_ENABLED(CONFIG_AF_RXRPC_IPV6) && protocol == PF_INET6))
		return -EPROTONOSUPPORT;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	sock->ops = &rxrpc_rpc_ops;
	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sock_set_flag(sk, SOCK_RCU_FREE);
	sk->sk_state		= RXRPC_UNBOUND;
	sk->sk_write_space	= rxrpc_write_space;
	sk->sk_max_ack_backlog	= 0;
	sk->sk_destruct		= rxrpc_sock_destructor;

	rx = rxrpc_sk(sk);
	rx->family = protocol;
	rx->calls = RB_ROOT;

	spin_lock_init(&rx->incoming_lock);
	INIT_LIST_HEAD(&rx->sock_calls);
	INIT_LIST_HEAD(&rx->to_be_accepted);
	INIT_LIST_HEAD(&rx->recvmsg_q);
	rwlock_init(&rx->recvmsg_lock);
	rwlock_init(&rx->call_lock);
	memset(&rx->srx, 0, sizeof(rx->srx));

	rxnet = rxrpc_net(sock_net(&rx->sk));
	timer_reduce(&rxnet->peer_keepalive_timer, jiffies + 1);

	_leave(" = 0 [%p]", rx);
	return 0;
}

/*
 * Kill all the calls on a socket and shut it down.
 */
static int rxrpc_shutdown(struct socket *sock, int flags)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	int ret = 0;

	_enter("%p,%d", sk, flags);

	if (flags != SHUT_RDWR)
		return -EOPNOTSUPP;
	if (sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	lock_sock(sk);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (sk->sk_state < RXRPC_CLOSE) {
		sk->sk_state = RXRPC_CLOSE;
		sk->sk_shutdown = SHUTDOWN_MASK;
	} else {
		ret = -ESHUTDOWN;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	rxrpc_discard_prealloc(rx);

	release_sock(sk);
	return ret;
}

/*
 * RxRPC socket destructor
 */
static void rxrpc_sock_destructor(struct sock *sk)
{
	_enter("%p", sk);

	rxrpc_purge_queue(&sk->sk_receive_queue);

	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive rxrpc socket: %p\n", sk);
		return;
	}
}

/*
 * release an RxRPC socket
 */
static int rxrpc_release_sock(struct sock *sk)
{
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

	_enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));

	/* declare the socket closed for business */
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	/* We want to kill off all connections from a service socket
	 * as fast as possible because we can't share these; client
	 * sockets, on the other hand, can share an endpoint.
	 */
	switch (sk->sk_state) {
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_BOUND2:
	case RXRPC_SERVER_LISTENING:
	case RXRPC_SERVER_LISTEN_DISABLED:
		rx->local->service_closed = true;
		break;
	}

	spin_lock_bh(&sk->sk_receive_queue.lock);
	sk->sk_state = RXRPC_CLOSE;
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	if (rx->local && rcu_access_pointer(rx->local->service) == rx) {
		write_lock(&rx->local->services_lock);
		rcu_assign_pointer(rx->local->service, NULL);
		write_unlock(&rx->local->services_lock);
	}

	/* try to flush out this socket */
	rxrpc_discard_prealloc(rx);
	rxrpc_release_calls_on_socket(rx);
	flush_workqueue(rxrpc_workqueue);
	rxrpc_purge_queue(&sk->sk_receive_queue);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	rxrpc_queue_work(&rxnet->client_conn_reaper);

	rxrpc_put_local(rx->local);
	rx->local = NULL;
	key_put(rx->key);
	rx->key = NULL;
	key_put(rx->securities);
	rx->securities = NULL;
	sock_put(sk);

	_leave(" = 0");
	return 0;
}

/*
 * release an RxRPC BSD socket on close() or equivalent
 */
static int rxrpc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	_enter("%p{%p}", sock, sk);

	if (!sk)
		return 0;

	sock->sk = NULL;

	return rxrpc_release_sock(sk);
}

/*
 * RxRPC network protocol
 */
static const struct proto_ops rxrpc_rpc_ops = {
	.family		= PF_RXRPC,
	.owner		= THIS_MODULE,
	.release	= rxrpc_release,
	.bind		= rxrpc_bind,
	.connect	= rxrpc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= rxrpc_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= rxrpc_listen,
	.shutdown	= rxrpc_shutdown,
	.setsockopt	= rxrpc_setsockopt,
	.getsockopt	= rxrpc_getsockopt,
	.sendmsg	= rxrpc_sendmsg,
	.recvmsg	= rxrpc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct proto rxrpc_proto = {
	.name		= "RXRPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct rxrpc_sock),
	.max_header	= sizeof(struct rxrpc_wire_header),
};

static const struct net_proto_family rxrpc_family_ops = {
	.family	= PF_RXRPC,
	.create	= rxrpc_create,
	.owner	= THIS_MODULE,
};

/*
 * initialise and register the RxRPC protocol
 */
static int __init af_rxrpc_init(void)
{
	int ret = -1;
	unsigned int tmp;

	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));

	get_random_bytes(&tmp, sizeof(tmp));
	tmp &= 0x3fffffff;
	if (tmp == 0)
		tmp = 1;
	idr_set_cursor(&rxrpc_client_conn_ids, tmp);

	ret = -ENOMEM;
	rxrpc_call_jar = kmem_cache_create(
		"rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!rxrpc_call_jar) {
		pr_notice("Failed to allocate call jar\n");
		goto error_call_jar;
	}

	rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
	if (!rxrpc_workqueue) {
		pr_notice("Failed to allocate work queue\n");
		goto error_work_queue;
	}

	ret = rxrpc_init_security();
	if (ret < 0) {
		pr_crit("Cannot initialise security\n");
		goto error_security;
	}

	ret = register_pernet_subsys(&rxrpc_net_ops);
	if (ret)
		goto error_pernet;

	ret = proto_register(&rxrpc_proto, 1);
	if (ret < 0) {
		pr_crit("Cannot register protocol\n");
		goto error_proto;
	}

	ret = sock_register(&rxrpc_family_ops);
	if (ret < 0) {
		pr_crit("Cannot register socket family\n");
		goto error_sock;
	}

	ret = register_key_type(&key_type_rxrpc);
	if (ret < 0) {
		pr_crit("Cannot register client key type\n");
		goto error_key_type;
	}

	ret = register_key_type(&key_type_rxrpc_s);
	if (ret < 0) {
		pr_crit("Cannot register server key type\n");
		goto error_key_type_s;
	}

	ret = rxrpc_sysctl_init();
	if (ret < 0) {
		pr_crit("Cannot register sysctls\n");
		goto error_sysctls;
	}

	return 0;

error_sysctls:
	unregister_key_type(&key_type_rxrpc_s);
error_key_type_s:
	unregister_key_type(&key_type_rxrpc);
error_key_type:
	sock_unregister(PF_RXRPC);
error_sock:
	proto_unregister(&rxrpc_proto);
error_proto:
	unregister_pernet_subsys(&rxrpc_net_ops);
error_pernet:
	rxrpc_exit_security();
error_security:
	destroy_workqueue(rxrpc_workqueue);
error_work_queue:
	kmem_cache_destroy(rxrpc_call_jar);
error_call_jar:
	return ret;
}

/*
 * unregister the RxRPC protocol
 */
static void __exit af_rxrpc_exit(void)
{
	_enter("");
	rxrpc_sysctl_exit();
	unregister_key_type(&key_type_rxrpc_s);
	unregister_key_type(&key_type_rxrpc);
	sock_unregister(PF_RXRPC);
	proto_unregister(&rxrpc_proto);
	unregister_pernet_subsys(&rxrpc_net_ops);
	ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
	ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();
	rxrpc_destroy_client_conn_ids();

	destroy_workqueue(rxrpc_workqueue);
	rxrpc_exit_security();
	kmem_cache_destroy(rxrpc_call_jar);
	_leave("");
}

module_init(af_rxrpc_init);
module_exit(af_rxrpc_exit);