// SPDX-License-Identifier: GPL-2.0-or-later
/* AF_RXRPC implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/key-type.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#define CREATE_TRACE_POINTS
#include "ar-internal.h"

MODULE_DESCRIPTION("RxRPC network protocol");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_RXRPC);

unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
module_param_named(debug, rxrpc_debug, uint, 0644);
MODULE_PARM_DESC(debug, "RxRPC debugging mask");

static struct proto rxrpc_proto;
static const struct proto_ops rxrpc_rpc_ops;

/* current debugging ID */
atomic_t rxrpc_debug_id;
EXPORT_SYMBOL(rxrpc_debug_id);

/* count of skbs currently in use */
atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;

struct workqueue_struct *rxrpc_workqueue;

static void rxrpc_sock_destructor(struct sock *);

/*
 * see if an RxRPC socket is currently writable
 */
static inline int rxrpc_writable(struct sock *sk)
{
	return refcount_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
}

/*
 * wake up anyone waiting for write bufferage to become available
 */
static void rxrpc_write_space(struct sock *sk)
{
	_enter("%p", sk);
	rcu_read_lock();
	if (rxrpc_writable(sk)) {
		struct socket_wq *wq = rcu_dereference(sk->sk_wq);

		if (skwq_has_sleeper(wq))
			wake_up_interruptible(&wq->wait);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/*
 * validate an RxRPC address
 */
static int rxrpc_validate_address(struct rxrpc_sock *rx,
				  struct sockaddr_rxrpc *srx,
				  int len)
{
	unsigned int tail;

	if (len < sizeof(struct sockaddr_rxrpc))
		return -EINVAL;

	if (srx->srx_family != AF_RXRPC)
		return -EAFNOSUPPORT;

	if (srx->transport_type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	len -= offsetof(struct sockaddr_rxrpc, transport);
	if (srx->transport_len < sizeof(sa_family_t) ||
	    srx->transport_len > len)
		return -EINVAL;

	if (srx->transport.family != rx->family &&
	    srx->transport.family == AF_INET && rx->family != AF_INET6)
		return -EAFNOSUPPORT;

	switch (srx->transport.family) {
	case AF_INET:
		if (srx->transport_len < sizeof(struct sockaddr_in))
			return -EINVAL;
		tail = offsetof(struct sockaddr_rxrpc, transport.sin.__pad);
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		if (srx->transport_len < sizeof(struct sockaddr_in6))
			return -EINVAL;
		tail = offsetof(struct sockaddr_rxrpc, transport) +
			sizeof(struct sockaddr_in6);
		break;
#endif

	default:
		return -EAFNOSUPPORT;
	}

	if (tail < len)
		memset((void *)srx + tail, 0, len - tail);
	_debug("INET: %pISp", &srx->transport);
	return 0;
}

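/*
 * Example (illustrative sketch, not part of the original file): the shape of
 * address that rxrpc_validate_address() accepts.  The service ID and port
 * below are arbitrary values chosen for illustration.
 *
 *	struct sockaddr_rxrpc srx = {
 *		.srx_family	= AF_RXRPC,
 *		.srx_service	= 52,
 *		.transport_type	= SOCK_DGRAM,
 *		.transport_len	= sizeof(struct sockaddr_in),
 *		.transport.sin.sin_family = AF_INET,
 *		.transport.sin.sin_port   = htons(7000),
 *	};
 *	srx.transport.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 */
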
/*
 * bind a local address to an RxRPC socket
 */
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	u16 service_id;
	int ret;

	_enter("%p,%p,%d", rx, saddr, len);

	ret = rxrpc_validate_address(rx, srx, len);
	if (ret < 0)
		goto error;
	service_id = srx->srx_service;

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		rx->srx = *srx;
		local = rxrpc_lookup_local(sock_net(&rx->sk), &rx->srx);
		if (IS_ERR(local)) {
			ret = PTR_ERR(local);
			goto error_unlock;
		}

		if (service_id) {
			write_lock(&local->services_lock);
			if (rcu_access_pointer(local->service))
				goto service_in_use;
			rx->local = local;
			rcu_assign_pointer(local->service, rx);
			write_unlock(&local->services_lock);

			rx->sk.sk_state = RXRPC_SERVER_BOUND;
		} else {
			rx->local = local;
			rx->sk.sk_state = RXRPC_CLIENT_BOUND;
		}
		break;

	case RXRPC_SERVER_BOUND:
		ret = -EINVAL;
		if (service_id == 0)
			goto error_unlock;
		ret = -EADDRINUSE;
		if (service_id == rx->srx.srx_service)
			goto error_unlock;
		ret = -EINVAL;
		srx->srx_service = rx->srx.srx_service;
		if (memcmp(srx, &rx->srx, sizeof(*srx)) != 0)
			goto error_unlock;
		rx->second_service = service_id;
		rx->sk.sk_state = RXRPC_SERVER_BOUND2;
		break;

	default:
		ret = -EINVAL;
		goto error_unlock;
	}

	release_sock(&rx->sk);
	_leave(" = 0");
	return 0;

service_in_use:
	write_unlock(&local->services_lock);
	rxrpc_unuse_local(local);
	rxrpc_put_local(local);
	ret = -EADDRINUSE;
error_unlock:
	release_sock(&rx->sk);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * set the number of pending calls permitted on a listening socket
 */
static int rxrpc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	unsigned int max, old;
	int ret;

	_enter("%p,%d", rx, backlog);

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		ret = -EADDRNOTAVAIL;
		break;
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_BOUND2:
		ASSERT(rx->local != NULL);
		max = READ_ONCE(rxrpc_max_backlog);
		ret = -EINVAL;
		if (backlog == INT_MAX)
			backlog = max;
		else if (backlog < 0 || backlog > max)
			break;
		old = sk->sk_max_ack_backlog;
		sk->sk_max_ack_backlog = backlog;
		ret = rxrpc_service_prealloc(rx, GFP_KERNEL);
		if (ret == 0)
			rx->sk.sk_state = RXRPC_SERVER_LISTENING;
		else
			sk->sk_max_ack_backlog = old;
		break;
	case RXRPC_SERVER_LISTENING:
		if (backlog == 0) {
			rx->sk.sk_state = RXRPC_SERVER_LISTEN_DISABLED;
			sk->sk_max_ack_backlog = 0;
			rxrpc_discard_prealloc(rx);
			ret = 0;
			break;
		}
		/* Fall through */
	default:
		ret = -EBUSY;
		break;
	}

	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ret;
}

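/*
 * Example (illustrative sketch): how userspace would drive rxrpc_bind() and
 * rxrpc_listen() to set up a service socket.  Binding with a non-zero
 * srx_service moves the socket to RXRPC_SERVER_BOUND; a second bind() with a
 * different service ID would move it to RXRPC_SERVER_BOUND2.  The service ID
 * here is a hypothetical value.
 *
 *	int server = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
 *
 *	srx.srx_service = 52;
 *	bind(server, (struct sockaddr *)&srx, sizeof(srx));
 *	listen(server, 100);
 */
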
/**
 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
 * @sock: The socket on which to make the call
 * @srx: The address of the peer to contact
 * @key: The security context to use (defaults to socket setting)
 * @user_call_ID: The ID to use
 * @tx_total_len: Total length of data to transmit during the call (or -1)
 * @gfp: The allocation constraints
 * @notify_rx: Where to send notifications instead of socket queue
 * @upgrade: Request service upgrade for call
 * @intr: The call is interruptible
 * @debug_id: The debug ID for tracing to be assigned to the call
 *
 * Allow a kernel service to begin a call on the nominated socket.  This just
 * sets up all the internal tracking structures and allocates connection and
 * call IDs as appropriate.  The call to be used is returned.
 *
 * The default socket destination address and security may be overridden by
 * supplying @srx and @key.
 */
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
					   struct sockaddr_rxrpc *srx,
					   struct key *key,
					   unsigned long user_call_ID,
					   s64 tx_total_len,
					   gfp_t gfp,
					   rxrpc_notify_rx_t notify_rx,
					   bool upgrade,
					   bool intr,
					   unsigned int debug_id)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_call_params p;
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter(",,%x,%lx", key_serial(key), user_call_ID);

	ret = rxrpc_validate_address(rx, srx, sizeof(*srx));
	if (ret < 0)
		return ERR_PTR(ret);

	lock_sock(&rx->sk);

	if (!key)
		key = rx->key;
	if (key && !key->payload.data[0])
		key = NULL; /* a no-security key */

	memset(&p, 0, sizeof(p));
	p.user_call_ID = user_call_ID;
	p.tx_total_len = tx_total_len;
	p.intr = intr;

	memset(&cp, 0, sizeof(cp));
	cp.local = rx->local;
	cp.key = key;
	cp.security_level = rx->min_sec_level;
	cp.exclusive = false;
	cp.upgrade = upgrade;
	cp.service_id = srx->srx_service;
	call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp, debug_id);
	/* The socket has been unlocked. */
	if (!IS_ERR(call)) {
		call->notify_rx = notify_rx;
		mutex_unlock(&call->user_mutex);
	}

	rxrpc_put_peer(cp.peer);
	_leave(" = %p", call);
	return call;
}
EXPORT_SYMBOL(rxrpc_kernel_begin_call);

/*
 * Dummy function used to stop the notifier talking to recvmsg().
 */
static void rxrpc_dummy_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
				  unsigned long call_user_ID)
{
}

/**
 * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
 * @sock: The socket the call is on
 * @call: The call to end
 *
 * Allow a kernel service to end a call it was using.  The call must be
 * complete before this is called (the call should be aborted if necessary).
 */
void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
{
	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));

	mutex_lock(&call->user_mutex);
	rxrpc_release_call(rxrpc_sk(sock->sk), call);

	/* Make sure we're not going to call back into a kernel service */
	if (call->notify_rx) {
		spin_lock_bh(&call->notify_lock);
		call->notify_rx = rxrpc_dummy_notify_rx;
		spin_unlock_bh(&call->notify_lock);
	}

	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put_kernel);
}
EXPORT_SYMBOL(rxrpc_kernel_end_call);

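/*
 * Example (illustrative sketch): how a kernel service might pair
 * rxrpc_kernel_begin_call() with rxrpc_kernel_end_call().  my_notify_rx is a
 * hypothetical rxrpc_notify_rx_t handler; key is NULL so the socket's
 * security setting applies, and a tx_total_len of -1 leaves the transmit
 * length open.
 *
 *	struct rxrpc_call *call;
 *
 *	call = rxrpc_kernel_begin_call(sock, &srx, NULL, 1, -1, GFP_KERNEL,
 *				       my_notify_rx, false, true,
 *				       atomic_inc_return(&rxrpc_debug_id));
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 *	// ... transmit the request and collect the reply ...
 *	rxrpc_kernel_end_call(sock, call);
 */
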
/**
 * rxrpc_kernel_check_life - Check to see whether a call is still alive
 * @sock: The socket the call is on
 * @call: The call to check
 * @_life: Where to store the life value
 *
 * Allow a kernel service to find out whether a call is still alive - ie. we're
 * getting ACKs from the server.  Passes back in *_life a number representing
 * the life state which can be compared to that returned by a previous call,
 * and returns true if the call is still alive.
 *
 * If the life state stalls, rxrpc_kernel_probe_life() should be called and
 * then 2*RTT waited.
 */
bool rxrpc_kernel_check_life(const struct socket *sock,
			     const struct rxrpc_call *call,
			     u32 *_life)
{
	*_life = call->acks_latest;
	return call->state != RXRPC_CALL_COMPLETE;
}
EXPORT_SYMBOL(rxrpc_kernel_check_life);

/**
 * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive
 * @sock: The socket the call is on
 * @call: The call to check
 *
 * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to
 * find out whether a call is still alive by pinging it.  This should cause the
 * life state to be bumped in about 2*RTT.
 *
 * This must be called in TASK_RUNNING state on pain of might_sleep()
 * objecting.
 */
void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
{
	rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
			  rxrpc_propose_ack_ping_for_check_life);
	rxrpc_send_ack_packet(call, true, NULL);
}
EXPORT_SYMBOL(rxrpc_kernel_probe_life);

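/*
 * Example (illustrative sketch): the liveness-polling pattern the two helpers
 * above are designed for.  last_life holds the value returned on a previous
 * iteration; if it hasn't advanced, ping the peer and allow roughly 2*RTT for
 * the life value to be bumped before checking again.
 *
 *	u32 life;
 *
 *	if (!rxrpc_kernel_check_life(sock, call, &life))
 *		goto call_complete;
 *	if (life == last_life)
 *		rxrpc_kernel_probe_life(sock, call);
 *	last_life = life;
 */
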
/**
 * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
 * @sock: The socket the call is on
 * @call: The call to query
 *
 * Allow a kernel service to retrieve the epoch value from a service call to
 * see if the client at the other end rebooted.
 */
u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call)
{
	return call->conn->proto.epoch;
}
EXPORT_SYMBOL(rxrpc_kernel_get_epoch);

/**
 * rxrpc_kernel_new_call_notification - Get notifications of new calls
 * @sock: The socket to intercept received messages on
 * @notify_new_call: Function to be called when new calls appear
 * @discard_new_call: Function to discard preallocated calls
 *
 * Allow a kernel service to be given notifications about new calls.
 */
void rxrpc_kernel_new_call_notification(
	struct socket *sock,
	rxrpc_notify_new_call_t notify_new_call,
	rxrpc_discard_new_call_t discard_new_call)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);

	rx->notify_new_call = notify_new_call;
	rx->discard_new_call = discard_new_call;
}
EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);

/**
 * rxrpc_kernel_set_max_life - Set maximum lifespan on a call
 * @sock: The socket the call is on
 * @call: The call to configure
 * @hard_timeout: The maximum lifespan of the call in jiffies
 *
 * Set the maximum lifespan of a call.  The call will end with ETIME or
 * ETIMEDOUT if it takes longer than this.
 */
void rxrpc_kernel_set_max_life(struct socket *sock, struct rxrpc_call *call,
			       unsigned long hard_timeout)
{
	unsigned long now;

	mutex_lock(&call->user_mutex);

	now = jiffies;
	hard_timeout += now;
	WRITE_ONCE(call->expect_term_by, hard_timeout);
	rxrpc_reduce_call_timer(call, hard_timeout, now, rxrpc_timer_set_for_hard);

	mutex_unlock(&call->user_mutex);
}
EXPORT_SYMBOL(rxrpc_kernel_set_max_life);

/*
 * connect an RxRPC socket
 * - this just targets it at a specific destination; no actual connection
 *   negotiation takes place
 */
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
			 int addr_len, int flags)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)addr;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

	ret = rxrpc_validate_address(rx, srx, addr_len);
	if (ret < 0) {
		_leave(" = %d [bad addr]", ret);
		return ret;
	}

	lock_sock(&rx->sk);

	ret = -EISCONN;
	if (test_bit(RXRPC_SOCK_CONNECTED, &rx->flags))
		goto error;

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
		/* Fall through */
	case RXRPC_CLIENT_UNBOUND:
	case RXRPC_CLIENT_BOUND:
		break;
	default:
		ret = -EBUSY;
		goto error;
	}

	rx->connect_srx = *srx;
	set_bit(RXRPC_SOCK_CONNECTED, &rx->flags);
	ret = 0;

error:
	release_sock(&rx->sk);
	return ret;
}

/*
 * send a message through an RxRPC socket
 * - in a client this does a number of things:
 *   - finds/sets up a connection for the security specified (if any)
 *   - initiates a call (ID in control data)
 *   - ends the request phase of a call (if MSG_MORE is not set)
 *   - sends a call data packet
 *   - may send an abort (abort code in control data)
 */
static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter(",{%d},,%zu", rx->sk.sk_state, len);

	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (m->msg_name) {
		ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
		if (ret < 0) {
			_leave(" = %d [bad addr]", ret);
			return ret;
		}
	}

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
	case RXRPC_CLIENT_UNBOUND:
		rx->srx.srx_family = AF_RXRPC;
		rx->srx.srx_service = 0;
		rx->srx.transport_type = SOCK_DGRAM;
		rx->srx.transport.family = rx->family;
		switch (rx->family) {
		case AF_INET:
			rx->srx.transport_len = sizeof(struct sockaddr_in);
			break;
#ifdef CONFIG_AF_RXRPC_IPV6
		case AF_INET6:
			rx->srx.transport_len = sizeof(struct sockaddr_in6);
			break;
#endif
		default:
			ret = -EAFNOSUPPORT;
			goto error_unlock;
		}
		local = rxrpc_lookup_local(sock_net(sock->sk), &rx->srx);
		if (IS_ERR(local)) {
			ret = PTR_ERR(local);
			goto error_unlock;
		}

		rx->local = local;
		rx->sk.sk_state = RXRPC_CLIENT_BOUND;
		/* Fall through */

	case RXRPC_CLIENT_BOUND:
		if (!m->msg_name &&
		    test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) {
			m->msg_name = &rx->connect_srx;
			m->msg_namelen = sizeof(rx->connect_srx);
		}
		/* Fall through */
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_LISTENING:
		ret = rxrpc_do_sendmsg(rx, m, len);
		/* The socket has been unlocked */
		goto out;
	default:
		ret = -EINVAL;
		goto error_unlock;
	}

error_unlock:
	release_sock(&rx->sk);
out:
	_leave(" = %d", ret);
	return ret;
}

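/*
 * Example (illustrative sketch): the control data that rxrpc_do_sendmsg()
 * digests when userspace initiates a call.  The user call ID travels in an
 * RXRPC_USER_CALL_ID cmsg; MSG_MORE keeps the request phase open, and the
 * final sendmsg() without it ends it.  The request payload (msg_iov) is
 * omitted for brevity and the call ID is an arbitrary value.
 *
 *	unsigned long user_id = 1;
 *	char ctl[CMSG_SPACE(sizeof(user_id))] = {};
 *	struct msghdr msg = {
 *		.msg_name	= &srx,
 *		.msg_namelen	= sizeof(srx),
 *		.msg_control	= ctl,
 *		.msg_controllen	= sizeof(ctl),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type	 = RXRPC_USER_CALL_ID;
 *	cmsg->cmsg_len	 = CMSG_LEN(sizeof(user_id));
 *	memcpy(CMSG_DATA(cmsg), &user_id, sizeof(user_id));
 *	sendmsg(client, &msg, MSG_MORE);
 */
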
/*
 * set RxRPC socket options
 */
static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	unsigned int min_sec_level;
	u16 service_upgrade[2];
	int ret;

	_enter(",%d,%d,,%d", level, optname, optlen);

	lock_sock(&rx->sk);
	ret = -EOPNOTSUPP;

	if (level == SOL_RXRPC) {
		switch (optname) {
		case RXRPC_EXCLUSIVE_CONNECTION:
			ret = -EINVAL;
			if (optlen != 0)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			rx->exclusive = true;
			goto success;

		case RXRPC_SECURITY_KEY:
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = rxrpc_request_key(rx, optval, optlen);
			goto error;

		case RXRPC_SECURITY_KEYRING:
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = rxrpc_server_keyring(rx, optval, optlen);
			goto error;

		case RXRPC_MIN_SECURITY_LEVEL:
			ret = -EINVAL;
			if (optlen != sizeof(unsigned int))
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = get_user(min_sec_level,
				       (unsigned int __user *) optval);
			if (ret < 0)
				goto error;
			ret = -EINVAL;
			if (min_sec_level > RXRPC_SECURITY_MAX)
				goto error;
			rx->min_sec_level = min_sec_level;
			goto success;

		case RXRPC_UPGRADEABLE_SERVICE:
			ret = -EINVAL;
			if (optlen != sizeof(service_upgrade) ||
			    rx->service_upgrade.from != 0)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_SERVER_BOUND2)
				goto error;
			ret = -EFAULT;
			if (copy_from_user(service_upgrade, optval,
					   sizeof(service_upgrade)) != 0)
				goto error;
			ret = -EINVAL;
			if ((service_upgrade[0] != rx->srx.srx_service ||
			     service_upgrade[1] != rx->second_service) &&
			    (service_upgrade[0] != rx->second_service ||
			     service_upgrade[1] != rx->srx.srx_service))
				goto error;
			rx->service_upgrade.from = service_upgrade[0];
			rx->service_upgrade.to = service_upgrade[1];
			goto success;

		default:
			break;
		}
	}

success:
	ret = 0;
error:
	release_sock(&rx->sk);
	return ret;
}

/*
 * Get socket options.
 */
static int rxrpc_getsockopt(struct socket *sock, int level, int optname,
			    char __user *optval, int __user *_optlen)
{
	int optlen;

	if (level != SOL_RXRPC)
		return -EOPNOTSUPP;

	if (get_user(optlen, _optlen))
		return -EFAULT;

	switch (optname) {
	case RXRPC_SUPPORTED_CMSG:
		if (optlen < sizeof(int))
			return -ETOOSMALL;
		if (put_user(RXRPC__SUPPORTED - 1, (int __user *)optval) ||
		    put_user(sizeof(int), _optlen))
			return -EFAULT;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}

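/*
 * Example (illustrative sketch): driving the option parser above from
 * userspace.  RXRPC_MIN_SECURITY_LEVEL must be set while the socket is still
 * in the RXRPC_UNBOUND state, otherwise -EISCONN is returned.
 *
 *	unsigned int level = RXRPC_SECURITY_ENCRYPT;
 *
 *	setsockopt(client, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
 *		   &level, sizeof(level));
 */
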
/*
 * permit an RxRPC socket to be polled
 */
static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* the socket is readable if there are any messages waiting on the Rx
	 * queue */
	if (!list_empty(&rx->recvmsg_q))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* the socket is writable if there is space to add new data to the
	 * socket; there is no guarantee that any particular call in progress
	 * on the socket may have space in the Tx ACK window */
	if (rxrpc_writable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

/*
 * create an RxRPC socket
 */
static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
			int kern)
{
	struct rxrpc_net *rxnet;
	struct rxrpc_sock *rx;
	struct sock *sk;

	_enter("%p,%d", sock, protocol);

	/* we support transport protocol UDP/UDP6 only */
	if (protocol != PF_INET &&
	    !(IS_ENABLED(CONFIG_AF_RXRPC_IPV6) && protocol == PF_INET6))
		return -EPROTONOSUPPORT;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	sock->ops = &rxrpc_rpc_ops;
	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sock_set_flag(sk, SOCK_RCU_FREE);
	sk->sk_state = RXRPC_UNBOUND;
	sk->sk_write_space = rxrpc_write_space;
	sk->sk_max_ack_backlog = 0;
	sk->sk_destruct = rxrpc_sock_destructor;

	rx = rxrpc_sk(sk);
	rx->family = protocol;
	rx->calls = RB_ROOT;

	spin_lock_init(&rx->incoming_lock);
	INIT_LIST_HEAD(&rx->sock_calls);
	INIT_LIST_HEAD(&rx->to_be_accepted);
	INIT_LIST_HEAD(&rx->recvmsg_q);
	rwlock_init(&rx->recvmsg_lock);
	rwlock_init(&rx->call_lock);
	memset(&rx->srx, 0, sizeof(rx->srx));

	rxnet = rxrpc_net(sock_net(&rx->sk));
	timer_reduce(&rxnet->peer_keepalive_timer, jiffies + 1);

	_leave(" = 0 [%p]", rx);
	return 0;
}

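/*
 * Example (illustrative sketch): the socket() calls that reach
 * rxrpc_create().  Note that the protocol argument selects the transport
 * address family rather than an IP protocol number.
 *
 *	int client  = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
 *	int client6 = socket(AF_RXRPC, SOCK_DGRAM, PF_INET6);
 */
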
/*
 * Kill all the calls on a socket and shut it down.
 */
static int rxrpc_shutdown(struct socket *sock, int flags)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	int ret = 0;

	_enter("%p,%d", sk, flags);

	if (flags != SHUT_RDWR)
		return -EOPNOTSUPP;
	if (sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	lock_sock(sk);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (sk->sk_state < RXRPC_CLOSE) {
		sk->sk_state = RXRPC_CLOSE;
		sk->sk_shutdown = SHUTDOWN_MASK;
	} else {
		ret = -ESHUTDOWN;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	rxrpc_discard_prealloc(rx);

	release_sock(sk);
	return ret;
}

/*
 * RxRPC socket destructor
 */
static void rxrpc_sock_destructor(struct sock *sk)
{
	_enter("%p", sk);

	rxrpc_purge_queue(&sk->sk_receive_queue);

	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive rxrpc socket: %p\n", sk);
		return;
	}
}

/*
 * release an RxRPC socket
 */
static int rxrpc_release_sock(struct sock *sk)
{
	struct rxrpc_sock *rx = rxrpc_sk(sk);

	_enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));

	/* declare the socket closed for business */
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	/* We want to kill off all connections from a service socket
	 * as fast as possible because we can't share these; client
	 * sockets, on the other hand, can share an endpoint.
	 */
	switch (sk->sk_state) {
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_BOUND2:
	case RXRPC_SERVER_LISTENING:
	case RXRPC_SERVER_LISTEN_DISABLED:
		rx->local->service_closed = true;
		break;
	}

	spin_lock_bh(&sk->sk_receive_queue.lock);
	sk->sk_state = RXRPC_CLOSE;
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	if (rx->local && rcu_access_pointer(rx->local->service) == rx) {
		write_lock(&rx->local->services_lock);
		rcu_assign_pointer(rx->local->service, NULL);
		write_unlock(&rx->local->services_lock);
	}

	/* try to flush out this socket */
	rxrpc_discard_prealloc(rx);
	rxrpc_release_calls_on_socket(rx);
	flush_workqueue(rxrpc_workqueue);
	rxrpc_purge_queue(&sk->sk_receive_queue);

	rxrpc_unuse_local(rx->local);
	rxrpc_put_local(rx->local);
	rx->local = NULL;
	key_put(rx->key);
	rx->key = NULL;
	key_put(rx->securities);
	rx->securities = NULL;
	sock_put(sk);

	_leave(" = 0");
	return 0;
}

/*
 * release an RxRPC BSD socket on close() or equivalent
 */
static int rxrpc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	_enter("%p{%p}", sock, sk);

	if (!sk)
		return 0;

	sock->sk = NULL;

	return rxrpc_release_sock(sk);
}

/*
 * RxRPC network protocol
 */
static const struct proto_ops rxrpc_rpc_ops = {
	.family		= PF_RXRPC,
	.owner		= THIS_MODULE,
	.release	= rxrpc_release,
	.bind		= rxrpc_bind,
	.connect	= rxrpc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= rxrpc_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= rxrpc_listen,
	.shutdown	= rxrpc_shutdown,
	.setsockopt	= rxrpc_setsockopt,
	.getsockopt	= rxrpc_getsockopt,
	.sendmsg	= rxrpc_sendmsg,
	.recvmsg	= rxrpc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct proto rxrpc_proto = {
	.name		= "RXRPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct rxrpc_sock),
	.max_header	= sizeof(struct rxrpc_wire_header),
};

static const struct net_proto_family rxrpc_family_ops = {
	.family	= PF_RXRPC,
	.create	= rxrpc_create,
	.owner	= THIS_MODULE,
};

/*
 * initialise and register the RxRPC protocol
 */
static int __init af_rxrpc_init(void)
{
	int ret = -1;
	unsigned int tmp;

	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof_field(struct sk_buff, cb));

	get_random_bytes(&tmp, sizeof(tmp));
	tmp &= 0x3fffffff;
	if (tmp == 0)
		tmp = 1;
	idr_set_cursor(&rxrpc_client_conn_ids, tmp);

	ret = -ENOMEM;
	rxrpc_call_jar = kmem_cache_create(
		"rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!rxrpc_call_jar) {
		pr_notice("Failed to allocate call jar\n");
		goto error_call_jar;
	}

	rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
	if (!rxrpc_workqueue) {
		pr_notice("Failed to allocate work queue\n");
		goto error_work_queue;
	}

	ret = rxrpc_init_security();
	if (ret < 0) {
		pr_crit("Cannot initialise security\n");
		goto error_security;
	}

	ret = register_pernet_subsys(&rxrpc_net_ops);
	if (ret)
		goto error_pernet;

	ret = proto_register(&rxrpc_proto, 1);
	if (ret < 0) {
		pr_crit("Cannot register protocol\n");
		goto error_proto;
	}

	ret = sock_register(&rxrpc_family_ops);
	if (ret < 0) {
		pr_crit("Cannot register socket family\n");
		goto error_sock;
	}

	ret = register_key_type(&key_type_rxrpc);
	if (ret < 0) {
		pr_crit("Cannot register client key type\n");
		goto error_key_type;
	}

	ret = register_key_type(&key_type_rxrpc_s);
	if (ret < 0) {
		pr_crit("Cannot register server key type\n");
		goto error_key_type_s;
	}

	ret = rxrpc_sysctl_init();
	if (ret < 0) {
		pr_crit("Cannot register sysctls\n");
		goto error_sysctls;
	}

	return 0;

error_sysctls:
	unregister_key_type(&key_type_rxrpc_s);
error_key_type_s:
	unregister_key_type(&key_type_rxrpc);
error_key_type:
	sock_unregister(PF_RXRPC);
error_sock:
	proto_unregister(&rxrpc_proto);
error_proto:
	unregister_pernet_subsys(&rxrpc_net_ops);
error_pernet:
	rxrpc_exit_security();
error_security:
	destroy_workqueue(rxrpc_workqueue);
error_work_queue:
	kmem_cache_destroy(rxrpc_call_jar);
error_call_jar:
	return ret;
}

/*
 * unregister the RxRPC protocol
 */
static void __exit af_rxrpc_exit(void)
{
	_enter("");
	rxrpc_sysctl_exit();
	unregister_key_type(&key_type_rxrpc_s);
	unregister_key_type(&key_type_rxrpc);
	sock_unregister(PF_RXRPC);
	proto_unregister(&rxrpc_proto);
	unregister_pernet_subsys(&rxrpc_net_ops);
	ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
	ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();
	rxrpc_destroy_client_conn_ids();

	destroy_workqueue(rxrpc_workqueue);
	rxrpc_exit_security();
	kmem_cache_destroy(rxrpc_call_jar);
	_leave("");
}

module_init(af_rxrpc_init);
module_exit(af_rxrpc_exit);