/* AF_RXRPC implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/key-type.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

MODULE_DESCRIPTION("RxRPC network protocol");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_RXRPC);

/* Debugging mask; writable at runtime via the "debug" module parameter. */
unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "RxRPC debugging mask");

/* Default accept backlog applied to each new socket in rxrpc_create(). */
static int sysctl_rxrpc_max_qlen __read_mostly = 10;

static struct proto rxrpc_proto;
static const struct proto_ops rxrpc_rpc_ops;

/* local epoch for detecting local-end reset */
u32 rxrpc_epoch;

/* current debugging ID */
atomic_t rxrpc_debug_id;

/* count of skbs currently in use */
atomic_t rxrpc_n_skbs;

struct workqueue_struct *rxrpc_workqueue;

static void rxrpc_sock_destructor(struct sock *);

/*
 * see if an RxRPC socket is currently writable
 * - writable while the amount of queued write memory is below the socket's
 *   send-buffer limit
 */
static inline int rxrpc_writable(struct sock *sk)
{
        return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
}

/*
 * wait for write bufferage to become available
 * - installed as sk->sk_write_space in rxrpc_create(); wakes any sleepers
 *   on the socket wait queue once the socket becomes writable again
 */
static void rxrpc_write_space(struct sock *sk)
{
        _enter("%p", sk);
        rcu_read_lock();
        if (rxrpc_writable(sk)) {
                /* sk_wq is RCU-protected, hence the read-side lock above */
                struct socket_wq *wq = rcu_dereference(sk->sk_wq);

                if
(skwq_has_sleeper(wq))
                        wake_up_interruptible(&wq->wait);
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        rcu_read_unlock();
}

/*
 * validate an RxRPC address
 * - checks the outer sockaddr_rxrpc framing, then the embedded transport
 *   address; only AF_INET over SOCK_DGRAM is accepted here
 * - zeroes any trailing padding so addresses compare cleanly with memcmp
 */
static int rxrpc_validate_address(struct rxrpc_sock *rx,
                                  struct sockaddr_rxrpc *srx,
                                  int len)
{
        unsigned int tail;

        if (len < sizeof(struct sockaddr_rxrpc))
                return -EINVAL;

        if (srx->srx_family != AF_RXRPC)
                return -EAFNOSUPPORT;

        if (srx->transport_type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        /* remaining bytes must be able to hold the transport address */
        len -= offsetof(struct sockaddr_rxrpc, transport);
        if (srx->transport_len < sizeof(sa_family_t) ||
            srx->transport_len > len)
                return -EINVAL;

        /* transport family must match the one the socket was created with */
        if (srx->transport.family != rx->proto)
                return -EAFNOSUPPORT;

        switch (srx->transport.family) {
        case AF_INET:
                _debug("INET: %x @ %pI4",
                       ntohs(srx->transport.sin.sin_port),
                       &srx->transport.sin.sin_addr);
                tail = offsetof(struct sockaddr_rxrpc, transport.sin.__pad);
                break;

        case AF_INET6:
        default:
                return -EAFNOSUPPORT;
        }

        /* scrub anything after the real address data */
        if (tail < len)
                memset((void *)srx + tail, 0, len - tail);
        return 0;
}

/*
 * bind a local address to an RxRPC socket
 * - only permitted while the socket is still RXRPC_UNCONNECTED
 * - a non-zero srx_service registers this socket as a server for that
 *   service on the local endpoint; zero leaves it as a client socket
 */
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
        struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
        struct sock *sk = sock->sk;
        struct rxrpc_local *local;
        struct rxrpc_sock *rx = rxrpc_sk(sk), *prx;
        int ret;

        _enter("%p,%p,%d", rx, saddr, len);

        ret = rxrpc_validate_address(rx, srx, len);
        if (ret < 0)
                goto error;

        lock_sock(&rx->sk);

        if (rx->sk.sk_state != RXRPC_UNCONNECTED) {
                ret = -EINVAL;
                goto error_unlock;
        }

        memcpy(&rx->srx, srx, sizeof(rx->srx));

        /* Find or create a local transport endpoint to use */
        local = rxrpc_lookup_local(&rx->srx);
        if (IS_ERR(local)) {
                ret = PTR_ERR(local);
                goto error_unlock;
        }

        rx->local =
local;
        if (srx->srx_service) {
                /* server bind: the service ID must not already be claimed
                 * on this local endpoint */
                write_lock_bh(&local->services_lock);
                list_for_each_entry(prx, &local->services, listen_link) {
                        if (prx->srx.srx_service == srx->srx_service)
                                goto service_in_use;
                }

                list_add_tail(&rx->listen_link, &local->services);
                write_unlock_bh(&local->services_lock);

                rx->sk.sk_state = RXRPC_SERVER_BOUND;
        } else {
                rx->sk.sk_state = RXRPC_CLIENT_BOUND;
        }

        release_sock(&rx->sk);
        _leave(" = 0");
        return 0;

service_in_use:
        ret = -EADDRINUSE;
        write_unlock_bh(&local->services_lock);
error_unlock:
        release_sock(&rx->sk);
error:
        _leave(" = %d", ret);
        return ret;
}

/*
 * set the number of pending calls permitted on a listening socket
 * - only valid on a socket already bound as a server (RXRPC_SERVER_BOUND)
 */
static int rxrpc_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        struct rxrpc_sock *rx = rxrpc_sk(sk);
        int ret;

        _enter("%p,%d", rx, backlog);

        lock_sock(&rx->sk);

        switch (rx->sk.sk_state) {
        case RXRPC_UNCONNECTED:
                ret = -EADDRNOTAVAIL;
                break;
        case RXRPC_CLIENT_BOUND:
        case RXRPC_CLIENT_CONNECTED:
        default:
                ret = -EBUSY;
                break;
        case RXRPC_SERVER_BOUND:
                ASSERT(rx->local != NULL);
                sk->sk_max_ack_backlog = backlog;
                rx->sk.sk_state = RXRPC_SERVER_LISTENING;
                ret = 0;
                break;
        }

        release_sock(&rx->sk);
        _leave(" = %d", ret);
        return ret;
}

/*
 * find a transport by address
 * - the socket must already have a local endpoint (i.e. be bound/connected)
 * - returns a transport with a usage reference held, or an ERR_PTR
 */
static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock,
                                                       struct sockaddr *addr,
                                                       int addr_len, int flags,
                                                       gfp_t gfp)
{
        struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
        struct rxrpc_transport *trans;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        struct rxrpc_peer *peer;

        _enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

        ASSERT(rx->local != NULL);
        ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED);

        if
(rx->srx.transport_type != srx->transport_type)
                return ERR_PTR(-ESOCKTNOSUPPORT);
        if (rx->srx.transport.family != srx->transport.family)
                return ERR_PTR(-EAFNOSUPPORT);

        /* find a remote transport endpoint from the local one */
        peer = rxrpc_get_peer(srx, gfp);
        if (IS_ERR(peer))
                return ERR_CAST(peer);

        /* find a transport; the peer ref is dropped again as the transport
         * holds its own reference on the peer */
        trans = rxrpc_get_transport(rx->local, peer, gfp);
        rxrpc_put_peer(peer);
        _leave(" = %p", trans);
        return trans;
}

/**
 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
 * @sock: The socket on which to make the call
 * @srx: The address of the peer to contact (defaults to socket setting)
 * @key: The security context to use (defaults to socket setting)
 * @user_call_ID: The ID to use
 * @gfp: Allocation flags to use for internal allocations
 *
 * Allow a kernel service to begin a call on the nominated socket. This just
 * sets up all the internal tracking structures and allocates connection and
 * call IDs as appropriate. The call to be used is returned.
 *
 * The default socket destination address and security may be overridden by
 * supplying @srx and @key.
 */
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
                                           struct sockaddr_rxrpc *srx,
                                           struct key *key,
                                           unsigned long user_call_ID,
                                           gfp_t gfp)
{
        struct rxrpc_conn_bundle *bundle;
        struct rxrpc_transport *trans;
        struct rxrpc_call *call;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);

        _enter(",,%x,%lx", key_serial(key), user_call_ID);

        lock_sock(&rx->sk);

        if (srx) {
                /* explicit destination: look up (and take a ref on) a
                 * transport for it */
                trans = rxrpc_name_to_transport(sock, (struct sockaddr *) srx,
                                                sizeof(*srx), 0, gfp);
                if (IS_ERR(trans)) {
                        call = ERR_CAST(trans);
                        trans = NULL;
                        goto out_notrans;
                }
        } else {
                /* fall back to the transport the socket was connected to */
                trans = rx->trans;
                if (!trans) {
                        call = ERR_PTR(-ENOTCONN);
                        goto out_notrans;
                }
                atomic_inc(&trans->usage);
        }

        if (!srx)
                srx = &rx->srx;
        if (!key)
                key = rx->key;
        if (key && !key->payload.data[0])
                key = NULL; /* a no-security key */

        bundle = rxrpc_get_bundle(rx, trans, key, srx->srx_service, gfp);
        if (IS_ERR(bundle)) {
                call = ERR_CAST(bundle);
                goto out;
        }

        call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, true,
                                     gfp);
        /* the call holds its own refs now */
        rxrpc_put_bundle(trans, bundle);
out:
        rxrpc_put_transport(trans);
out_notrans:
        release_sock(&rx->sk);
        _leave(" = %p", call);
        return call;
}
EXPORT_SYMBOL(rxrpc_kernel_begin_call);

/**
 * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
 * @call: The call to end
 *
 * Allow a kernel service to end a call it was using. The call must be
 * complete before this is called (the call should be aborted if necessary).
 */
void rxrpc_kernel_end_call(struct rxrpc_call *call)
{
        _enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
        /* detach from the user-ID tree on the owning socket, then drop the
         * caller's reference */
        rxrpc_remove_user_ID(call->socket, call);
        rxrpc_put_call(call);
}
EXPORT_SYMBOL(rxrpc_kernel_end_call);

/**
 * rxrpc_kernel_intercept_rx_messages - Intercept received RxRPC messages
 * @sock: The socket to intercept received messages on
 * @interceptor: The function to pass the messages to
 *
 * Allow a kernel service to intercept messages heading for the Rx queue on an
 * RxRPC socket. They get passed to the specified function instead.
 * @interceptor should free the socket buffers it is given. @interceptor is
 * called with the socket receive queue spinlock held and softirqs disabled -
 * this ensures that the messages will be delivered in the right order.
 */
void rxrpc_kernel_intercept_rx_messages(struct socket *sock,
                                        rxrpc_interceptor_t interceptor)
{
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);

        _enter("");
        rx->interceptor = interceptor;
}

EXPORT_SYMBOL(rxrpc_kernel_intercept_rx_messages);

/*
 * connect an RxRPC socket
 * - this just targets it at a specific destination; no actual connection
 *   negotiation takes place
 */
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
                         int addr_len, int flags)
{
        struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
        struct sock *sk = sock->sk;
        struct rxrpc_transport *trans;
        struct rxrpc_local *local;
        struct rxrpc_sock *rx = rxrpc_sk(sk);
        int ret;

        _enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

        ret = rxrpc_validate_address(rx, srx, addr_len);
        if (ret < 0) {
                _leave(" = %d [bad addr]", ret);
                return ret;
        }

        lock_sock(&rx->sk);

        switch (rx->sk.sk_state) {
        case RXRPC_UNCONNECTED:
                /* find a local transport endpoint if we don't have one already */
                ASSERTCMP(rx->local, ==,
NULL);
                /* implicit bind to a wildcard local address of the same
                 * transport family as the destination */
                rx->srx.srx_family = AF_RXRPC;
                rx->srx.srx_service = 0;
                rx->srx.transport_type = srx->transport_type;
                rx->srx.transport_len = sizeof(sa_family_t);
                rx->srx.transport.family = srx->transport.family;
                local = rxrpc_lookup_local(&rx->srx);
                if (IS_ERR(local)) {
                        release_sock(&rx->sk);
                        return PTR_ERR(local);
                }
                rx->local = local;
                rx->sk.sk_state = RXRPC_CLIENT_BOUND;
                /* fall through - now bound, proceed to connect */
        case RXRPC_CLIENT_BOUND:
                break;
        case RXRPC_CLIENT_CONNECTED:
                release_sock(&rx->sk);
                return -EISCONN;
        default:
                release_sock(&rx->sk);
                return -EBUSY; /* server sockets can't connect as well */
        }

        trans = rxrpc_name_to_transport(sock, addr, addr_len, flags,
                                        GFP_KERNEL);
        if (IS_ERR(trans)) {
                release_sock(&rx->sk);
                _leave(" = %ld", PTR_ERR(trans));
                return PTR_ERR(trans);
        }

        /* the socket keeps the transport reference taken above */
        rx->trans = trans;
        rx->sk.sk_state = RXRPC_CLIENT_CONNECTED;

        release_sock(&rx->sk);
        return 0;
}

/*
 * send a message through an RxRPC socket
 * - in a client this does a number of things:
 *   - finds/sets up a connection for the security specified (if any)
 *   - initiates a call (ID in control data)
 *   - ends the request phase of a call (if MSG_MORE is not set)
 *   - sends a call data packet
 *   - may send an abort (abort code in control data)
 */
static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
        struct rxrpc_transport *trans;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        int ret;

        _enter(",{%d},,%zu", rx->sk.sk_state, len);

        if (m->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (m->msg_name) {
                ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
                if (ret < 0) {
                        _leave(" = %d [bad addr]", ret);
                        return ret;
                }
        }

        trans = NULL;
        lock_sock(&rx->sk);

        if (m->msg_name) {
                /* sending to an explicit address */
                ret = -EISCONN;
                trans = rxrpc_name_to_transport(sock, m->msg_name,
m->msg_namelen, 0, GFP_KERNEL);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        trans = NULL;
                        goto out;
                }
        } else {
                /* sending on the connected transport, if any */
                trans = rx->trans;
                if (trans)
                        atomic_inc(&trans->usage);
        }

        switch (rx->sk.sk_state) {
        case RXRPC_SERVER_LISTENING:
                if (!m->msg_name) {
                        ret = rxrpc_server_sendmsg(rx, m, len);
                        break;
                }
                /* fall through - addressed send on a server socket is
                 * treated like a client send */
        case RXRPC_SERVER_BOUND:
        case RXRPC_CLIENT_BOUND:
                if (!m->msg_name) {
                        ret = -ENOTCONN;
                        break;
                }
                /* fall through - an address was supplied */
        case RXRPC_CLIENT_CONNECTED:
                ret = rxrpc_client_sendmsg(rx, trans, m, len);
                break;
        default:
                ret = -ENOTCONN;
                break;
        }

out:
        release_sock(&rx->sk);
        if (trans)
                rxrpc_put_transport(trans);
        _leave(" = %d", ret);
        return ret;
}

/*
 * set RxRPC socket options
 * - all options here may only be changed while the socket is still
 *   RXRPC_UNCONNECTED
 */
static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
                            char __user *optval, unsigned int optlen)
{
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        unsigned int min_sec_level;
        int ret;

        _enter(",%d,%d,,%d", level, optname, optlen);

        lock_sock(&rx->sk);
        ret = -EOPNOTSUPP;

        if (level == SOL_RXRPC) {
                switch (optname) {
                case RXRPC_EXCLUSIVE_CONNECTION:
                        ret = -EINVAL;
                        if (optlen != 0)
                                goto error;
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNCONNECTED)
                                goto error;
                        set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags);
                        goto success;

                case RXRPC_SECURITY_KEY:
                        ret = -EINVAL;
                        if (rx->key)
                                goto error;
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNCONNECTED)
                                goto error;
                        ret = rxrpc_request_key(rx, optval, optlen);
                        goto error;

                case RXRPC_SECURITY_KEYRING:
                        ret = -EINVAL;
                        if (rx->key)
                                goto error;
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNCONNECTED)
                                goto error;
                        ret = rxrpc_server_keyring(rx, optval, optlen);
                        goto error;

                case RXRPC_MIN_SECURITY_LEVEL:
                        ret = -EINVAL;
                        if (optlen !=
sizeof(unsigned int))
                                goto error;
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNCONNECTED)
                                goto error;
                        ret = get_user(min_sec_level,
                                       (unsigned int __user *) optval);
                        if (ret < 0)
                                goto error;
                        ret = -EINVAL;
                        if (min_sec_level > RXRPC_SECURITY_MAX)
                                goto error;
                        rx->min_sec_level = min_sec_level;
                        goto success;

                default:
                        break;
                }
        }

success:
        ret = 0;
error:
        release_sock(&rx->sk);
        return ret;
}

/*
 * permit an RxRPC socket to be polled
 */
static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
                               poll_table *wait)
{
        unsigned int mask;
        struct sock *sk = sock->sk;

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        /* the socket is readable if there are any messages waiting on the Rx
         * queue */
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;

        /* the socket is writable if there is space to add new data to the
         * socket; there is no guarantee that any particular call in progress
         * on the socket may have space in the Tx ACK window */
        if (rxrpc_writable(sk))
                mask |= POLLOUT | POLLWRNORM;

        return mask;
}

/*
 * create an RxRPC socket
 * - only datagram sockets over IPv4 in the initial network namespace are
 *   supported by this implementation
 */
static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
                        int kern)
{
        struct rxrpc_sock *rx;
        struct sock *sk;

        _enter("%p,%d", sock, protocol);

        if (!net_eq(net, &init_net))
                return -EAFNOSUPPORT;

        /* we support transport protocol UDP/UDP6 only */
        if (protocol != PF_INET)
                return -EPROTONOSUPPORT;

        if (sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        sock->ops = &rxrpc_rpc_ops;
        sock->state = SS_UNCONNECTED;

        sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, kern);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);
        sk->sk_state = RXRPC_UNCONNECTED;
        sk->sk_write_space =
rxrpc_write_space;
        sk->sk_max_ack_backlog = sysctl_rxrpc_max_qlen;
        sk->sk_destruct = rxrpc_sock_destructor;

        rx = rxrpc_sk(sk);
        rx->proto = protocol;
        rx->calls = RB_ROOT;

        INIT_LIST_HEAD(&rx->listen_link);
        INIT_LIST_HEAD(&rx->secureq);
        INIT_LIST_HEAD(&rx->acceptq);
        rwlock_init(&rx->call_lock);
        memset(&rx->srx, 0, sizeof(rx->srx));

        _leave(" = 0 [%p]", rx);
        return 0;
}

/*
 * RxRPC socket destructor
 * - invoked via sk->sk_destruct when the last reference goes away; sanity
 *   checks that nothing is still queued or attached
 */
static void rxrpc_sock_destructor(struct sock *sk)
{
        _enter("%p", sk);

        rxrpc_purge_queue(&sk->sk_receive_queue);

        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
        WARN_ON(!sk_unhashed(sk));
        WARN_ON(sk->sk_socket);

        if (!sock_flag(sk, SOCK_DEAD)) {
                printk("Attempt to release alive rxrpc socket: %p\n", sk);
                return;
        }
}

/*
 * release an RxRPC socket
 * - marks the socket closed, tears down its calls and queued messages, then
 *   drops the references it holds on connection, bundle, transport and local
 *   endpoint before putting the sock itself
 */
static int rxrpc_release_sock(struct sock *sk)
{
        struct rxrpc_sock *rx = rxrpc_sk(sk);

        _enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));

        /* declare the socket closed for business */
        sock_orphan(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;

        spin_lock_bh(&sk->sk_receive_queue.lock);
        sk->sk_state = RXRPC_CLOSE;
        spin_unlock_bh(&sk->sk_receive_queue.lock);

        ASSERTCMP(rx->listen_link.next, !=, LIST_POISON1);

        if (!list_empty(&rx->listen_link)) {
                /* deregister the service this socket was listening on */
                write_lock_bh(&rx->local->services_lock);
                list_del(&rx->listen_link);
                write_unlock_bh(&rx->local->services_lock);
        }

        /* try to flush out this socket */
        rxrpc_release_calls_on_socket(rx);
        flush_workqueue(rxrpc_workqueue);
        rxrpc_purge_queue(&sk->sk_receive_queue);

        if (rx->conn) {
                rxrpc_put_connection(rx->conn);
                rx->conn = NULL;
        }

        if (rx->bundle) {
                rxrpc_put_bundle(rx->trans, rx->bundle);
                rx->bundle = NULL;
        }
        if (rx->trans) {
                rxrpc_put_transport(rx->trans);
                rx->trans = NULL;
        }
        if
(rx->local) { 715 rxrpc_put_local(rx->local); 716 rx->local = NULL; 717 } 718 719 key_put(rx->key); 720 rx->key = NULL; 721 key_put(rx->securities); 722 rx->securities = NULL; 723 sock_put(sk); 724 725 _leave(" = 0"); 726 return 0; 727 } 728 729 /* 730 * release an RxRPC BSD socket on close() or equivalent 731 */ 732 static int rxrpc_release(struct socket *sock) 733 { 734 struct sock *sk = sock->sk; 735 736 _enter("%p{%p}", sock, sk); 737 738 if (!sk) 739 return 0; 740 741 sock->sk = NULL; 742 743 return rxrpc_release_sock(sk); 744 } 745 746 /* 747 * RxRPC network protocol 748 */ 749 static const struct proto_ops rxrpc_rpc_ops = { 750 .family = PF_RXRPC, 751 .owner = THIS_MODULE, 752 .release = rxrpc_release, 753 .bind = rxrpc_bind, 754 .connect = rxrpc_connect, 755 .socketpair = sock_no_socketpair, 756 .accept = sock_no_accept, 757 .getname = sock_no_getname, 758 .poll = rxrpc_poll, 759 .ioctl = sock_no_ioctl, 760 .listen = rxrpc_listen, 761 .shutdown = sock_no_shutdown, 762 .setsockopt = rxrpc_setsockopt, 763 .getsockopt = sock_no_getsockopt, 764 .sendmsg = rxrpc_sendmsg, 765 .recvmsg = rxrpc_recvmsg, 766 .mmap = sock_no_mmap, 767 .sendpage = sock_no_sendpage, 768 }; 769 770 static struct proto rxrpc_proto = { 771 .name = "RXRPC", 772 .owner = THIS_MODULE, 773 .obj_size = sizeof(struct rxrpc_sock), 774 .max_header = sizeof(struct rxrpc_wire_header), 775 }; 776 777 static const struct net_proto_family rxrpc_family_ops = { 778 .family = PF_RXRPC, 779 .create = rxrpc_create, 780 .owner = THIS_MODULE, 781 }; 782 783 /* 784 * initialise and register the RxRPC protocol 785 */ 786 static int __init af_rxrpc_init(void) 787 { 788 int ret = -1; 789 790 BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb)); 791 792 rxrpc_epoch = get_seconds(); 793 794 ret = -ENOMEM; 795 rxrpc_call_jar = kmem_cache_create( 796 "rxrpc_call_jar", sizeof(struct rxrpc_call), 0, 797 SLAB_HWCACHE_ALIGN, NULL); 798 if (!rxrpc_call_jar) { 799 printk(KERN_NOTICE "RxRPC: 
Failed to allocate call jar\n"); 800 goto error_call_jar; 801 } 802 803 rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1); 804 if (!rxrpc_workqueue) { 805 printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n"); 806 goto error_work_queue; 807 } 808 809 ret = rxrpc_init_security(); 810 if (ret < 0) { 811 printk(KERN_CRIT "RxRPC: Cannot initialise security\n"); 812 goto error_security; 813 } 814 815 ret = proto_register(&rxrpc_proto, 1); 816 if (ret < 0) { 817 printk(KERN_CRIT "RxRPC: Cannot register protocol\n"); 818 goto error_proto; 819 } 820 821 ret = sock_register(&rxrpc_family_ops); 822 if (ret < 0) { 823 printk(KERN_CRIT "RxRPC: Cannot register socket family\n"); 824 goto error_sock; 825 } 826 827 ret = register_key_type(&key_type_rxrpc); 828 if (ret < 0) { 829 printk(KERN_CRIT "RxRPC: Cannot register client key type\n"); 830 goto error_key_type; 831 } 832 833 ret = register_key_type(&key_type_rxrpc_s); 834 if (ret < 0) { 835 printk(KERN_CRIT "RxRPC: Cannot register server key type\n"); 836 goto error_key_type_s; 837 } 838 839 ret = rxrpc_sysctl_init(); 840 if (ret < 0) { 841 printk(KERN_CRIT "RxRPC: Cannot register sysctls\n"); 842 goto error_sysctls; 843 } 844 845 #ifdef CONFIG_PROC_FS 846 proc_create("rxrpc_calls", 0, init_net.proc_net, &rxrpc_call_seq_fops); 847 proc_create("rxrpc_conns", 0, init_net.proc_net, 848 &rxrpc_connection_seq_fops); 849 #endif 850 return 0; 851 852 error_sysctls: 853 unregister_key_type(&key_type_rxrpc_s); 854 error_key_type_s: 855 unregister_key_type(&key_type_rxrpc); 856 error_key_type: 857 sock_unregister(PF_RXRPC); 858 error_sock: 859 proto_unregister(&rxrpc_proto); 860 error_proto: 861 destroy_workqueue(rxrpc_workqueue); 862 error_security: 863 rxrpc_exit_security(); 864 error_work_queue: 865 kmem_cache_destroy(rxrpc_call_jar); 866 error_call_jar: 867 return ret; 868 } 869 870 /* 871 * unregister the RxRPC protocol 872 */ 873 static void __exit af_rxrpc_exit(void) 874 { 875 _enter(""); 876 rxrpc_sysctl_exit(); 877 
        unregister_key_type(&key_type_rxrpc_s);
        unregister_key_type(&key_type_rxrpc);
        sock_unregister(PF_RXRPC);
        proto_unregister(&rxrpc_proto);
        /* tear down any remaining protocol objects, innermost last */
        rxrpc_destroy_all_calls();
        rxrpc_destroy_all_connections();
        rxrpc_destroy_all_transports();
        rxrpc_destroy_all_peers();
        rxrpc_destroy_all_locals();

        /* all skbs should have been released by now */
        ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0);

        _debug("flush scheduled work");
        flush_workqueue(rxrpc_workqueue);
        remove_proc_entry("rxrpc_conns", init_net.proc_net);
        remove_proc_entry("rxrpc_calls", init_net.proc_net);
        destroy_workqueue(rxrpc_workqueue);
        rxrpc_exit_security();
        kmem_cache_destroy(rxrpc_call_jar);
        _leave("");
}

module_init(af_rxrpc_init);
module_exit(af_rxrpc_exit);