/* SPDX-License-Identifier: GPL-2.0-or-later */
/* AF_RXRPC internal definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/atomic.h>
#include <linux/seqlock.h>
#include <linux/win_minmax.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <keys/rxrpc-type.h>
#include "protocol.h"

#define FCRYPT_BSIZE 8
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		__be32	n[2];
	};
} __attribute__((aligned(8)));

#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))

struct key_preparsed_payload;
struct rxrpc_connection;
struct rxrpc_txbuf;

/*
 * Mark applied to socket buffers in skb->mark.  skb->priority is used
 * to pass supplementary information.
 */
enum rxrpc_skb_mark {
	RXRPC_SKB_MARK_PACKET,		/* Received packet */
	RXRPC_SKB_MARK_ERROR,		/* Error notification */
	RXRPC_SKB_MARK_REJECT_BUSY,	/* Reject with BUSY */
	RXRPC_SKB_MARK_REJECT_ABORT,	/* Reject with ABORT (code in skb->priority) */
};

/*
 * sk_state for RxRPC sockets
 */
enum {
	RXRPC_UNBOUND = 0,
	RXRPC_CLIENT_UNBOUND,		/* Unbound socket used as client */
	RXRPC_CLIENT_BOUND,		/* client local address bound */
	RXRPC_SERVER_BOUND,		/* server local address bound */
	RXRPC_SERVER_BOUND2,		/* second server local address bound */
	RXRPC_SERVER_LISTENING,		/* server listening for connections */
	RXRPC_SERVER_LISTEN_DISABLED,	/* server listening disabled */
	RXRPC_CLOSE,			/* socket is being closed */
};

/*
 * Per-network namespace data.
 */
struct rxrpc_net {
	struct proc_dir_entry	*proc_net;	/* Subdir in /proc/net */
	u32			epoch;		/* Local epoch for detecting local-end reset */
	struct list_head	calls;		/* List of calls active in this namespace */
	spinlock_t		call_lock;	/* Lock for ->calls */
	atomic_t		nr_calls;	/* Count of allocated calls */

	atomic_t		nr_conns;
	struct list_head	conn_proc_list;	/* List of conns in this namespace for proc */
	struct list_head	service_conns;	/* Service conns in this namespace */
	rwlock_t		conn_lock;	/* Lock for ->conn_proc_list, ->service_conns */
	struct work_struct	service_conn_reaper;
	struct timer_list	service_conn_reap_timer;

	bool			live;

	bool			kill_all_client_conns;
	atomic_t		nr_client_conns;
	spinlock_t		client_conn_cache_lock; /* Lock for ->*_client_conns */
	struct mutex		client_conn_discard_lock; /* Prevent multiple discarders */
	struct list_head	idle_client_conns;
	struct work_struct	client_conn_reaper;
	struct timer_list	client_conn_reap_timer;

	struct hlist_head	local_endpoints;
	struct mutex		local_mutex;	/* Lock for ->local_endpoints */

	DECLARE_HASHTABLE	(peer_hash, 10);
	spinlock_t		peer_hash_lock;	/* Lock for ->peer_hash */

#define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
	u8			peer_keepalive_cursor;
	time64_t		peer_keepalive_base;
	struct list_head	peer_keepalive[32];
	struct list_head	peer_keepalive_new;
	struct timer_list	peer_keepalive_timer;
	struct work_struct	peer_keepalive_work;

	atomic_t		stat_tx_data;
	atomic_t		stat_tx_data_retrans;
	atomic_t		stat_tx_data_send;
	atomic_t		stat_tx_data_send_frag;
	atomic_t		stat_tx_data_send_fail;
	atomic_t		stat_tx_data_underflow;
	atomic_t		stat_tx_data_cwnd_reset;
	atomic_t		stat_rx_data;
	atomic_t		stat_rx_data_reqack;
	atomic_t		stat_rx_data_jumbo;

	atomic_t		stat_tx_ack_fill;
	atomic_t		stat_tx_ack_send;
	atomic_t		stat_tx_ack_skip;
	atomic_t		stat_tx_acks[256];
	atomic_t		stat_rx_acks[256];

	atomic_t		stat_why_req_ack[8];

	atomic_t		stat_io_loop;
};

/*
 * Service backlog preallocation.
 *
 * This contains circular buffers of preallocated peers, connections and calls
 * for incoming service calls and their head and tail pointers.  This allows
 * calls to be set up in the data_ready handler, thereby avoiding the need to
 * shuffle packets around so much.
 */
struct rxrpc_backlog {
	unsigned short		peer_backlog_head;
	unsigned short		peer_backlog_tail;
	unsigned short		conn_backlog_head;
	unsigned short		conn_backlog_tail;
	unsigned short		call_backlog_head;
	unsigned short		call_backlog_tail;
#define RXRPC_BACKLOG_MAX	32
	struct rxrpc_peer	*peer_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_connection	*conn_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_call	*call_backlog[RXRPC_BACKLOG_MAX];
};

/*
 * RxRPC socket definition
 */
struct rxrpc_sock {
	/* WARNING: sk has to be the first member */
	struct sock		sk;
	rxrpc_notify_new_call_t	notify_new_call; /* Func to notify of new call */
	rxrpc_discard_new_call_t discard_new_call; /* Func to discard a new call */
	struct rxrpc_local	*local;		/* local endpoint */
	struct rxrpc_backlog	*backlog;	/* Preallocation for services */
	spinlock_t		incoming_lock;	/* Incoming call vs service shutdown lock */
	struct list_head	sock_calls;	/* List of calls owned by this socket */
	struct list_head	to_be_accepted;	/* calls awaiting acceptance */
	struct list_head	recvmsg_q;	/* Calls awaiting recvmsg's attention */
	rwlock_t		recvmsg_lock;	/* Lock for recvmsg_q */
	struct key		*key;		/* security for this socket */
	struct key		*securities;	/* list of server security descriptors */
	struct rb_root		calls;		/* User ID -> call mapping */
	unsigned long		flags;
#define RXRPC_SOCK_CONNECTED	0		/* connect_srx is set */
	rwlock_t		call_lock;	/* lock for calls */
	u32			min_sec_level;	/* minimum security level */
#define RXRPC_SECURITY_MAX	RXRPC_SECURITY_ENCRYPT
	bool			exclusive;	/* Exclusive connection for a client socket */
	u16			second_service;	/* Additional service bound to the endpoint */
	struct {
		/* Service upgrade information */
		u16		from;		/* Service ID to upgrade (if not 0) */
		u16		to;		/* service ID to upgrade to */
	} service_upgrade;
	sa_family_t		family;		/* Protocol family created with */
	struct sockaddr_rxrpc	srx;		/* Primary Service/local addresses */
	struct sockaddr_rxrpc	connect_srx;	/* Default client address from connect() */
};

#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
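
/*
 * Illustrative sketch (not part of the original header): because struct sock
 * must be the first member of struct rxrpc_sock (see the WARNING above),
 * rxrpc_sk() is a container_of() downcast from the generic socket back to
 * the AF_RXRPC socket.  The helper name below is hypothetical and exists
 * only to show the idiom.
 */
static inline struct rxrpc_sock *rxrpc_sk_example(struct sock *sk)
{
	return rxrpc_sk(sk);	/* container_of(sk, struct rxrpc_sock, sk) */
}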

/*
 * CPU-byteorder normalised Rx packet header.
 */
struct rxrpc_host_header {
	u32		epoch;		/* client boot timestamp */
	u32		cid;		/* connection and channel ID */
	u32		callNumber;	/* call ID (0 for connection-level packets) */
	u32		seq;		/* sequence number of pkt in call stream */
	u32		serial;		/* serial number of pkt sent to network */
	u8		type;		/* packet type */
	u8		flags;		/* packet flags */
	u8		userStatus;	/* app-layer defined status */
	u8		securityIndex;	/* security protocol ID */
	union {
		u16	_rsvd;		/* reserved */
		u16	cksum;		/* kerberos security checksum */
	};
	u16		serviceId;	/* service ID */
} __packed;

/*
 * RxRPC socket buffer private variables
 * - max 48 bytes (struct sk_buff::cb)
 */
struct rxrpc_skb_priv {
	u16		offset;		/* Offset of data */
	u16		len;		/* Length of data */
	u8		flags;
#define RXRPC_RX_VERIFIED	0x01

	struct rxrpc_host_header hdr;	/* RxRPC packet header from this packet */
};

#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)

/*
 * RxRPC security module interface
 */
struct rxrpc_security {
	const char		*name;		/* name of this service */
	u8			security_index;	/* security type provided */
	u32			no_key_abort;	/* Abort code indicating no key */

	/* Initialise a security service */
	int (*init)(void);

	/* Clean up a security service */
	void (*exit)(void);

	/* Parse the information from a server key */
	int (*preparse_server_key)(struct key_preparsed_payload *);

	/* Clean up the preparse buffer after parsing a server key */
	void (*free_preparse_server_key)(struct key_preparsed_payload *);

	/* Destroy the payload of a server key */
	void (*destroy_server_key)(struct key *);

	/* Describe a server key */
	void (*describe_server_key)(const struct key *, struct seq_file *);

	/* initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *,
					struct rxrpc_key_token *);

	/* Work out how much data we can store in a packet, given an estimate
	 * of the amount of data remaining.
	 */
	int (*how_much_data)(struct rxrpc_call *, size_t,
			     size_t *, size_t *, size_t *);

	/* impose security on a packet */
	int (*secure_packet)(struct rxrpc_call *, struct rxrpc_txbuf *);

	/* verify the security on a received packet */
	int (*verify_packet)(struct rxrpc_call *, struct sk_buff *);

	/* Free crypto request on a call */
	void (*free_call_crypto)(struct rxrpc_call *);

	/* issue a challenge */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* respond to a challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *,
				    u32 *);

	/* verify a response */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *,
			       u32 *);

	/* clear connection security */
	void (*clear)(struct rxrpc_connection *);
};

/*
 * RxRPC local transport endpoint description
 * - owned by a single AF_RXRPC socket
 * - pointed to by transport socket struct sk_user_data
 */
struct rxrpc_local {
	struct rcu_head		rcu;
	atomic_t		active_users;	/* Number of users of the local endpoint */
	refcount_t		ref;		/* Number of references to the structure */
	struct rxrpc_net	*rxnet;		/* The network ns in which this resides */
	struct hlist_node	link;
	struct socket		*socket;	/* my UDP socket */
	struct task_struct	*io_thread;
	struct completion	io_thread_ready; /* Indication that the I/O thread started */
	struct rxrpc_sock __rcu	*service;	/* Service(s) listening on this endpoint */
	struct rw_semaphore	defrag_sem;	/* control re-enablement of IP DF bit */
	struct sk_buff_head	rx_queue;	/* Received packets */
	struct list_head	call_attend_q;	/* Calls requiring immediate attention */
	struct rb_root		client_bundles;	/* Client connection bundles by socket params */
	spinlock_t		client_bundles_lock; /* Lock for client_bundles */
	spinlock_t		lock;		/* access lock */
	rwlock_t		services_lock;	/* lock for services list */
	int			debug_id;	/* debug ID for printks */
	bool			dead;
	bool			service_closed;	/* Service socket closed */
	struct sockaddr_rxrpc	srx;		/* local address */
};

/*
 * RxRPC remote transport endpoint definition
 * - matched by local endpoint, remote port, address and protocol type
 */
struct rxrpc_peer {
	struct rcu_head		rcu;		/* This must be first */
	refcount_t		ref;
	unsigned long		hash_key;
	struct hlist_node	hash_link;
	struct rxrpc_local	*local;
	struct hlist_head	error_targets;	/* targets for net error distribution */
	struct rb_root		service_conns;	/* Service connections */
	struct list_head	keepalive_link;	/* Link in net->peer_keepalive[] */
	time64_t		last_tx_at;	/* Last time packet sent here */
	seqlock_t		service_conn_lock;
	spinlock_t		lock;		/* access lock */
	unsigned int		if_mtu;		/* interface MTU for this peer */
	unsigned int		mtu;		/* network MTU for this peer */
	unsigned int		maxdata;	/* data size (MTU - hdrsize) */
	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC) */
	int			debug_id;	/* debug ID for printks */
	struct sockaddr_rxrpc	srx;		/* remote address */

	/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
	spinlock_t		rtt_input_lock;	/* RTT lock for input routine */
	ktime_t			rtt_last_req;	/* Time of last RTT request */
	unsigned int		rtt_count;	/* Number of samples we've got */

	u32			srtt_us;	/* smoothed round trip time << 3 in usecs */
	u32			mdev_us;	/* medium deviation */
	u32			mdev_max_us;	/* maximal mdev for the last rtt period */
	u32			rttvar_us;	/* smoothed mdev_max */
	u32			rto_j;		/* Retransmission timeout in jiffies */
	u8			backoff;	/* Backoff timeout */

	u8			cong_ssthresh;	/* Congestion slow-start threshold */
};

/*
 * Keys for matching a connection.
 */
struct rxrpc_conn_proto {
	union {
		struct {
			u32	epoch;		/* epoch of this connection */
			u32	cid;		/* connection ID */
		};
		u64		index_key;
	};
};

struct rxrpc_conn_parameters {
	struct rxrpc_local	*local;		/* Representation of local endpoint */
	struct rxrpc_peer	*peer;		/* Remote endpoint */
	struct key		*key;		/* Security details */
	bool			exclusive;	/* T if conn is exclusive */
	bool			upgrade;	/* T if service ID can be upgraded */
	u16			service_id;	/* Service ID for this connection */
	u32			security_level;	/* Security level selected */
};

/*
 * Bits in the connection flags.
 */
enum rxrpc_conn_flag {
	RXRPC_CONN_HAS_IDR,		/* Has a client conn ID assigned */
	RXRPC_CONN_IN_SERVICE_CONNS,	/* Conn is in peer->service_conns */
	RXRPC_CONN_DONT_REUSE,		/* Don't reuse this connection */
	RXRPC_CONN_PROBING_FOR_UPGRADE,	/* Probing for service upgrade */
	RXRPC_CONN_FINAL_ACK_0,		/* Need final ACK for channel 0 */
	RXRPC_CONN_FINAL_ACK_1,		/* Need final ACK for channel 1 */
	RXRPC_CONN_FINAL_ACK_2,		/* Need final ACK for channel 2 */
	RXRPC_CONN_FINAL_ACK_3,		/* Need final ACK for channel 3 */
};

#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_1) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_2) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_3))

/*
 * Events that can be raised upon a connection.
 */
enum rxrpc_conn_event {
	RXRPC_CONN_EV_CHALLENGE,	/* Send challenge packet */
};

/*
 * The connection protocol state.
 */
enum rxrpc_conn_proto_state {
	RXRPC_CONN_UNUSED,		/* Connection not yet attempted */
	RXRPC_CONN_CLIENT,		/* Client connection */
	RXRPC_CONN_SERVICE_PREALLOC,	/* Service connection preallocation */
	RXRPC_CONN_SERVICE_UNSECURED,	/* Service unsecured connection */
	RXRPC_CONN_SERVICE_CHALLENGING,	/* Service challenging for security */
	RXRPC_CONN_SERVICE,		/* Service secured connection */
	RXRPC_CONN_REMOTELY_ABORTED,	/* Conn aborted by peer */
	RXRPC_CONN_LOCALLY_ABORTED,	/* Conn aborted locally */
	RXRPC_CONN__NR_STATES
};

/*
 * RxRPC client connection bundle.
 */
struct rxrpc_bundle {
	struct rxrpc_local	*local;		/* Representation of local endpoint */
	struct rxrpc_peer	*peer;		/* Remote endpoint */
	struct key		*key;		/* Security details */
	refcount_t		ref;
	atomic_t		active;		/* Number of active users */
	unsigned int		debug_id;
	u32			security_level;	/* Security level selected */
	u16			service_id;	/* Service ID for this connection */
	bool			try_upgrade;	/* True if the bundle is attempting upgrade */
	bool			alloc_conn;	/* True if someone's getting a conn */
	bool			exclusive;	/* T if conn is exclusive */
	bool			upgrade;	/* T if service ID can be upgraded */
	short			alloc_error;	/* Error from last conn allocation */
	spinlock_t		channel_lock;
	struct rb_node		local_node;	/* Node in local->client_conns */
	struct list_head	waiting_calls;	/* Calls waiting for channels */
	unsigned long		avail_chans;	/* Mask of available channels */
	struct rxrpc_connection	*conns[4];	/* The connections in the bundle (max 4) */
};

/*
 * RxRPC connection definition
 * - matched by { local, peer, epoch, conn_id, direction }
 * - each connection can only handle four simultaneous calls
 */
struct rxrpc_connection {
	struct rxrpc_conn_proto	proto;
	struct rxrpc_local	*local;		/* Representation of local endpoint */
	struct rxrpc_peer	*peer;		/* Remote endpoint */
	struct rxrpc_net	*rxnet;		/* Network namespace to which call belongs */
	struct key		*key;		/* Security details */

	refcount_t		ref;
	atomic_t		active;		/* Active count for service conns */
	struct rcu_head		rcu;
	struct list_head	cache_link;

	unsigned char		act_chans;	/* Mask of active channels */
	struct rxrpc_channel {
		unsigned long		final_ack_at;	/* Time at which to issue final ACK */
		struct rxrpc_call __rcu	*call;		/* Active call */
		unsigned int		call_debug_id;	/* call->debug_id */
		u32			call_id;	/* ID of current call */
		u32			call_counter;	/* Call ID counter */
		u32			last_call;	/* ID of last call */
		u8			last_type;	/* Type of last packet */
		union {
			u32		last_seq;
			u32		last_abort;
		};
	} channels[RXRPC_MAXCALLS];

	struct timer_list	timer;		/* Conn event timer */
	struct work_struct	processor;	/* connection event processor */
	struct work_struct	destructor;	/* In-process-context destroyer */
	struct rxrpc_bundle	*bundle;	/* Client connection bundle */
	struct rb_node		service_node;	/* Node in peer->service_conns */
	struct list_head	proc_link;	/* link in procfs list */
	struct list_head	link;		/* link in master connection list */
	struct sk_buff_head	rx_queue;	/* received conn-level packets */

	const struct rxrpc_security *security;	/* applied security module */
	union {
		struct {
			struct crypto_sync_skcipher *cipher;	/* encryption handle */
			struct rxrpc_crypt csum_iv;	/* packet checksum base */
			u32	nonce;		/* response re-use preventer */
		} rxkad;
	};
	unsigned long		flags;
	unsigned long		events;
	unsigned long		idle_timestamp;	/* Time at which last became idle */
	spinlock_t		state_lock;	/* state-change lock */
	enum rxrpc_conn_proto_state state;	/* current state of connection */
	u32			abort_code;	/* Abort code of connection abort */
	int			debug_id;	/* debug ID for printks */
	atomic_t		serial;		/* packet serial number counter */
	unsigned int		hi_serial;	/* highest serial number received */
	u32			service_id;	/* Service ID, possibly upgraded */
	u32			security_level;	/* Security level selected */
	u8			security_ix;	/* security type */
	u8			out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
	u8			bundle_shift;	/* Index into bundle->avail_chans */
	bool			exclusive;	/* T if conn is exclusive */
	bool			upgrade;	/* T if service ID can be upgraded */
	u16			orig_service_id; /* Originally requested service ID */
	short			error;		/* Local error code */
};
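
/*
 * Illustrative sketch (not part of the original header): the low-order bits
 * of a call's connection ID select one of the RXRPC_MAXCALLS channels above,
 * which is how channels[] is indexed.  RXRPC_CHANNELMASK comes from
 * protocol.h; the helper name below is hypothetical.
 */
static inline unsigned int rxrpc_example_channel_index(u32 cid)
{
	return cid & RXRPC_CHANNELMASK;	/* 0 .. RXRPC_MAXCALLS - 1 */
}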

static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
{
	return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
}

static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
{
	return !rxrpc_to_server(sp);
}

/*
 * Flags in call->flags.
 */
enum rxrpc_call_flag {
	RXRPC_CALL_RELEASED,		/* call has been released - no more message to userspace */
	RXRPC_CALL_HAS_USERID,		/* has a user ID attached */
	RXRPC_CALL_IS_SERVICE,		/* Call is service call */
	RXRPC_CALL_EXPOSED,		/* The call was exposed to the world */
	RXRPC_CALL_RX_LAST,		/* Received the last packet (at rxtx_top) */
	RXRPC_CALL_TX_LAST,		/* Last packet in Tx buffer (at rxtx_top) */
	RXRPC_CALL_TX_ALL_ACKED,	/* Last packet has been hard-acked */
	RXRPC_CALL_SEND_PING,		/* A ping will need to be sent */
	RXRPC_CALL_RETRANS_TIMEOUT,	/* Retransmission due to timeout occurred */
	RXRPC_CALL_BEGAN_RX_TIMER,	/* We began the expect_rx_by timer */
	RXRPC_CALL_RX_HEARD,		/* The peer responded at least once to this call */
	RXRPC_CALL_DISCONNECTED,	/* The call has been disconnected */
	RXRPC_CALL_KERNEL,		/* The call was made by the kernel */
	RXRPC_CALL_UPGRADE,		/* Service upgrade was requested for the call */
	RXRPC_CALL_EXCLUSIVE,		/* The call uses a once-only connection */
	RXRPC_CALL_RX_IS_IDLE,		/* Reception is idle - send an ACK */
};

/*
 * Events that can be raised on a call.
 */
enum rxrpc_call_event {
	RXRPC_CALL_EV_ACK_LOST,		/* ACK may be lost, send ping */
	RXRPC_CALL_EV_INITIAL_PING,	/* Send initial ping for a new service call */
};

/*
 * The states that a call can be in.
 */
enum rxrpc_call_state {
	RXRPC_CALL_UNINITIALISED,
	RXRPC_CALL_CLIENT_AWAIT_CONN,	/* - client waiting for connection to become available */
	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
	RXRPC_CALL_SERVER_PREALLOC,	/* - service preallocation */
	RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
	RXRPC_CALL_SERVER_AWAIT_ACK,	/* - server awaiting final ACK */
	RXRPC_CALL_COMPLETE,		/* - call complete */
	NR__RXRPC_CALL_STATES
};

/*
 * Call completion condition (state == RXRPC_CALL_COMPLETE).
 */
enum rxrpc_call_completion {
	RXRPC_CALL_SUCCEEDED,		/* - Normal termination */
	RXRPC_CALL_REMOTELY_ABORTED,	/* - call aborted by peer */
	RXRPC_CALL_LOCALLY_ABORTED,	/* - call aborted locally on error or close */
	RXRPC_CALL_LOCAL_ERROR,		/* - call failed due to local error */
	RXRPC_CALL_NETWORK_ERROR,	/* - call terminated by network error */
	NR__RXRPC_CALL_COMPLETIONS
};

/*
 * Call Tx congestion management modes.
 */
enum rxrpc_congest_mode {
	RXRPC_CALL_SLOW_START,
	RXRPC_CALL_CONGEST_AVOIDANCE,
	RXRPC_CALL_PACKET_LOSS,
	RXRPC_CALL_FAST_RETRANSMIT,
	NR__RXRPC_CONGEST_MODES
};

/*
 * RxRPC call definition
 * - matched by { connection, call_id }
 */
struct rxrpc_call {
	struct rcu_head		rcu;
	struct rxrpc_connection	*conn;		/* connection carrying call */
	struct rxrpc_peer	*peer;		/* Peer record for remote address */
	struct rxrpc_local	*local;		/* Representation of local endpoint */
	struct rxrpc_sock __rcu	*socket;	/* socket responsible */
	struct rxrpc_net	*rxnet;		/* Network namespace to which call belongs */
	struct key		*key;		/* Security details */
	const struct rxrpc_security *security;	/* applied security module */
	struct mutex		user_mutex;	/* User access mutex */
	struct sockaddr_rxrpc	dest_srx;	/* Destination address */
	unsigned long		delay_ack_at;	/* When DELAY ACK needs to happen */
	unsigned long		ack_lost_at;	/* When ACK is figured as lost */
	unsigned long		resend_at;	/* When next resend needs to happen */
	unsigned long		ping_at;	/* When next to send a ping */
	unsigned long		keepalive_at;	/* When next to send a keepalive ping */
	unsigned long		expect_rx_by;	/* When we expect to get a packet by */
	unsigned long		expect_req_by;	/* When we expect to get a request DATA packet by */
	unsigned long		expect_term_by;	/* When we expect call termination by */
	u32			next_rx_timo;	/* Timeout for next Rx packet (jif) */
	u32			next_req_timo;	/* Timeout for next Rx request packet (jif) */
	struct timer_list	timer;		/* Combined event timer */
	struct work_struct	destroyer;	/* In-process-context destroyer */
	rxrpc_notify_rx_t	notify_rx;	/* kernel service Rx notification function */
	struct list_head	link;		/* link in master call list */
	struct list_head	chan_wait_link;	/* Link in conn->bundle->waiting_calls */
	struct hlist_node	error_link;	/* link in error distribution list */
	struct list_head	accept_link;	/* Link in rx->acceptq */
	struct list_head	recvmsg_link;	/* Link in rx->recvmsg_q */
	struct list_head	sock_link;	/* Link in rx->sock_calls */
	struct rb_node		sock_node;	/* Node in rx->calls */
	struct list_head	attend_link;	/* Link in local->call_attend_q */
	struct rxrpc_txbuf	*tx_pending;	/* Tx buffer being filled */
	wait_queue_head_t	waitq;		/* Wait queue for channel or Tx */
	s64			tx_total_len;	/* Total length left to be transmitted (or -1) */
	unsigned long		user_call_ID;	/* user-defined call ID */
	unsigned long		flags;
	unsigned long		events;
	spinlock_t		notify_lock;	/* Kernel notification lock */
	rwlock_t		state_lock;	/* lock for state transition */
	u32			abort_code;	/* Local/remote abort code */
	int			error;		/* Local error incurred */
	enum rxrpc_call_state	state;		/* current state of call */
	enum rxrpc_call_completion completion;	/* Call completion condition */
	refcount_t		ref;
	u8			security_ix;	/* Security type */
	enum rxrpc_interruptibility interruptibility; /* At what point call may be interrupted */
	u32			call_id;	/* call ID on connection */
	u32			cid;		/* connection ID plus channel index */
	u32			security_level;	/* Security level selected */
	int			debug_id;	/* debug ID for printks */
	unsigned short		rx_pkt_offset;	/* Current recvmsg packet offset */
	unsigned short		rx_pkt_len;	/* Current recvmsg packet len */

	/* Transmitted data tracking. */
	spinlock_t		tx_lock;	/* Transmit queue lock */
	struct list_head	tx_sendmsg;	/* Sendmsg prepared packets */
	struct list_head	tx_buffer;	/* Buffer of transmissible packets */
	rxrpc_seq_t		tx_bottom;	/* First packet in buffer */
	rxrpc_seq_t		tx_transmitted;	/* Highest packet transmitted */
	rxrpc_seq_t		tx_prepared;	/* Highest Tx slot prepared. */
	rxrpc_seq_t		tx_top;		/* Highest Tx slot allocated. */
	u16			tx_backoff;	/* Delay to insert due to Tx failure */
	u8			tx_winsize;	/* Maximum size of Tx window */
#define RXRPC_TX_MAX_WINDOW	128
	ktime_t			tx_last_sent;	/* Last time a transmission occurred */

	/* Received data tracking */
	struct sk_buff_head	recvmsg_queue;	/* Queue of packets ready for recvmsg() */
	struct sk_buff_head	rx_oos_queue;	/* Queue of out of sequence packets */

	rxrpc_seq_t		rx_highest_seq;	/* Highest sequence number received */
	rxrpc_seq_t		rx_consumed;	/* Highest packet consumed */
	rxrpc_serial_t		rx_serial;	/* Highest serial received for this call */
	u8			rx_winsize;	/* Size of Rx window */

	/* TCP-style slow-start congestion control [RFC5681].  Since the SMSS
	 * is fixed, we keep these numbers in terms of segments (ie. DATA
	 * packets) rather than bytes.
	 */
#define RXRPC_TX_SMSS		RXRPC_JUMBO_DATALEN
#define RXRPC_MIN_CWND		(RXRPC_TX_SMSS > 2190 ? 2 : RXRPC_TX_SMSS > 1095 ? 3 : 4)
	u8			cong_cwnd;	/* Congestion window size */
	u8			cong_extra;	/* Extra to send for congestion management */
	u8			cong_ssthresh;	/* Slow-start threshold */
	enum rxrpc_congest_mode	cong_mode:8;	/* Congestion management mode */
	u8			cong_dup_acks;	/* Count of ACKs showing missing packets */
	u8			cong_cumul_acks; /* Cumulative ACK count */
	ktime_t			cong_tstamp;	/* Last time cwnd was changed */

	/* Receive-phase ACK management (ACKs we send). */
	u8			ackr_reason;	/* reason to ACK */
	rxrpc_serial_t		ackr_serial;	/* serial of packet being ACK'd */
	atomic64_t		ackr_window;	/* Base (in LSW) and top (in MSW) of SACK window */
	atomic_t		ackr_nr_unacked; /* Number of unacked packets */
	atomic_t		ackr_nr_consumed; /* Number of packets needing hard ACK */
	struct {
#define RXRPC_SACK_SIZE 256
		/* SACK table for soft-acked packets */
		u8		ackr_sack_table[RXRPC_SACK_SIZE];
	} __aligned(8);

	/* RTT management */
	rxrpc_serial_t		rtt_serial[4];	/* Serial number of DATA or PING sent */
	ktime_t			rtt_sent_at[4];	/* Time packet sent */
	unsigned long		rtt_avail;	/* Mask of available slots in bits 0-3,
						 * Mask of pending samples in 8-11 */
#define RXRPC_CALL_RTT_AVAIL_MASK	0xf
#define RXRPC_CALL_RTT_PEND_SHIFT	8

	/* Transmission-phase ACK management (ACKs we've received). */
	ktime_t			acks_latest_ts;	/* Timestamp of latest ACK received */
	rxrpc_seq_t		acks_first_seq;	/* first sequence number received */
	rxrpc_seq_t		acks_prev_seq;	/* Highest previousPacket received */
	rxrpc_seq_t		acks_hard_ack;	/* Latest hard-ack point */
	rxrpc_seq_t		acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
	rxrpc_serial_t		acks_highest_serial; /* Highest serial number ACK'd */
};
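
/*
 * Illustrative sketch (not part of the original header): ackr_window above
 * packs the receive window into a single atomic64_t - the window base in the
 * lower 32 bits and the window top in the upper 32 bits - so both ends can be
 * sampled consistently without taking a lock.  The helper below is
 * hypothetical and only shows how such a value would be unpacked.  Note also
 * that, assuming RXRPC_JUMBO_DATALEN is 1412 bytes as defined in protocol.h,
 * the RXRPC_MIN_CWND ternary above evaluates to 3 segments, matching the
 * RFC 5681 initial-window rule for that SMSS.
 */
static inline void rxrpc_example_unpack_ackr_window(const struct rxrpc_call *call,
						    rxrpc_seq_t *_base,
						    rxrpc_seq_t *_top)
{
	u64 window = atomic64_read(&call->ackr_window);

	*_base = lower_32_bits(window);	/* LSW: base of the SACK window */
	*_top  = upper_32_bits(window);	/* MSW: top of the SACK window */
}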

/*
 * Summary of a new ACK and the changes it made to the Tx buffer packet states.
 */
struct rxrpc_ack_summary {
	u16			nr_acks;		/* Number of ACKs in packet */
	u16			nr_new_acks;		/* Number of new ACKs in packet */
	u16			nr_rot_new_acks;	/* Number of rotated new ACKs */
	u8			ack_reason;
	bool			saw_nacks;		/* Saw NACKs in packet */
	bool			new_low_nack;		/* T if new low NACK found */
	bool			retrans_timeo;		/* T if reTx due to timeout happened */
	u8			flight_size;		/* Number of unreceived transmissions */
	/* Place to stash values for tracing */
	enum rxrpc_congest_mode	mode:8;
	u8			cwnd;
	u8			ssthresh;
	u8			dup_acks;
	u8			cumulative_acks;
};

/*
 * sendmsg() cmsg-specified parameters.
 */
enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* send data message */
	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
	RXRPC_CMD_CHARGE_ACCEPT,	/* [server] charge accept preallocation */
};

struct rxrpc_call_params {
	s64			tx_total_len;	/* Total Tx data length (if send data) */
	unsigned long		user_call_ID;	/* User's call ID */
	struct {
		u32		hard;		/* Maximum lifetime (sec) */
		u32		idle;		/* Max time since last data packet (msec) */
		u32		normal;		/* Max time since last call packet (msec) */
	} timeouts;
	u8			nr_timeouts;	/* Number of timeouts specified */
	bool			kernel;		/* T if kernel is making the call */
	enum rxrpc_interruptibility interruptibility; /* How interruptible is the call? */
};

struct rxrpc_send_params {
	struct rxrpc_call_params call;
	u32			abort_code;	/* Abort code to Tx (if abort) */
	enum rxrpc_command	command : 8;	/* The command to implement */
	bool			exclusive;	/* Shared or exclusive call */
	bool			upgrade;	/* If the connection is upgradeable */
};

/*
 * Buffer of data to be output as a packet.
 */
struct rxrpc_txbuf {
	struct rcu_head		rcu;
	struct list_head	call_link;	/* Link in call->tx_sendmsg/tx_buffer */
	struct list_head	tx_link;	/* Link in live Enc queue or Tx queue */
	ktime_t			last_sent;	/* Time at which last transmitted */
	refcount_t		ref;
	rxrpc_seq_t		seq;		/* Sequence number of this packet */
	unsigned int		call_debug_id;
	unsigned int		debug_id;
	unsigned int		len;		/* Amount of data in buffer */
	unsigned int		space;		/* Remaining data space */
	unsigned int		offset;		/* Offset of fill point */
	unsigned long		flags;
#define RXRPC_TXBUF_LAST	0		/* Set if last packet in Tx phase */
#define RXRPC_TXBUF_RESENT	1		/* Set if has been resent */
	u8 /*enum rxrpc_propose_ack_trace*/ ack_why; /* If ack, why */
	struct {
		/* The packet for encrypting and DMA'ing.  We align it such
		 * that data[] aligns correctly for any crypto blocksize.
		 */
		u8	pad[64 - sizeof(struct rxrpc_wire_header)];
		struct rxrpc_wire_header wire;	/* Network-ready header */
		union {
			u8	data[RXRPC_JUMBO_DATALEN]; /* Data packet */
			struct {
				struct rxrpc_ackpacket ack;
				u8 acks[0];
			};
		};
	} __aligned(64);
};

static inline bool rxrpc_sending_to_server(const struct rxrpc_txbuf *txb)
{
	return txb->wire.flags & RXRPC_CLIENT_INITIATED;
}

static inline bool rxrpc_sending_to_client(const struct rxrpc_txbuf *txb)
{
	return !rxrpc_sending_to_server(txb);
}

#include <trace/events/rxrpc.h>

/*
 * af_rxrpc.c
 */
extern atomic_t rxrpc_n_rx_skbs;
extern struct workqueue_struct *rxrpc_workqueue;

/*
 * call_accept.c
 */
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
int rxrpc_new_incoming_call(struct rxrpc_local *, struct rxrpc_peer *,
			    struct rxrpc_connection *, struct sockaddr_rxrpc *,
			    struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);

/*
 * call_event.c
 */
void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
			enum rxrpc_propose_ack_trace why);
void rxrpc_send_ACK(struct rxrpc_call *, u8, rxrpc_serial_t, enum rxrpc_propose_ack_trace);
void rxrpc_propose_delay_ACK(struct rxrpc_call *, rxrpc_serial_t,
			     enum rxrpc_propose_ack_trace);
void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *);
void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb);

void rxrpc_reduce_call_timer(struct rxrpc_call *call,
			     unsigned long expire_at,
			     unsigned long now,
			     enum rxrpc_timer_trace why);

void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb);

/*
 * call_object.c
 */
extern const char *const rxrpc_call_states[];
extern const char *const rxrpc_call_completions[];
extern struct kmem_cache *rxrpc_call_jar;

void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what);
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t, unsigned int);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
					 struct rxrpc_conn_parameters *,
					 struct sockaddr_rxrpc *,
					 struct rxrpc_call_params *, gfp_t,
					 unsigned int);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
			 struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
void rxrpc_see_call(struct rxrpc_call *, enum rxrpc_call_trace);
struct rxrpc_call *rxrpc_try_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_cleanup_call(struct rxrpc_call *);
void rxrpc_destroy_all_calls(struct rxrpc_net *);

static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
{
	return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
}

static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
{
	return !rxrpc_is_service_call(call);
}

/*
 * conn_client.c
 */
extern unsigned int rxrpc_reap_client_connections;
extern unsigned long rxrpc_conn_idle_client_expiry;
extern unsigned long rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;

void rxrpc_destroy_client_conn_ids(void);
struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace);
void rxrpc_put_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace);
int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
		       struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
		       gfp_t);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_bundle *, struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *, enum rxrpc_conn_trace);
void rxrpc_discard_expired_client_conns(struct work_struct *);
void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
void rxrpc_clean_up_local_conns(struct rxrpc_local *);

/*
 * conn_event.c
 */
void rxrpc_process_connection(struct work_struct *);
void rxrpc_process_delayed_final_acks(struct rxrpc_connection *, bool);
int rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb);

/*
 * conn_object.c
 */
extern unsigned int rxrpc_connection_expiry;
extern unsigned int rxrpc_closed_conn_expiry;

struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *, gfp_t);
struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *,
							   struct sockaddr_rxrpc *,
							   struct sk_buff *);
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_client_conn(struct rxrpc_connection *);
void rxrpc_queue_conn(struct rxrpc_connection *, enum rxrpc_conn_trace);
void rxrpc_see_connection(struct rxrpc_connection *, enum rxrpc_conn_trace);
struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *,
					      enum rxrpc_conn_trace);
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *,
						    enum rxrpc_conn_trace);
void rxrpc_put_connection(struct rxrpc_connection *, enum rxrpc_conn_trace);
void rxrpc_service_connection_reaper(struct work_struct *);
void rxrpc_destroy_all_connections(struct rxrpc_net *);

static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
{
	return conn->out_clientflag;
}

static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
{
	return !rxrpc_conn_is_client(conn);
}

static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
					   unsigned long expire_at)
{
	timer_reduce(&conn->timer, expire_at);
}

/*
 * conn_service.c
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
						     struct sk_buff *);
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
void rxrpc_new_incoming_connection(struct rxrpc_sock *, struct rxrpc_connection *,
				   const struct rxrpc_security *, struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);

/*
 * input.c
 */
void rxrpc_congestion_degrade(struct rxrpc_call *);
void rxrpc_input_call_packet(struct rxrpc_call *, struct sk_buff *);
void rxrpc_implicit_end_call(struct rxrpc_call *, struct sk_buff *);

/*
 * io_thread.c
 */
int rxrpc_encap_rcv(struct sock *, struct sk_buff *);
void rxrpc_error_report(struct sock *);
int rxrpc_io_thread(void *data);
static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local)
{
	wake_up_process(local->io_thread);
}

/*
 * insecure.c
 */
extern const struct rxrpc_security rxrpc_no_security;

/*
 * key.c
 */
extern struct key_type key_type_rxrpc;

int rxrpc_request_key(struct rxrpc_sock *, sockptr_t, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t,
			      u32);

/*
 * local_event.c
 */
void rxrpc_send_version_request(struct rxrpc_local *local,
				struct rxrpc_host_header *hdr,
				struct sk_buff *skb);

/*
 * local_object.c
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *, enum rxrpc_local_trace);
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *, enum rxrpc_local_trace);
void rxrpc_put_local(struct rxrpc_local *, enum rxrpc_local_trace);
struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *, enum rxrpc_local_trace);
void rxrpc_unuse_local(struct rxrpc_local *, enum rxrpc_local_trace);
void rxrpc_destroy_local(struct rxrpc_local *local);
void rxrpc_destroy_all_locals(struct rxrpc_net *);

static inline bool __rxrpc_use_local(struct rxrpc_local *local,
				     enum rxrpc_local_trace why)
{
	int r, u;

	r = refcount_read(&local->ref);
	u = atomic_fetch_add_unless(&local->active_users, 1, 0);
	trace_rxrpc_local(local->debug_id, why, r, u);
	return u != 0;
}

static inline void rxrpc_see_local(struct rxrpc_local *local,
				   enum rxrpc_local_trace why)
{
	int r, u;

	r = refcount_read(&local->ref);
	u = atomic_read(&local->active_users);
	trace_rxrpc_local(local->debug_id, why, r, u);
}

/*
 * misc.c
 */
extern unsigned int rxrpc_max_backlog __read_mostly;
extern unsigned long rxrpc_soft_ack_delay;
extern unsigned long rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;

/*
 * net_ns.c
 */
extern unsigned int rxrpc_net_id;
extern struct pernet_operations rxrpc_net_ops;

static inline struct rxrpc_net *rxrpc_net(struct net *net)
{
	return net_generic(net, rxrpc_net_id);
}
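
/*
 * Illustrative sketch (not part of the original header): the per-netns state
 * is usually reached from a socket via the generic sock_net() accessor and
 * then rxrpc_net() above.  The wrapper name below is hypothetical.
 */
static inline struct rxrpc_net *rxrpc_example_sock_rxnet(const struct sock *sk)
{
	return rxrpc_net(sock_net(sk));	/* net_generic(net, rxrpc_net_id) */
}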

/*
 * output.c
 */
int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct rxrpc_txbuf *);
void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb);
void rxrpc_send_keepalive(struct rxrpc_peer *);
void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb);

/*
 * peer_event.c
 */
void rxrpc_input_error(struct rxrpc_local *, struct sk_buff *);
void rxrpc_peer_keepalive_worker(struct work_struct *);

/*
 * peer_object.c
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
					 const struct sockaddr_rxrpc *);
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
				     struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t,
				    enum rxrpc_peer_trace);
void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
			     struct rxrpc_peer *);
void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *, enum rxrpc_peer_trace);
void rxrpc_put_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);

/*
 * proc.c
 */
extern const struct seq_operations rxrpc_call_seq_ops;
extern const struct seq_operations rxrpc_connection_seq_ops;
extern const struct seq_operations rxrpc_peer_seq_ops;
extern const struct seq_operations rxrpc_local_seq_ops;

/*
 * recvmsg.c
 */
void rxrpc_notify_socket(struct rxrpc_call *);
bool __rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int);
bool rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int);
bool __rxrpc_call_completed(struct rxrpc_call *);
bool rxrpc_call_completed(struct rxrpc_call *);
bool __rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int);
bool rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);

/*
 * Abort a call due to a protocol error.
 */
static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
					struct sk_buff *skb,
					const char *eproto_why,
					const char *why,
					u32 abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why);
	return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO);
}

#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
	__rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
			     (abort_why), (abort_code))

/*
 * rtt.c
 */
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int,
			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool);
void rxrpc_peer_init_rtt(struct rxrpc_peer *);

/*
 * rxkad.c
 */
#ifdef CONFIG_RXKAD
extern const struct rxrpc_security rxkad;
#endif

/*
 * security.c
 */
int __init rxrpc_init_security(void);
const struct rxrpc_security *rxrpc_security_lookup(u8);
void rxrpc_exit_security(void);
int rxrpc_init_client_call_security(struct rxrpc_call *);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
const struct rxrpc_security *rxrpc_get_incoming_security(struct rxrpc_sock *,
							 struct sk_buff *);
struct key *rxrpc_look_up_server_security(struct rxrpc_connection *,
					  struct sk_buff *, u32, u32);

/*
 * sendmsg.c
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);

/*
 * server_key.c
 */
extern struct key_type key_type_rxrpc_s;

int rxrpc_server_keyring(struct rxrpc_sock *, sockptr_t, int);

/*
 * skbuff.c
 */
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);

/*
 * stats.c
 */
int rxrpc_stats_show(struct seq_file *seq, void *v);
int rxrpc_stats_clear(struct file *file, char *buf, size_t size);

#define rxrpc_inc_stat(rxnet, s) atomic_inc(&(rxnet)->s)
#define rxrpc_dec_stat(rxnet, s) atomic_dec(&(rxnet)->s)

/*
 * sysctl.c
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif

/*
 * txbuf.c
 */
extern atomic_t rxrpc_nr_txbuf;
struct rxrpc_txbuf *rxrpc_alloc_txbuf(struct rxrpc_call *call, u8 packet_type,
				      gfp_t gfp);
void rxrpc_get_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
void rxrpc_see_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
void rxrpc_put_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);

/*
 * utils.c
 */
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);

static inline bool before(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) < 0;
}
static inline bool before_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) <= 0;
}
static inline bool after(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) > 0;
}
static inline bool after_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
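
/*
 * Illustrative note (not part of the original header): the comparisons above
 * are circular, so they stay correct when the 32-bit sequence space wraps.
 * For example, with seq1 = 0xfffffffd and seq2 = 2, seq1 - seq2 = 0xfffffffb,
 * which is negative when viewed as an s32, so before(0xfffffffd, 2) is true
 * even though seq1 > seq2 as unsigned values.
 */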

/*
 * debug tracing
 */
extern unsigned int rxrpc_debug;

#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk("    "FMT ,##__VA_ARGS__)


#if defined(__KDEBUG)
#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04

#define _enter(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))	\
		kenter(FMT,##__VA_ARGS__);		\
} while (0)

#define _leave(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))	\
		kleave(FMT,##__VA_ARGS__);		\
} while (0)

#define _debug(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))	\
		kdebug(FMT,##__VA_ARGS__);		\
} while (0)

#else
#define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...)	no_printk("    "FMT ,##__VA_ARGS__)
#endif

/*
 * debug assertion checking
 */
#if 1 // defined(__KDEBUGALL)

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely(!(_x OP _y))) {					\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely((C) && !(_x OP _y))) {				\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)

#endif /* __KDEBUGALL */
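
/*
 * Illustrative sketch (not part of the original header): the conventional
 * pattern for the tracing macros above - _enter() at the top of a function
 * with its argument list, _debug() for intermediate notes and _leave() with
 * the return value.  The function below is hypothetical.
 */
static inline int rxrpc_example_traced_call(int value)
{
	_enter("%d", value);

	_debug("processing %d", value);

	_leave(" = %d", value);
	return value;
}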