/* RxRPC virtual connection handler, common bits.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

static void rxrpc_destroy_connection(struct rcu_head *);

/*
 * allocate a new connection
 */
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_LIST_HEAD(&conn->cache_link);
		spin_lock_init(&conn->channel_lock);
		INIT_LIST_HEAD(&conn->waiting_calls);
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
		INIT_LIST_HEAD(&conn->proc_link);
		INIT_LIST_HEAD(&conn->link);
		skb_queue_head_init(&conn->rx_queue);
		conn->security = &rxrpc_no_security;
		spin_lock_init(&conn->state_lock);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->size_align = 4;
		conn->idle_timestamp = jiffies;
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}

/*
 * Look up a connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is taken.
 * NULL is returned if there is no match.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
						   struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct sockaddr_rxrpc srx;
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
		goto not_found;

	/* We may have to handle mixing IPv4 and IPv6 */
	if (srx.transport.family != local->srx.transport.family) {
		pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
				    srx.transport.family,
				    local->srx.transport.family);
		goto not_found;
	}

	k.epoch = sp->hdr.epoch;
	k.cid = sp->hdr.cid & RXRPC_CIDMASK;

	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
		/* We need to look up service connections by the full protocol
		 * parameter set.  We look up the peer first as an intermediate
		 * step and then the connection from the peer's tree.
		 */
		peer = rxrpc_lookup_peer_rcu(local, &srx);
		if (!peer)
			goto not_found;
		conn = rxrpc_find_service_conn_rcu(peer, skb);
		if (!conn || atomic_read(&conn->usage) == 0)
			goto not_found;
		_leave(" = %p", conn);
		return conn;
	} else {
		/* Look up client connections by connection ID alone as their
		 * IDs are unique for this machine.
		 */
		conn = idr_find(&rxrpc_client_conn_ids,
				sp->hdr.cid >> RXRPC_CIDSHIFT);
		if (!conn || atomic_read(&conn->usage) == 0) {
			_debug("no conn");
			goto not_found;
		}

		if (conn->proto.epoch != k.epoch ||
		    conn->params.local != local)
			goto not_found;

		peer = conn->params.peer;
		switch (srx.transport.family) {
		case AF_INET:
			if (peer->srx.transport.sin.sin_port !=
			    srx.transport.sin.sin_port ||
			    peer->srx.transport.sin.sin_addr.s_addr !=
			    srx.transport.sin.sin_addr.s_addr)
				goto not_found;
			break;
#ifdef CONFIG_AF_RXRPC_IPV6
		case AF_INET6:
			if (peer->srx.transport.sin6.sin6_port !=
			    srx.transport.sin6.sin6_port ||
			    memcmp(&peer->srx.transport.sin6.sin6_addr,
				   &srx.transport.sin6.sin6_addr,
				   sizeof(struct in6_addr)) != 0)
				goto not_found;
			break;
#endif
		default:
			BUG();
		}

		_leave(" = %p", conn);
		return conn;
	}

not_found:
	_leave(" = NULL");
	return NULL;
}
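
/*
 * Note on the client-connection branch above: client connection IDs are
 * handed out by rxrpc_get_client_connection_id() (conn_client.c), which
 * stores the rxrpc_client_conn_ids IDR index in the bits above
 * RXRPC_CIDSHIFT and leaves the channel number in the bits below it.  So,
 * roughly, an incoming packet maps back to its connection and channel with:
 *
 *	conn = idr_find(&rxrpc_client_conn_ids, cid >> RXRPC_CIDSHIFT);
 *	chan = &conn->channels[cid & RXRPC_CHANNELMASK];
 */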
108 */ 109 conn = idr_find(&rxrpc_client_conn_ids, 110 sp->hdr.cid >> RXRPC_CIDSHIFT); 111 if (!conn || atomic_read(&conn->usage) == 0) { 112 _debug("no conn"); 113 goto not_found; 114 } 115 116 if (conn->proto.epoch != k.epoch || 117 conn->params.local != local) 118 goto not_found; 119 120 peer = conn->params.peer; 121 switch (srx.transport.family) { 122 case AF_INET: 123 if (peer->srx.transport.sin.sin_port != 124 srx.transport.sin.sin_port || 125 peer->srx.transport.sin.sin_addr.s_addr != 126 srx.transport.sin.sin_addr.s_addr) 127 goto not_found; 128 break; 129 #ifdef CONFIG_AF_RXRPC_IPV6 130 case AF_INET6: 131 if (peer->srx.transport.sin6.sin6_port != 132 srx.transport.sin6.sin6_port || 133 memcmp(&peer->srx.transport.sin6.sin6_addr, 134 &srx.transport.sin6.sin6_addr, 135 sizeof(struct in6_addr)) != 0) 136 goto not_found; 137 break; 138 #endif 139 default: 140 BUG(); 141 } 142 143 _leave(" = %p", conn); 144 return conn; 145 } 146 147 not_found: 148 _leave(" = NULL"); 149 return NULL; 150 } 151 152 /* 153 * Disconnect a call and clear any channel it occupies when that call 154 * terminates. The caller must hold the channel_lock and must release the 155 * call's ref on the connection. 156 */ 157 void __rxrpc_disconnect_call(struct rxrpc_connection *conn, 158 struct rxrpc_call *call) 159 { 160 struct rxrpc_channel *chan = 161 &conn->channels[call->cid & RXRPC_CHANNELMASK]; 162 163 _enter("%d,%x", conn->debug_id, call->cid); 164 165 if (rcu_access_pointer(chan->call) == call) { 166 /* Save the result of the call so that we can repeat it if necessary 167 * through the channel, whilst disposing of the actual call record. 168 */ 169 trace_rxrpc_disconnect_call(call); 170 if (call->abort_code) { 171 chan->last_abort = call->abort_code; 172 chan->last_type = RXRPC_PACKET_TYPE_ABORT; 173 } else { 174 chan->last_seq = call->rx_hard_ack; 175 chan->last_type = RXRPC_PACKET_TYPE_ACK; 176 } 177 /* Sync with rxrpc_conn_retransmit(). */ 178 smp_wmb(); 179 chan->last_call = chan->call_id; 180 chan->call_id = chan->call_counter; 181 182 rcu_assign_pointer(chan->call, NULL); 183 } 184 185 _leave(""); 186 } 187 188 /* 189 * Disconnect a call and clear any channel it occupies when that call 190 * terminates. 191 */ 192 void rxrpc_disconnect_call(struct rxrpc_call *call) 193 { 194 struct rxrpc_connection *conn = call->conn; 195 196 call->peer->cong_cwnd = call->cong_cwnd; 197 198 spin_lock_bh(&conn->params.peer->lock); 199 hlist_del_init(&call->error_link); 200 spin_unlock_bh(&conn->params.peer->lock); 201 202 if (rxrpc_is_client_call(call)) 203 return rxrpc_disconnect_client_call(call); 204 205 spin_lock(&conn->channel_lock); 206 __rxrpc_disconnect_call(conn, call); 207 spin_unlock(&conn->channel_lock); 208 209 call->conn = NULL; 210 conn->idle_timestamp = jiffies; 211 rxrpc_put_connection(conn); 212 } 213 214 /* 215 * Kill off a connection. 216 */ 217 void rxrpc_kill_connection(struct rxrpc_connection *conn) 218 { 219 struct rxrpc_net *rxnet = conn->params.local->rxnet; 220 221 ASSERT(!rcu_access_pointer(conn->channels[0].call) && 222 !rcu_access_pointer(conn->channels[1].call) && 223 !rcu_access_pointer(conn->channels[2].call) && 224 !rcu_access_pointer(conn->channels[3].call)); 225 ASSERT(list_empty(&conn->cache_link)); 226 227 write_lock(&rxnet->conn_lock); 228 list_del_init(&conn->proc_link); 229 write_unlock(&rxnet->conn_lock); 230 231 /* Drain the Rx queue. 

/*
 * Kill off a connection.
 */
void rxrpc_kill_connection(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;

	ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
	       !rcu_access_pointer(conn->channels[1].call) &&
	       !rcu_access_pointer(conn->channels[2].call) &&
	       !rcu_access_pointer(conn->channels[3].call));
	ASSERT(list_empty(&conn->cache_link));

	write_lock(&rxnet->conn_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxnet->conn_lock);

	/* Drain the Rx queue.  Note that even though we've unpublished, an
	 * incoming packet could still be being added to our Rx queue, so we
	 * will need to drain it again in the RCU cleanup handler.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	/* Leave final destruction to RCU.  The connection processor work item
	 * must carry a ref on the connection to prevent us getting here whilst
	 * it is queued or running.
	 */
	call_rcu(&conn->rcu, rxrpc_destroy_connection);
}

/*
 * Queue a connection's work processor, getting a ref to pass to the work
 * queue.
 */
bool rxrpc_queue_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n = __atomic_add_unless(&conn->usage, 1, 0);

	if (n == 0)
		return false;
	if (rxrpc_queue_work(&conn->processor))
		trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
	else
		rxrpc_put_connection(conn);
	return true;
}

/*
 * Note the re-emergence of a connection.
 */
void rxrpc_see_connection(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);

	if (conn) {
		int n = atomic_read(&conn->usage);

		trace_rxrpc_conn(conn, rxrpc_conn_seen, n, here);
	}
}

/*
 * Get a ref on a connection.
 */
void rxrpc_get_connection(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&conn->usage);

	trace_rxrpc_conn(conn, rxrpc_conn_got, n, here);
}

/*
 * Try to get a ref on a connection.
 */
struct rxrpc_connection *
rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);

	if (conn) {
		int n = __atomic_add_unless(&conn->usage, 1, 0);
		if (n > 0)
			trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
		else
			conn = NULL;
	}
	return conn;
}

/*
 * Release a service connection
 */
void rxrpc_put_service_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet;
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_dec_return(&conn->usage);
	trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		rxnet = conn->params.local->rxnet;
		rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0);
	}
}

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);

	_enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	rxrpc_purge_queue(&conn->rx_queue);

	conn->security->clear(conn);
	key_put(conn->params.key);
	key_put(conn->server_key);
	rxrpc_put_peer(conn->params.peer);
	rxrpc_put_local(conn->params.local);

	kfree(conn);
	_leave("");
}
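
/*
 * Note on the usage-count convention relied on by the reaper below: a
 * service connection sitting unused on rxnet->service_conns holds exactly
 * one ref.  The reaper claims a candidate by flipping that ref from 1 to 0
 * with atomic_cmpxchg(); concurrent lookups only take refs via
 * __atomic_add_unless(&conn->usage, 1, 0), which refuses to resurrect a
 * connection whose count has already hit zero.  Whichever atomic op wins
 * therefore either keeps the connection alive or dooms it; there is no
 * window in which both can succeed.
 */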

/*
 * reap dead service connections
 */
void rxrpc_service_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_net *rxnet =
		container_of(to_delayed_work(work),
			     struct rxrpc_net, service_conn_reaper);
	unsigned long reap_older_than, earliest, idle_timestamp, now;

	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	reap_older_than = now - rxrpc_connection_expiry * HZ;
	earliest = ULONG_MAX;

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		ASSERTCMP(atomic_read(&conn->usage), >, 0);
		if (likely(atomic_read(&conn->usage) > 1))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		idle_timestamp = READ_ONCE(conn->idle_timestamp);
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long)reap_older_than - (long)idle_timestamp);

		if (time_after(idle_timestamp, reap_older_than)) {
			if (time_before(idle_timestamp, earliest))
				earliest = idle_timestamp;
			continue;
		}

		/* The usage count sits at 1 whilst the object is unused on the
		 * list; we reduce that to 0 to make the object unavailable.
		 */
		if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
			continue;

		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxnet->conn_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long)earliest - now);
		ASSERT(time_after(earliest, now));
		rxrpc_queue_delayed_work(&rxnet->service_conn_reaper,
					 earliest - now);
	}

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_kill_connection(conn);
	}

	_leave("");
}

/*
 * preemptively destroy all the service connection records rather than
 * waiting for them to time out
 */
void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	rxrpc_destroy_all_client_connections(rxnet);

	rxrpc_connection_expiry = 0;
	cancel_delayed_work(&rxnet->service_conn_reaper);
	rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, atomic_read(&conn->usage));
		leak = true;
	}
	write_unlock(&rxnet->conn_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxnet->conn_proc_list));

	_leave("");
}