/* RxRPC virtual connection handler, common bits.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

LIST_HEAD(rxrpc_connections);
LIST_HEAD(rxrpc_connection_proc_list);
DEFINE_RWLOCK(rxrpc_connection_lock);
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);

static void rxrpc_destroy_connection(struct rcu_head *);

/*
 * Allocate a new connection.
 */
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_LIST_HEAD(&conn->cache_link);
		spin_lock_init(&conn->channel_lock);
		INIT_LIST_HEAD(&conn->waiting_calls);
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
		INIT_LIST_HEAD(&conn->proc_link);
		INIT_LIST_HEAD(&conn->link);
		skb_queue_head_init(&conn->rx_queue);
		conn->security = &rxrpc_no_security;
		spin_lock_init(&conn->state_lock);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->size_align = 4;
		conn->idle_timestamp = jiffies;
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}

/*
 * Look up a connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is taken.
 * NULL is returned if there is no match.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
						   struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct sockaddr_rxrpc srx;
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
		goto not_found;

	/* We may have to handle mixing IPv4 and IPv6 */
	if (srx.transport.family != local->srx.transport.family) {
		pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
				    srx.transport.family,
				    local->srx.transport.family);
		goto not_found;
	}

	k.epoch = sp->hdr.epoch;
	k.cid = sp->hdr.cid & RXRPC_CIDMASK;

	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
		/* We need to look up service connections by the full protocol
		 * parameter set.  We look up the peer first as an intermediate
		 * step and then the connection from the peer's tree.
		 */
		peer = rxrpc_lookup_peer_rcu(local, &srx);
		if (!peer)
			goto not_found;
		conn = rxrpc_find_service_conn_rcu(peer, skb);
		if (!conn || atomic_read(&conn->usage) == 0)
			goto not_found;
		_leave(" = %p", conn);
		return conn;
	} else {
		/* Look up client connections by connection ID alone as their
		 * IDs are unique for this machine.
		 */
		conn = idr_find(&rxrpc_client_conn_ids,
				sp->hdr.cid >> RXRPC_CIDSHIFT);
		if (!conn || atomic_read(&conn->usage) == 0) {
			_debug("no conn");
			goto not_found;
		}

		if (conn->proto.epoch != k.epoch ||
		    conn->params.local != local)
			goto not_found;

		peer = conn->params.peer;
		switch (srx.transport.family) {
		case AF_INET:
			if (peer->srx.transport.sin.sin_port !=
			    srx.transport.sin.sin_port ||
			    peer->srx.transport.sin.sin_addr.s_addr !=
			    srx.transport.sin.sin_addr.s_addr)
				goto not_found;
			break;
#ifdef CONFIG_AF_RXRPC_IPV6
		case AF_INET6:
			if (peer->srx.transport.sin6.sin6_port !=
			    srx.transport.sin6.sin6_port ||
			    memcmp(&peer->srx.transport.sin6.sin6_addr,
				   &srx.transport.sin6.sin6_addr,
				   sizeof(struct in6_addr)) != 0)
				goto not_found;
			break;
#endif
		default:
			BUG();
		}

		_leave(" = %p", conn);
		return conn;
	}

not_found:
	_leave(" = NULL");
	return NULL;
}
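
/* Illustrative only: rxrpc_find_connection_rcu() returns an unreferenced
 * pointer, so a caller that wants to use the connection outside the RCU
 * read-side critical section must convert the pointer into a ref before
 * unlocking.  A minimal sketch of that pattern (error handling elided):
 *
 *	rcu_read_lock();
 *	conn = rxrpc_get_connection_maybe(
 *		rxrpc_find_connection_rcu(local, skb));
 *	rcu_read_unlock();
 *	if (conn) {
 *		... use the connection ...
 *		rxrpc_put_connection(conn);
 *	}
 */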

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if
		 * necessary through the channel, whilst disposing of the
		 * actual call record.
		 */
		chan->last_service_id = call->service_id;
		if (call->abort_code) {
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
		} else {
			chan->last_seq = call->rx_hard_ack;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
		}
		/* Sync with rxrpc_conn_retransmit(). */
		smp_wmb();
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;

		rcu_assign_pointer(chan->call, NULL);
	}

	_leave("");
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	spin_lock_bh(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock_bh(&conn->params.peer->lock);

	if (rxrpc_is_client_call(call))
		return rxrpc_disconnect_client_call(call);

	spin_lock(&conn->channel_lock);
	__rxrpc_disconnect_call(conn, call);
	spin_unlock(&conn->channel_lock);

	call->conn = NULL;
	conn->idle_timestamp = jiffies;
	rxrpc_put_connection(conn);
}
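
/* A note on the ordering in __rxrpc_disconnect_call() above: the saved
 * result (last_type, last_abort, last_seq) is written before the smp_wmb(),
 * and only then is chan->last_call updated, so a reader that observes the
 * new last_call value is guaranteed to see the result that goes with it.
 * A sketch of the assumed reader side, pairing the write barrier with a
 * read barrier:
 *
 *	call_id = READ_ONCE(chan->last_call);
 *	smp_rmb();
 *	type = chan->last_type;
 *	abort_code = chan->last_abort;
 */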

/*
 * Kill off a connection.
 */
void rxrpc_kill_connection(struct rxrpc_connection *conn)
{
	ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
	       !rcu_access_pointer(conn->channels[1].call) &&
	       !rcu_access_pointer(conn->channels[2].call) &&
	       !rcu_access_pointer(conn->channels[3].call));
	ASSERT(list_empty(&conn->cache_link));

	write_lock(&rxrpc_connection_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxrpc_connection_lock);

	/* Drain the Rx queue.  Note that even though we've unpublished, an
	 * incoming packet could still be being added to our Rx queue, so we
	 * will need to drain it again in the RCU cleanup handler.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	/* Leave final destruction to RCU.  The connection processor work item
	 * must carry a ref on the connection to prevent us getting here whilst
	 * it is queued or running.
	 */
	call_rcu(&conn->rcu, rxrpc_destroy_connection);
}

/*
 * Queue a connection's work processor, getting a ref to pass to the work
 * queue.
 */
bool rxrpc_queue_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n = __atomic_add_unless(&conn->usage, 1, 0);

	if (n == 0)
		return false;
	if (rxrpc_queue_work(&conn->processor))
		trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
	else
		rxrpc_put_connection(conn);
	return true;
}

/*
 * Note the re-emergence of a connection.
 */
void rxrpc_see_connection(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);

	if (conn) {
		int n = atomic_read(&conn->usage);

		trace_rxrpc_conn(conn, rxrpc_conn_seen, n, here);
	}
}

/*
 * Get a ref on a connection.
 */
void rxrpc_get_connection(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&conn->usage);

	trace_rxrpc_conn(conn, rxrpc_conn_got, n, here);
}
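
/* rxrpc_queue_conn() above and rxrpc_get_connection_maybe() below both rely
 * on the conditional-increment pattern: __atomic_add_unless(&conn->usage,
 * 1, 0) takes a ref only if the usage count is not already zero.  Roughly,
 * as a sketch:
 *
 *	do {
 *		old = atomic_read(&conn->usage);
 *		if (old == 0)
 *			return 0;	(i.e. no ref taken)
 *	} while (atomic_cmpxchg(&conn->usage, old, old + 1) != old);
 *	return old;
 *
 * Once the reaper has dropped the count to zero, the connection is
 * committed to destruction and cannot be resurrected.
 */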

/*
 * Try to get a ref on a connection.
 */
struct rxrpc_connection *
rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);

	if (conn) {
		int n = __atomic_add_unless(&conn->usage, 1, 0);
		if (n > 0)
			trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
		else
			conn = NULL;
	}
	return conn;
}

/*
 * Release a service connection.
 */
void rxrpc_put_service_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_dec_return(&conn->usage);
	trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
	ASSERTCMP(n, >=, 0);
	if (n == 0)
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
}

/*
 * Destroy a virtual connection.
 */
static void rxrpc_destroy_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);

	_enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	rxrpc_purge_queue(&conn->rx_queue);

	conn->security->clear(conn);
	key_put(conn->params.key);
	key_put(conn->server_key);
	rxrpc_put_peer(conn->params.peer);
	rxrpc_put_local(conn->params.local);

	kfree(conn);
	_leave("");
}

/*
 * Reap dead service connections.
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	unsigned long reap_older_than, earliest, idle_timestamp, now;
	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	reap_older_than = now - rxrpc_connection_expiry * HZ;
	earliest = ULONG_MAX;

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		ASSERTCMP(atomic_read(&conn->usage), >, 0);
		if (likely(atomic_read(&conn->usage) > 1))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		idle_timestamp = READ_ONCE(conn->idle_timestamp);
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long)reap_older_than - (long)idle_timestamp);

		if (time_after(idle_timestamp, reap_older_than)) {
			if (time_before(idle_timestamp, earliest))
				earliest = idle_timestamp;
			continue;
		}

		/* The usage count sits at 1 whilst the object is unused on
		 * the list; we reduce that to 0 to make the object
		 * unavailable.
		 */
		if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
			continue;

		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxrpc_connection_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long)earliest - (long)now);
		ASSERT(time_after(earliest, now));
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 earliest - now);
	}

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_kill_connection(conn);
	}

	_leave("");
}
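
/* The atomic_cmpxchg(&conn->usage, 1, 0) in the reaper above races against
 * rxrpc_get_connection_maybe(), and exactly one side can win: either the
 * reaper moves the count 1 -> 0 and the connection goes to the graveyard,
 * or a new user moves it 1 -> 2 first, the cmpxchg fails and the connection
 * is left alone until it next falls idle.  This is why an unused connection
 * sits on the list with a usage count of 1 rather than 0.
 */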

/*
 * Preemptively destroy all the service connection records rather than
 * waiting for them to time out.
 */
void __exit rxrpc_destroy_all_connections(void)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	rxrpc_destroy_all_client_connections();

	rxrpc_connection_expiry = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, atomic_read(&conn->usage));
		leak = true;
	}
	write_unlock(&rxrpc_connection_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxrpc_connection_proc_list));

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();
	rxrpc_destroy_client_conn_ids();

	_leave("");
}