/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);

/*
 * allocate a new connection
 */
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
        struct rxrpc_connection *conn;

        _enter("");

        conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
        if (conn) {
                spin_lock_init(&conn->channel_lock);
                init_waitqueue_head(&conn->channel_wq);
                INIT_WORK(&conn->processor, &rxrpc_process_connection);
                INIT_LIST_HEAD(&conn->link);
                skb_queue_head_init(&conn->rx_queue);
                conn->security = &rxrpc_no_security;
                spin_lock_init(&conn->state_lock);
                /* We maintain an extra ref on the connection whilst it is
                 * on the rxrpc_connections list.
                 */
                atomic_set(&conn->usage, 2);
                conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
                atomic_set(&conn->avail_chans, RXRPC_MAXCALLS);
                conn->size_align = 4;
                conn->header_size = sizeof(struct rxrpc_wire_header);
        }

        _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
        return conn;
}

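/* Illustrative sketch, not part of the original file: the usage count is
 * initialised to 2 above because one ref belongs to the rxrpc_connections
 * list and one to the creating caller.  A caller that publishes the
 * connection and then finishes with it might do the following (error
 * handling and the real publication helpers elided):
 *
 *        conn = rxrpc_alloc_connection(GFP_KERNEL);
 *        if (!conn)
 *                return -ENOMEM;
 *        write_lock(&rxrpc_connection_lock);
 *        list_add_tail(&conn->link, &rxrpc_connections);
 *        write_unlock(&rxrpc_connection_lock);
 *        ...
 *        rxrpc_put_connection(conn);
 *
 * The final put drops the caller's ref; the list ref keeps the connection
 * alive until the reaper expires it.
 */
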
/*
 * Look up a connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is
 * taken.  NULL is returned if there is no match.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
                                                   struct sk_buff *skb)
{
        struct rxrpc_connection *conn;
        struct rxrpc_conn_proto k;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct sockaddr_rxrpc srx;
        struct rxrpc_peer *peer;

        _enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

        if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
                goto not_found;

        /* We may have to handle mixing IPv4 and IPv6 */
        if (srx.transport.family != local->srx.transport.family) {
                pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
                                    srx.transport.family,
                                    local->srx.transport.family);
                goto not_found;
        }

        k.epoch = sp->hdr.epoch;
        k.cid   = sp->hdr.cid & RXRPC_CIDMASK;

        if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
                /* We need to look up service connections by the full
                 * protocol parameter set.  We look up the peer first as an
                 * intermediate step and then the connection from the peer's
                 * tree.
                 */
                peer = rxrpc_lookup_peer_rcu(local, &srx);
                if (!peer)
                        goto not_found;
                conn = rxrpc_find_service_conn_rcu(peer, skb);
                if (!conn || atomic_read(&conn->usage) == 0)
                        goto not_found;
                _leave(" = %p", conn);
                return conn;
        } else {
                /* Look up client connections by connection ID alone as their
                 * IDs are unique for this machine.
                 */
                conn = idr_find(&rxrpc_client_conn_ids,
                                sp->hdr.cid >> RXRPC_CIDSHIFT);
                if (!conn || atomic_read(&conn->usage) == 0) {
                        _debug("no conn");
                        goto not_found;
                }

                if (conn->proto.epoch != k.epoch ||
                    conn->params.local != local)
                        goto not_found;

                peer = conn->params.peer;
                switch (srx.transport.family) {
                case AF_INET:
                        if (peer->srx.transport.sin.sin_port !=
                            srx.transport.sin.sin_port ||
                            peer->srx.transport.sin.sin_addr.s_addr !=
                            srx.transport.sin.sin_addr.s_addr)
                                goto not_found;
                        break;
                default:
                        BUG();
                }

                _leave(" = %p", conn);
                return conn;
        }

not_found:
        _leave(" = NULL");
        return NULL;
}

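/* Illustrative sketch, not part of the original file: since
 * rxrpc_find_connection_rcu() takes no ref on the connection it returns,
 * a caller must bracket the lookup and all use of the result with the RCU
 * read lock, taking its own ref if the connection must outlive the
 * critical section:
 *
 *        rcu_read_lock();
 *        conn = rxrpc_find_connection_rcu(local, skb);
 *        if (conn && atomic_inc_not_zero(&conn->usage)) {
 *                rcu_read_unlock();
 *                ... use conn, then rxrpc_put_connection(conn) ...
 *        } else {
 *                rcu_read_unlock();
 *        }
 */
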
/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_call *call)
{
        struct rxrpc_connection *conn = call->conn;
        struct rxrpc_channel *chan = &conn->channels[call->channel];

        _enter("%d,%d", conn->debug_id, call->channel);

        if (rcu_access_pointer(chan->call) == call) {
                /* Save the result of the call so that we can repeat it if
                 * necessary through the channel, whilst disposing of the
                 * actual call record.
                 */
                chan->last_result = call->local_abort;
                smp_wmb();
                chan->last_call = chan->call_id;
                chan->call_id = chan->call_counter;

                rcu_assign_pointer(chan->call, NULL);
                atomic_inc(&conn->avail_chans);
                wake_up(&conn->channel_wq);
        }

        _leave("");
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
        struct rxrpc_connection *conn = call->conn;

        spin_lock(&conn->channel_lock);
        __rxrpc_disconnect_call(call);
        spin_unlock(&conn->channel_lock);

        call->conn = NULL;
        rxrpc_put_connection(conn);
}

/*
 * release a virtual connection
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
        if (!conn)
                return;

        _enter("%p{u=%d,d=%d}",
               conn, atomic_read(&conn->usage), conn->debug_id);

        ASSERTCMP(atomic_read(&conn->usage), >, 1);

        conn->put_time = ktime_get_seconds();
        if (atomic_dec_return(&conn->usage) == 1) {
                _debug("zombie");
                rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
        }

        _leave("");
}

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rcu_head *rcu)
{
        struct rxrpc_connection *conn =
                container_of(rcu, struct rxrpc_connection, rcu);

        _enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));

        ASSERTCMP(atomic_read(&conn->usage), ==, 0);

        _net("DESTROY CONN %d", conn->debug_id);

        rxrpc_purge_queue(&conn->rx_queue);

        conn->security->clear(conn);
        key_put(conn->params.key);
        key_put(conn->server_key);
        rxrpc_put_peer(conn->params.peer);
        rxrpc_put_local(conn->params.local);

        kfree(conn);
        _leave("");
}

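/* Illustrative sketch, not part of the original file: the functions above
 * and the reaper below form a staged teardown pipeline:
 *
 *        rxrpc_put_connection()        usage 2 -> 1; records put_time and
 *                                      kicks the reaper when the last
 *                                      external ref goes away
 *        rxrpc_connection_reaper()     usage 1 -> 0 via atomic_cmpxchg()
 *                                      once the connection has been unused
 *                                      for rxrpc_connection_expiry seconds
 *        rxrpc_destroy_connection()    invoked from call_rcu() after a
 *                                      grace period, so concurrent
 *                                      rxrpc_find_connection_rcu() readers
 *                                      never see a freed connection
 */
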
/*
 * reap dead connections
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
        struct rxrpc_connection *conn, *_p;
        unsigned long reap_older_than, earliest, put_time, now;

        LIST_HEAD(graveyard);

        _enter("");

        now = ktime_get_seconds();
        reap_older_than = now - rxrpc_connection_expiry;
        earliest = ULONG_MAX;

        write_lock(&rxrpc_connection_lock);
        list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
                ASSERTCMP(atomic_read(&conn->usage), >, 0);
                if (likely(atomic_read(&conn->usage) > 1))
                        continue;

                put_time = READ_ONCE(conn->put_time);
                if (time_after(put_time, reap_older_than)) {
                        /* Not yet expired.  Note the earliest time at which
                         * a connection will expire so that the reaper can
                         * be rescheduled for it.
                         */
                        if (put_time + rxrpc_connection_expiry < earliest)
                                earliest = put_time + rxrpc_connection_expiry;
                        continue;
                }

                /* The usage count sits at 1 whilst the object is unused on
                 * the list; we reduce that to 0 to make the object
                 * unavailable.
                 */
                if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
                        continue;

                if (rxrpc_conn_is_client(conn))
                        rxrpc_unpublish_client_conn(conn);
                else
                        rxrpc_unpublish_service_conn(conn);

                list_move_tail(&conn->link, &graveyard);
        }
        write_unlock(&rxrpc_connection_lock);

        if (earliest != ULONG_MAX) {
                _debug("reschedule reaper %ld", (long)(earliest - now));
                ASSERTCMP(earliest, >, now);
                rxrpc_queue_delayed_work(&rxrpc_connection_reap,
                                         (earliest - now) * HZ);
        }

        while (!list_empty(&graveyard)) {
                conn = list_entry(graveyard.next, struct rxrpc_connection,
                                  link);
                list_del_init(&conn->link);

                ASSERTCMP(atomic_read(&conn->usage), ==, 0);
                skb_queue_purge(&conn->rx_queue);
                call_rcu(&conn->rcu, rxrpc_destroy_connection);
        }

        _leave("");
}

/*
 * preemptively destroy all the connection records rather than waiting for
 * them to time out
 */
void __exit rxrpc_destroy_all_connections(void)
{
        struct rxrpc_connection *conn, *_p;
        bool leak = false;

        _enter("");

        rxrpc_connection_expiry = 0;
        cancel_delayed_work(&rxrpc_connection_reap);
        rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
        flush_workqueue(rxrpc_workqueue);

        write_lock(&rxrpc_connection_lock);
        list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
                pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
                       conn, atomic_read(&conn->usage));
                leak = true;
        }
        write_unlock(&rxrpc_connection_lock);
        BUG_ON(leak);

        /* Make sure the local and peer records pinned by any dying
         * connections are released.
         */
        rcu_barrier();
        rxrpc_destroy_client_conn_ids();

        _leave("");
}

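/* Illustrative worked example, not part of the original file: with the
 * default rxrpc_connection_expiry of 600s, suppose the reaper runs at
 * now = 1300 and finds an unused connection with put_time = 1000.  Then
 * reap_older_than = 1300 - 600 = 700, and since time_after(1000, 700) the
 * connection is left alone; earliest becomes 1000 + 600 = 1600 and the
 * reaper requeues itself for (1600 - 1300) * HZ, i.e. 300 seconds later,
 * which is exactly when the connection's idle period will have elapsed.
 */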