conn_client.c (0d6bf319bc5aba4535bb46e1b607973688a2248a) | conn_client.c (9d35d880e0e4a3ab32d8c12f9e4d76198aadd42d) |
---|---|
1// SPDX-License-Identifier: GPL-2.0-or-later 2/* Client connection-specific management code. 3 * 4 * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved. 5 * Written by David Howells (dhowells@redhat.com) 6 * 7 * Client connections need to be cached for a little while after they've made a 8 * call so as to handle retransmitted DATA packets in case the server didn't --- 26 unchanged lines hidden (view full) --- 35__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ; 36 37static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle) 38{ 39 atomic_inc(&bundle->active); 40} 41 42/* | 1// SPDX-License-Identifier: GPL-2.0-or-later 2/* Client connection-specific management code. 3 * 4 * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved. 5 * Written by David Howells (dhowells@redhat.com) 6 * 7 * Client connections need to be cached for a little while after they've made a 8 * call so as to handle retransmitted DATA packets in case the server didn't --- 26 unchanged lines hidden (view full) --- 35__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ; 36 37static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle) 38{ 39 atomic_inc(&bundle->active); 40} 41 42/* |
43 * Get a connection ID and epoch for a client connection from the global pool. 44 * The connection struct pointer is then recorded in the idr radix tree. The 45 * epoch doesn't change until the client is rebooted (or, at least, unless the 46 * module is unloaded). 47 */ 48static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn, 49 gfp_t gfp) 50{ 51 struct rxrpc_local *local = conn->local; 52 int id; 53 54 _enter(""); 55 56 idr_preload(gfp); 57 spin_lock(&local->conn_lock); 58 59 id = idr_alloc_cyclic(&local->conn_ids, conn, 60 1, 0x40000000, GFP_NOWAIT); 61 if (id < 0) 62 goto error; 63 64 spin_unlock(&local->conn_lock); 65 idr_preload_end(); 66 67 conn->proto.epoch = local->rxnet->epoch; 68 conn->proto.cid = id << RXRPC_CIDSHIFT; 69 set_bit(RXRPC_CONN_HAS_IDR, &conn->flags); 70 _leave(" [CID %x]", conn->proto.cid); 71 return 0; 72 73error: 74 spin_unlock(&local->conn_lock); 75 idr_preload_end(); 76 _leave(" = %d", id); 77 return id; 78} 79 80/* | |
81 * Release a connection ID for a client connection. 82 */ 83static void rxrpc_put_client_connection_id(struct rxrpc_local *local, 84 struct rxrpc_connection *conn) 85{ | 43 * Release a connection ID for a client connection. 44 */ 45static void rxrpc_put_client_connection_id(struct rxrpc_local *local, 46 struct rxrpc_connection *conn) 47{ |
86 if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) { 87 spin_lock(&local->conn_lock); 88 idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT); 89 spin_unlock(&local->conn_lock); 90 } | 48 idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT); |
91} 92 93/* 94 * Destroy the client connection ID tree. 95 */ | 49} 50 51/* 52 * Destroy the client connection ID tree. 53 */ |
96void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local) | 54static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local) |
97{ 98 struct rxrpc_connection *conn; 99 int id; 100 101 if (!idr_is_empty(&local->conn_ids)) { 102 idr_for_each_entry(&local->conn_ids, conn, id) { 103 pr_err("AF_RXRPC: Leaked client conn %p {%d}\n", 104 conn, refcount_read(&conn->ref)); --- 19 unchanged lines hidden (view full) --- 124 bundle->key = key_get(call->key); 125 bundle->security = call->security; 126 bundle->exclusive = test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags); 127 bundle->upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags); 128 bundle->service_id = call->dest_srx.srx_service; 129 bundle->security_level = call->security_level; 130 refcount_set(&bundle->ref, 1); 131 atomic_set(&bundle->active, 1); | 55{ 56 struct rxrpc_connection *conn; 57 int id; 58 59 if (!idr_is_empty(&local->conn_ids)) { 60 idr_for_each_entry(&local->conn_ids, conn, id) { 61 pr_err("AF_RXRPC: Leaked client conn %p {%d}\n", 62 conn, refcount_read(&conn->ref)); --- 19 unchanged lines hidden (view full) --- 82 bundle->key = key_get(call->key); 83 bundle->security = call->security; 84 bundle->exclusive = test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags); 85 bundle->upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags); 86 bundle->service_id = call->dest_srx.srx_service; 87 bundle->security_level = call->security_level; 88 refcount_set(&bundle->ref, 1); 89 atomic_set(&bundle->active, 1); |
132 spin_lock_init(&bundle->channel_lock); | |
133 INIT_LIST_HEAD(&bundle->waiting_calls); 134 trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new); 135 } 136 return bundle; 137} 138 139struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle, 140 enum rxrpc_bundle_trace why) --- 24 unchanged lines hidden (view full) --- 165 dead = __refcount_dec_and_test(&bundle->ref, &r); 166 trace_rxrpc_bundle(id, r - 1, why); 167 if (dead) 168 rxrpc_free_bundle(bundle); 169 } 170} 171 172/* | 90 INIT_LIST_HEAD(&bundle->waiting_calls); 91 trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new); 92 } 93 return bundle; 94} 95 96struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle, 97 enum rxrpc_bundle_trace why) --- 24 unchanged lines hidden (view full) --- 122 dead = __refcount_dec_and_test(&bundle->ref, &r); 123 trace_rxrpc_bundle(id, r - 1, why); 124 if (dead) 125 rxrpc_free_bundle(bundle); 126 } 127} 128 129/* |
130 * Get rid of outstanding client connection preallocations when a local 131 * endpoint is destroyed. 132 */ 133void rxrpc_purge_client_connections(struct rxrpc_local *local) 134{ 135 rxrpc_destroy_client_conn_ids(local); 136} 137 138/* |
|
173 * Allocate a client connection. 174 */ 175static struct rxrpc_connection * | 139 * Allocate a client connection. 140 */ 141static struct rxrpc_connection * |
176rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp) | 142rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle) |
177{ 178 struct rxrpc_connection *conn; | 143{ 144 struct rxrpc_connection *conn; |
179 struct rxrpc_net *rxnet = bundle->local->rxnet; 180 int ret; | 145 struct rxrpc_local *local = bundle->local; 146 struct rxrpc_net *rxnet = local->rxnet; 147 int id; |
181 182 _enter(""); 183 | 148 149 _enter(""); 150 |
184 conn = rxrpc_alloc_connection(rxnet, gfp); 185 if (!conn) { 186 _leave(" = -ENOMEM"); | 151 conn = rxrpc_alloc_connection(rxnet, GFP_ATOMIC | __GFP_NOWARN); 152 if (!conn) |
187 return ERR_PTR(-ENOMEM); | 153 return ERR_PTR(-ENOMEM); |
154 155 id = idr_alloc_cyclic(&local->conn_ids, conn, 1, 0x40000000, 156 GFP_ATOMIC | __GFP_NOWARN); 157 if (id < 0) { 158 kfree(conn); 159 return ERR_PTR(id); |
|
188 } 189 190 refcount_set(&conn->ref, 1); | 160 } 161 162 refcount_set(&conn->ref, 1); |
191 conn->bundle = bundle; 192 conn->local = bundle->local; 193 conn->peer = bundle->peer; 194 conn->key = bundle->key; | 163 conn->proto.cid = id << RXRPC_CIDSHIFT; 164 conn->proto.epoch = local->rxnet->epoch; 165 conn->out_clientflag = RXRPC_CLIENT_INITIATED; 166 conn->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn); 167 conn->local = rxrpc_get_local(bundle->local, rxrpc_local_get_client_conn); 168 conn->peer = rxrpc_get_peer(bundle->peer, rxrpc_peer_get_client_conn); 169 conn->key = key_get(bundle->key); 170 conn->security = bundle->security; |
195 conn->exclusive = bundle->exclusive; 196 conn->upgrade = bundle->upgrade; 197 conn->orig_service_id = bundle->service_id; 198 conn->security_level = bundle->security_level; | 171 conn->exclusive = bundle->exclusive; 172 conn->upgrade = bundle->upgrade; 173 conn->orig_service_id = bundle->service_id; 174 conn->security_level = bundle->security_level; |
199 conn->out_clientflag = RXRPC_CLIENT_INITIATED; 200 conn->state = RXRPC_CONN_CLIENT; | 175 conn->state = RXRPC_CONN_CLIENT_UNSECURED; |
201 conn->service_id = conn->orig_service_id; 202 | 176 conn->service_id = conn->orig_service_id; 177 |
203 ret = rxrpc_get_client_connection_id(conn, gfp); 204 if (ret < 0) 205 goto error_0; | 178 if (conn->security == &rxrpc_no_security) 179 conn->state = RXRPC_CONN_CLIENT; |
206 | 180 |
207 ret = rxrpc_init_client_conn_security(conn); 208 if (ret < 0) 209 goto error_1; 210 | |
211 atomic_inc(&rxnet->nr_conns); 212 write_lock(&rxnet->conn_lock); 213 list_add_tail(&conn->proc_link, &rxnet->conn_proc_list); 214 write_unlock(&rxnet->conn_lock); 215 | 181 atomic_inc(&rxnet->nr_conns); 182 write_lock(&rxnet->conn_lock); 183 list_add_tail(&conn->proc_link, &rxnet->conn_proc_list); 184 write_unlock(&rxnet->conn_lock); 185 |
216 rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn); 217 rxrpc_get_peer(conn->peer, rxrpc_peer_get_client_conn); 218 rxrpc_get_local(conn->local, rxrpc_local_get_client_conn); 219 key_get(conn->key); | 186 rxrpc_see_connection(conn, rxrpc_conn_new_client); |
220 | 187 |
221 trace_rxrpc_conn(conn->debug_id, refcount_read(&conn->ref), 222 rxrpc_conn_new_client); 223 | |
224 atomic_inc(&rxnet->nr_client_conns); 225 trace_rxrpc_client(conn, -1, rxrpc_client_alloc); | 188 atomic_inc(&rxnet->nr_client_conns); 189 trace_rxrpc_client(conn, -1, rxrpc_client_alloc); |
226 _leave(" = %p", conn); | |
227 return conn; | 190 return conn; |
228 229error_1: 230 rxrpc_put_client_connection_id(bundle->local, conn); 231error_0: 232 kfree(conn); 233 _leave(" = %d", ret); 234 return ERR_PTR(ret); | |
235} 236 237/* 238 * Determine if a connection may be reused. 239 */ 240static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn) 241{ 242 struct rxrpc_net *rxnet; 243 int id_cursor, id, distance, limit; 244 245 if (!conn) 246 goto dont_reuse; 247 248 rxnet = conn->rxnet; 249 if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags)) 250 goto dont_reuse; 251 | 191} 192 193/* 194 * Determine if a connection may be reused. 195 */ 196static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn) 197{ 198 struct rxrpc_net *rxnet; 199 int id_cursor, id, distance, limit; 200 201 if (!conn) 202 goto dont_reuse; 203 204 rxnet = conn->rxnet; 205 if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags)) 206 goto dont_reuse; 207 |
252 if (conn->state != RXRPC_CONN_CLIENT || | 208 if ((conn->state != RXRPC_CONN_CLIENT_UNSECURED && 209 conn->state != RXRPC_CONN_CLIENT) || |
253 conn->proto.epoch != rxnet->epoch) 254 goto mark_dont_reuse; 255 256 /* The IDR tree gets very expensive on memory if the connection IDs are 257 * widely scattered throughout the number space, so we shall want to 258 * kill off connections that, say, have an ID more than about four 259 * times the maximum number of client conns away from the current 260 * allocation point to try and keep the IDs concentrated. --- 14 unchanged lines hidden (view full) --- 275dont_reuse: 276 return false; 277} 278 279/* 280 * Look up the conn bundle that matches the connection parameters, adding it if 281 * it doesn't yet exist. 282 */ | 210 conn->proto.epoch != rxnet->epoch) 211 goto mark_dont_reuse; 212 213 /* The IDR tree gets very expensive on memory if the connection IDs are 214 * widely scattered throughout the number space, so we shall want to 215 * kill off connections that, say, have an ID more than about four 216 * times the maximum number of client conns away from the current 217 * allocation point to try and keep the IDs concentrated. --- 14 unchanged lines hidden (view full) --- 232dont_reuse: 233 return false; 234} 235 236/* 237 * Look up the conn bundle that matches the connection parameters, adding it if 238 * it doesn't yet exist. 239 */ |
283static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp) | 240int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp) |
284{ 285 static atomic_t rxrpc_bundle_id; 286 struct rxrpc_bundle *bundle, *candidate; 287 struct rxrpc_local *local = call->local; 288 struct rb_node *p, **pp, *parent; 289 long diff; 290 bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags); 291 292 _enter("{%px,%x,%u,%u}", 293 call->peer, key_serial(call->key), call->security_level, 294 upgrade); 295 296 if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) { 297 call->bundle = rxrpc_alloc_bundle(call, gfp); | 241{ 242 static atomic_t rxrpc_bundle_id; 243 struct rxrpc_bundle *bundle, *candidate; 244 struct rxrpc_local *local = call->local; 245 struct rb_node *p, **pp, *parent; 246 long diff; 247 bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags); 248 249 _enter("{%px,%x,%u,%u}", 250 call->peer, key_serial(call->key), call->security_level, 251 upgrade); 252 253 if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) { 254 call->bundle = rxrpc_alloc_bundle(call, gfp); |
298 return call->bundle; | 255 return call->bundle ? 0 : -ENOMEM; |
299 } 300 301 /* First, see if the bundle is already there. */ 302 _debug("search 1"); 303 spin_lock(&local->client_bundles_lock); 304 p = local->client_bundles.rb_node; 305 while (p) { 306 bundle = rb_entry(p, struct rxrpc_bundle, local_node); --- 12 unchanged lines hidden (view full) --- 319 goto found_bundle; 320 } 321 spin_unlock(&local->client_bundles_lock); 322 _debug("not found"); 323 324 /* It wasn't. We need to add one. */ 325 candidate = rxrpc_alloc_bundle(call, gfp); 326 if (!candidate) | 256 } 257 258 /* First, see if the bundle is already there. */ 259 _debug("search 1"); 260 spin_lock(&local->client_bundles_lock); 261 p = local->client_bundles.rb_node; 262 while (p) { 263 bundle = rb_entry(p, struct rxrpc_bundle, local_node); --- 12 unchanged lines hidden (view full) --- 276 goto found_bundle; 277 } 278 spin_unlock(&local->client_bundles_lock); 279 _debug("not found"); 280 281 /* It wasn't. We need to add one. */ 282 candidate = rxrpc_alloc_bundle(call, gfp); 283 if (!candidate) |
327 return ERR_PTR(-ENOMEM); | 284 return -ENOMEM; |
328 329 _debug("search 2"); 330 spin_lock(&local->client_bundles_lock); 331 pp = &local->client_bundles.rb_node; 332 parent = NULL; 333 while (*pp) { 334 parent = *pp; 335 bundle = rb_entry(parent, struct rxrpc_bundle, local_node); --- 14 unchanged lines hidden (view full) --- 350 351 _debug("new bundle"); 352 candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id); 353 rb_link_node(&candidate->local_node, parent, pp); 354 rb_insert_color(&candidate->local_node, &local->client_bundles); 355 call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call); 356 spin_unlock(&local->client_bundles_lock); 357 _leave(" = B=%u [new]", call->bundle->debug_id); | 285 286 _debug("search 2"); 287 spin_lock(&local->client_bundles_lock); 288 pp = &local->client_bundles.rb_node; 289 parent = NULL; 290 while (*pp) { 291 parent = *pp; 292 bundle = rb_entry(parent, struct rxrpc_bundle, local_node); --- 14 unchanged lines hidden (view full) --- 307 308 _debug("new bundle"); 309 candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id); 310 rb_link_node(&candidate->local_node, parent, pp); 311 rb_insert_color(&candidate->local_node, &local->client_bundles); 312 call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call); 313 spin_unlock(&local->client_bundles_lock); 314 _leave(" = B=%u [new]", call->bundle->debug_id); |
358 return call->bundle; | 315 return 0; |
359 360found_bundle_free: 361 rxrpc_free_bundle(candidate); 362found_bundle: 363 call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call); 364 rxrpc_activate_bundle(bundle); 365 spin_unlock(&local->client_bundles_lock); 366 _leave(" = B=%u [found]", call->bundle->debug_id); | 316 317found_bundle_free: 318 rxrpc_free_bundle(candidate); 319found_bundle: 320 call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call); 321 rxrpc_activate_bundle(bundle); 322 spin_unlock(&local->client_bundles_lock); 323 _leave(" = B=%u [found]", call->bundle->debug_id); |
367 return call->bundle; | 324 return 0; |
368} 369 370/* | 325} 326 327/* |
371 * Create or find a client bundle to use for a call. 372 * 373 * If we return with a connection, the call will be on its waiting list. It's 374 * left to the caller to assign a channel and wake up the call. 375 */ 376static struct rxrpc_bundle *rxrpc_prep_call(struct rxrpc_call *call, gfp_t gfp) 377{ 378 struct rxrpc_bundle *bundle; 379 380 _enter("{%d,%lx},", call->debug_id, call->user_call_ID); 381 382 call->peer = rxrpc_lookup_peer(call->local, &call->dest_srx, gfp); 383 if (!call->peer) 384 goto error; 385 386 call->tx_last_sent = ktime_get_real(); 387 call->cong_ssthresh = call->peer->cong_ssthresh; 388 if (call->cong_cwnd >= call->cong_ssthresh) 389 call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE; 390 else 391 call->cong_mode = RXRPC_CALL_SLOW_START; 392 393 /* Find the client connection bundle. */ 394 bundle = rxrpc_look_up_bundle(call, gfp); 395 if (!bundle) 396 goto error; 397 398 /* Get this call queued. Someone else may activate it whilst we're 399 * lining up a new connection, but that's fine. 400 */ 401 spin_lock(&bundle->channel_lock); 402 list_add_tail(&call->chan_wait_link, &bundle->waiting_calls); 403 spin_unlock(&bundle->channel_lock); 404 405 _leave(" = [B=%x]", bundle->debug_id); 406 return bundle; 407 408error: 409 _leave(" = -ENOMEM"); 410 return ERR_PTR(-ENOMEM); 411} 412 413/* | |
414 * Allocate a new connection and add it into a bundle. 415 */ | 328 * Allocate a new connection and add it into a bundle. 329 */ |
416static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp) 417 __releases(bundle->channel_lock) | 330static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, 331 unsigned int slot) |
418{ | 332{ |
419 struct rxrpc_connection *candidate = NULL, *old = NULL; 420 bool conflict; 421 int i; | 333 struct rxrpc_connection *conn, *old; 334 unsigned int shift = slot * RXRPC_MAXCALLS; 335 unsigned int i; |
422 | 336 |
423 _enter(""); 424 425 conflict = bundle->alloc_conn; 426 if (!conflict) 427 bundle->alloc_conn = true; 428 spin_unlock(&bundle->channel_lock); 429 if (conflict) { 430 _leave(" [conf]"); 431 return; | 337 old = bundle->conns[slot]; 338 if (old) { 339 bundle->conns[slot] = NULL; 340 trace_rxrpc_client(old, -1, rxrpc_client_replace); 341 rxrpc_put_connection(old, rxrpc_conn_put_noreuse); |
432 } 433 | 342 } 343 |
434 candidate = rxrpc_alloc_client_connection(bundle, gfp); 435 436 spin_lock(&bundle->channel_lock); 437 bundle->alloc_conn = false; 438 439 if (IS_ERR(candidate)) { 440 bundle->alloc_error = PTR_ERR(candidate); 441 spin_unlock(&bundle->channel_lock); 442 _leave(" [err %ld]", PTR_ERR(candidate)); 443 return; | 344 conn = rxrpc_alloc_client_connection(bundle); 345 if (IS_ERR(conn)) { 346 bundle->alloc_error = PTR_ERR(conn); 347 return false; |
444 } 445 | 348 } 349 |
446 bundle->alloc_error = 0; 447 448 for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) { 449 unsigned int shift = i * RXRPC_MAXCALLS; 450 int j; 451 452 old = bundle->conns[i]; 453 if (!rxrpc_may_reuse_conn(old)) { 454 if (old) 455 trace_rxrpc_client(old, -1, rxrpc_client_replace); 456 candidate->bundle_shift = shift; 457 rxrpc_activate_bundle(bundle); 458 bundle->conns[i] = candidate; 459 for (j = 0; j < RXRPC_MAXCALLS; j++) 460 set_bit(shift + j, &bundle->avail_chans); 461 candidate = NULL; 462 break; 463 } 464 465 old = NULL; 466 } 467 468 spin_unlock(&bundle->channel_lock); 469 470 if (candidate) { 471 _debug("discard C=%x", candidate->debug_id); 472 trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate); 473 rxrpc_put_connection(candidate, rxrpc_conn_put_discard); 474 } 475 476 rxrpc_put_connection(old, rxrpc_conn_put_noreuse); 477 _leave(""); | 350 rxrpc_activate_bundle(bundle); 351 conn->bundle_shift = shift; 352 bundle->conns[slot] = conn; 353 for (i = 0; i < RXRPC_MAXCALLS; i++) 354 set_bit(shift + i, &bundle->avail_chans); 355 return true; |
478} 479 480/* 481 * Add a connection to a bundle if there are no usable connections or we have 482 * connections waiting for extra capacity. 483 */ | 356} 357 358/* 359 * Add a connection to a bundle if there are no usable connections or we have 360 * connections waiting for extra capacity. 361 */ |
484static void rxrpc_maybe_add_conn(struct rxrpc_bundle *bundle, gfp_t gfp) | 362static bool rxrpc_bundle_has_space(struct rxrpc_bundle *bundle) |
485{ | 363{ |
486 struct rxrpc_call *call; 487 int i, usable; | 364 int slot = -1, i, usable; |
488 489 _enter(""); 490 | 365 366 _enter(""); 367 |
491 spin_lock(&bundle->channel_lock); | 368 bundle->alloc_error = 0; |
492 493 /* See if there are any usable connections. */ 494 usable = 0; | 369 370 /* See if there are any usable connections. */ 371 usable = 0; |
495 for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) | 372 for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) { |
496 if (rxrpc_may_reuse_conn(bundle->conns[i])) 497 usable++; | 373 if (rxrpc_may_reuse_conn(bundle->conns[i])) 374 usable++; |
498 499 if (!usable && !list_empty(&bundle->waiting_calls)) { 500 call = list_first_entry(&bundle->waiting_calls, 501 struct rxrpc_call, chan_wait_link); 502 if (test_bit(RXRPC_CALL_UPGRADE, &call->flags)) 503 bundle->try_upgrade = true; | 375 else if (slot == -1) 376 slot = i; |
504 } 505 | 377 } 378 |
379 if (!usable && bundle->upgrade) 380 bundle->try_upgrade = true; 381 |
|
506 if (!usable) 507 goto alloc_conn; 508 509 if (!bundle->avail_chans && 510 !bundle->try_upgrade && | 382 if (!usable) 383 goto alloc_conn; 384 385 if (!bundle->avail_chans && 386 !bundle->try_upgrade && |
511 !list_empty(&bundle->waiting_calls) && | |
512 usable < ARRAY_SIZE(bundle->conns)) 513 goto alloc_conn; 514 | 387 usable < ARRAY_SIZE(bundle->conns)) 388 goto alloc_conn; 389 |
515 spin_unlock(&bundle->channel_lock); | |
516 _leave(""); | 390 _leave(""); |
517 return; | 391 return usable; |
518 519alloc_conn: | 392 393alloc_conn: |
520 return rxrpc_add_conn_to_bundle(bundle, gfp); | 394 return slot >= 0 ? rxrpc_add_conn_to_bundle(bundle, slot) : false; |
521} 522 523/* 524 * Assign a channel to the call at the front of the queue and wake the call up. 525 * We don't increment the callNumber counter until this number has been exposed 526 * to the world. 527 */ 528static void rxrpc_activate_one_channel(struct rxrpc_connection *conn, 529 unsigned int channel) 530{ 531 struct rxrpc_channel *chan = &conn->channels[channel]; 532 struct rxrpc_bundle *bundle = conn->bundle; 533 struct rxrpc_call *call = list_entry(bundle->waiting_calls.next, | 395} 396 397/* 398 * Assign a channel to the call at the front of the queue and wake the call up. 399 * We don't increment the callNumber counter until this number has been exposed 400 * to the world. 401 */ 402static void rxrpc_activate_one_channel(struct rxrpc_connection *conn, 403 unsigned int channel) 404{ 405 struct rxrpc_channel *chan = &conn->channels[channel]; 406 struct rxrpc_bundle *bundle = conn->bundle; 407 struct rxrpc_call *call = list_entry(bundle->waiting_calls.next, |
534 struct rxrpc_call, chan_wait_link); | 408 struct rxrpc_call, wait_link); |
535 u32 call_id = chan->call_counter + 1; 536 537 _enter("C=%x,%u", conn->debug_id, channel); 538 | 409 u32 call_id = chan->call_counter + 1; 410 411 _enter("C=%x,%u", conn->debug_id, channel); 412 |
413 list_del_init(&call->wait_link); 414 |
|
539 trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate); 540 541 /* Cancel the final ACK on the previous call if it hasn't been sent yet 542 * as the DATA packet will implicitly ACK it. 543 */ 544 clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); 545 clear_bit(conn->bundle_shift + channel, &bundle->avail_chans); 546 547 rxrpc_see_call(call, rxrpc_call_see_activate_client); | 415 trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate); 416 417 /* Cancel the final ACK on the previous call if it hasn't been sent yet 418 * as the DATA packet will implicitly ACK it. 419 */ 420 clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); 421 clear_bit(conn->bundle_shift + channel, &bundle->avail_chans); 422 423 rxrpc_see_call(call, rxrpc_call_see_activate_client); |
548 list_del_init(&call->chan_wait_link); | |
549 call->conn = rxrpc_get_connection(conn, rxrpc_conn_get_activate_call); 550 call->cid = conn->proto.cid | channel; 551 call->call_id = call_id; 552 call->dest_srx.srx_service = conn->service_id; | 424 call->conn = rxrpc_get_connection(conn, rxrpc_conn_get_activate_call); 425 call->cid = conn->proto.cid | channel; 426 call->call_id = call_id; 427 call->dest_srx.srx_service = conn->service_id; |
428 call->cong_ssthresh = call->peer->cong_ssthresh; 429 if (call->cong_cwnd >= call->cong_ssthresh) 430 call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE; 431 else 432 call->cong_mode = RXRPC_CALL_SLOW_START; |
|
553 | 433 |
554 trace_rxrpc_connect_call(call); 555 556 rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST); 557 558 /* Paired with the read barrier in rxrpc_connect_call(). This orders 559 * cid and epoch in the connection wrt to call_id without the need to 560 * take the channel_lock. 561 * 562 * We provisionally assign a callNumber at this point, but we don't 563 * confirm it until the call is about to be exposed. 564 * 565 * TODO: Pair with a barrier in the data_ready handler when that looks 566 * at the call ID through a connection channel. 567 */ 568 smp_wmb(); 569 | |
570 chan->call_id = call_id; 571 chan->call_debug_id = call->debug_id; | 434 chan->call_id = call_id; 435 chan->call_debug_id = call->debug_id; |
572 rcu_assign_pointer(chan->call, call); | 436 chan->call = call; 437 438 rxrpc_see_call(call, rxrpc_call_see_connected); 439 trace_rxrpc_connect_call(call); 440 call->tx_last_sent = ktime_get_real(); 441 rxrpc_start_call_timer(call); 442 rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST); |
573 wake_up(&call->waitq); 574} 575 576/* 577 * Remove a connection from the idle list if it's on it. 578 */ | 443 wake_up(&call->waitq); 444} 445 446/* 447 * Remove a connection from the idle list if it's on it. 448 */ |
579static void rxrpc_unidle_conn(struct rxrpc_bundle *bundle, struct rxrpc_connection *conn) | 449static void rxrpc_unidle_conn(struct rxrpc_connection *conn) |
580{ | 450{ |
581 struct rxrpc_local *local = bundle->local; 582 bool drop_ref; 583 | |
584 if (!list_empty(&conn->cache_link)) { | 451 if (!list_empty(&conn->cache_link)) { |
585 drop_ref = false; 586 spin_lock(&local->client_conn_cache_lock); 587 if (!list_empty(&conn->cache_link)) { 588 list_del_init(&conn->cache_link); 589 drop_ref = true; 590 } 591 spin_unlock(&local->client_conn_cache_lock); 592 if (drop_ref) 593 rxrpc_put_connection(conn, rxrpc_conn_put_unidle); | 452 list_del_init(&conn->cache_link); 453 rxrpc_put_connection(conn, rxrpc_conn_put_unidle); |
594 } 595} 596 597/* | 454 } 455} 456 457/* |
598 * Assign channels and callNumbers to waiting calls with channel_lock 599 * held by caller. | 458 * Assign channels and callNumbers to waiting calls. |
600 */ | 459 */ |
601static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle) | 460static void rxrpc_activate_channels(struct rxrpc_bundle *bundle) |
602{ 603 struct rxrpc_connection *conn; 604 unsigned long avail, mask; 605 unsigned int channel, slot; 606 | 461{ 462 struct rxrpc_connection *conn; 463 unsigned long avail, mask; 464 unsigned int channel, slot; 465 |
466 trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans); 467 |
|
607 if (bundle->try_upgrade) 608 mask = 1; 609 else 610 mask = ULONG_MAX; 611 612 while (!list_empty(&bundle->waiting_calls)) { 613 avail = bundle->avail_chans & mask; 614 if (!avail) 615 break; 616 channel = __ffs(avail); 617 clear_bit(channel, &bundle->avail_chans); 618 619 slot = channel / RXRPC_MAXCALLS; 620 conn = bundle->conns[slot]; 621 if (!conn) 622 break; 623 624 if (bundle->try_upgrade) 625 set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags); | 468 if (bundle->try_upgrade) 469 mask = 1; 470 else 471 mask = ULONG_MAX; 472 473 while (!list_empty(&bundle->waiting_calls)) { 474 avail = bundle->avail_chans & mask; 475 if (!avail) 476 break; 477 channel = __ffs(avail); 478 clear_bit(channel, &bundle->avail_chans); 479 480 slot = channel / RXRPC_MAXCALLS; 481 conn = bundle->conns[slot]; 482 if (!conn) 483 break; 484 485 if (bundle->try_upgrade) 486 set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags); |
626 rxrpc_unidle_conn(bundle, conn); | 487 rxrpc_unidle_conn(conn); |
627 628 channel &= (RXRPC_MAXCALLS - 1); 629 conn->act_chans |= 1 << channel; 630 rxrpc_activate_one_channel(conn, channel); 631 } 632} 633 634/* | 488 489 channel &= (RXRPC_MAXCALLS - 1); 490 conn->act_chans |= 1 << channel; 491 rxrpc_activate_one_channel(conn, channel); 492 } 493} 494 495/* |
635 * Assign channels and callNumbers to waiting calls. | 496 * Connect waiting channels (called from the I/O thread). |
636 */ | 497 */ |
637static void rxrpc_activate_channels(struct rxrpc_bundle *bundle) | 498void rxrpc_connect_client_calls(struct rxrpc_local *local) |
638{ | 499{ |
639 _enter("B=%x", bundle->debug_id); | 500 struct rxrpc_call *call; |
640 | 501 |
641 trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans); | 502 while ((call = list_first_entry_or_null(&local->new_client_calls, 503 struct rxrpc_call, wait_link)) 504 ) { 505 struct rxrpc_bundle *bundle = call->bundle; |
642 | 506 |
643 if (!bundle->avail_chans) 644 return; | 507 spin_lock(&local->client_call_lock); 508 list_move_tail(&call->wait_link, &bundle->waiting_calls); 509 spin_unlock(&local->client_call_lock); |
645 | 510 |
646 spin_lock(&bundle->channel_lock); 647 rxrpc_activate_channels_locked(bundle); 648 spin_unlock(&bundle->channel_lock); 649 _leave(""); 650} 651 652/* 653 * Wait for a callNumber and a channel to be granted to a call. 654 */ 655static int rxrpc_wait_for_channel(struct rxrpc_bundle *bundle, 656 struct rxrpc_call *call, gfp_t gfp) 657{ 658 DECLARE_WAITQUEUE(myself, current); 659 int ret = 0; 660 661 _enter("%d", call->debug_id); 662 663 if (!gfpflags_allow_blocking(gfp)) { 664 rxrpc_maybe_add_conn(bundle, gfp); 665 rxrpc_activate_channels(bundle); 666 ret = bundle->alloc_error ?: -EAGAIN; 667 goto out; | 511 if (rxrpc_bundle_has_space(bundle)) 512 rxrpc_activate_channels(bundle); |
668 } | 513 } |
669 670 add_wait_queue_exclusive(&call->waitq, &myself); 671 for (;;) { 672 rxrpc_maybe_add_conn(bundle, gfp); 673 rxrpc_activate_channels(bundle); 674 ret = bundle->alloc_error; 675 if (ret < 0) 676 break; 677 678 switch (call->interruptibility) { 679 case RXRPC_INTERRUPTIBLE: 680 case RXRPC_PREINTERRUPTIBLE: 681 set_current_state(TASK_INTERRUPTIBLE); 682 break; 683 case RXRPC_UNINTERRUPTIBLE: 684 default: 685 set_current_state(TASK_UNINTERRUPTIBLE); 686 break; 687 } 688 if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN) 689 break; 690 if ((call->interruptibility == RXRPC_INTERRUPTIBLE || 691 call->interruptibility == RXRPC_PREINTERRUPTIBLE) && 692 signal_pending(current)) { 693 ret = -ERESTARTSYS; 694 break; 695 } 696 schedule(); 697 } 698 remove_wait_queue(&call->waitq, &myself); 699 __set_current_state(TASK_RUNNING); 700 701out: 702 _leave(" = %d", ret); 703 return ret; | |
704} 705 706/* | 514} 515 516/* |
707 * find a connection for a call 708 * - called in process context with IRQs enabled 709 */ 710int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp) 711{ 712 struct rxrpc_bundle *bundle; 713 int ret = 0; 714 715 _enter("{%d,%lx},", call->debug_id, call->user_call_ID); 716 717 rxrpc_get_call(call, rxrpc_call_get_io_thread); 718 719 bundle = rxrpc_prep_call(call, gfp); 720 if (IS_ERR(bundle)) { 721 rxrpc_put_call(call, rxrpc_call_get_io_thread); 722 ret = PTR_ERR(bundle); 723 goto out; 724 } 725 726 if (rxrpc_call_state(call) == RXRPC_CALL_CLIENT_AWAIT_CONN) { 727 ret = rxrpc_wait_for_channel(bundle, call, gfp); 728 if (ret < 0) 729 goto wait_failed; 730 } 731 732granted_channel: 733 /* Paired with the write barrier in rxrpc_activate_one_channel(). */ 734 smp_rmb(); 735 736out: 737 _leave(" = %d", ret); 738 return ret; 739 740wait_failed: 741 spin_lock(&bundle->channel_lock); 742 list_del_init(&call->chan_wait_link); 743 spin_unlock(&bundle->channel_lock); 744 745 if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN) { 746 ret = 0; 747 goto granted_channel; 748 } 749 750 trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed); 751 rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret); 752 rxrpc_disconnect_client_call(bundle, call); 753 goto out; 754} 755 756/* | |
757 * Note that a call, and thus a connection, is about to be exposed to the 758 * world. 759 */ 760void rxrpc_expose_client_call(struct rxrpc_call *call) 761{ 762 unsigned int channel = call->cid & RXRPC_CHANNELMASK; 763 struct rxrpc_connection *conn = call->conn; 764 struct rxrpc_channel *chan = &conn->channels[channel]; --- 38 unchanged lines hidden (view full) --- 803 struct rxrpc_channel *chan = NULL; 804 struct rxrpc_local *local = bundle->local; 805 unsigned int channel; 806 bool may_reuse; 807 u32 cid; 808 809 _enter("c=%x", call->debug_id); 810 | 517 * Note that a call, and thus a connection, is about to be exposed to the 518 * world. 519 */ 520void rxrpc_expose_client_call(struct rxrpc_call *call) 521{ 522 unsigned int channel = call->cid & RXRPC_CHANNELMASK; 523 struct rxrpc_connection *conn = call->conn; 524 struct rxrpc_channel *chan = &conn->channels[channel]; --- 38 unchanged lines hidden (view full) --- 563 struct rxrpc_channel *chan = NULL; 564 struct rxrpc_local *local = bundle->local; 565 unsigned int channel; 566 bool may_reuse; 567 u32 cid; 568 569 _enter("c=%x", call->debug_id); 570 |
811 spin_lock(&bundle->channel_lock); 812 | |
813 /* Calls that have never actually been assigned a channel can simply be 814 * discarded. 815 */ 816 conn = call->conn; 817 if (!conn) { 818 _debug("call is waiting"); 819 ASSERTCMP(call->call_id, ==, 0); 820 ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags)); | 571 /* Calls that have never actually been assigned a channel can simply be 572 * discarded. 573 */ 574 conn = call->conn; 575 if (!conn) { 576 _debug("call is waiting"); 577 ASSERTCMP(call->call_id, ==, 0); 578 ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags)); |
821 list_del_init(&call->chan_wait_link); 822 goto out; | 579 list_del_init(&call->wait_link); 580 return; |
823 } 824 825 cid = call->cid; 826 channel = cid & RXRPC_CHANNELMASK; 827 chan = &conn->channels[channel]; 828 trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect); 829 | 581 } 582 583 cid = call->cid; 584 channel = cid & RXRPC_CHANNELMASK; 585 chan = &conn->channels[channel]; 586 trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect); 587 |
830 if (rcu_access_pointer(chan->call) != call) { 831 spin_unlock(&bundle->channel_lock); 832 BUG(); 833 } | 588 if (WARN_ON(chan->call != call)) 589 return; |
834 835 may_reuse = rxrpc_may_reuse_conn(conn); 836 837 /* If a client call was exposed to the world, we save the result for 838 * retransmission. 839 * 840 * We use a barrier here so that the call number and abort code can be 841 * read without needing to take a lock. --- 4 unchanged lines hidden (view full) --- 846 if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) { 847 _debug("exposed %u,%u", call->call_id, call->abort_code); 848 __rxrpc_disconnect_call(conn, call); 849 850 if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) { 851 trace_rxrpc_client(conn, channel, rxrpc_client_to_active); 852 bundle->try_upgrade = false; 853 if (may_reuse) | 590 591 may_reuse = rxrpc_may_reuse_conn(conn); 592 593 /* If a client call was exposed to the world, we save the result for 594 * retransmission. 595 * 596 * We use a barrier here so that the call number and abort code can be 597 * read without needing to take a lock. --- 4 unchanged lines hidden (view full) --- 602 if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) { 603 _debug("exposed %u,%u", call->call_id, call->abort_code); 604 __rxrpc_disconnect_call(conn, call); 605 606 if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) { 607 trace_rxrpc_client(conn, channel, rxrpc_client_to_active); 608 bundle->try_upgrade = false; 609 if (may_reuse) |
854 rxrpc_activate_channels_locked(bundle); | 610 rxrpc_activate_channels(bundle); |
855 } | 611 } |
856 | |
857 } 858 859 /* See if we can pass the channel directly to another call. */ 860 if (may_reuse && !list_empty(&bundle->waiting_calls)) { 861 trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass); 862 rxrpc_activate_one_channel(conn, channel); | 612 } 613 614 /* See if we can pass the channel directly to another call. */ 615 if (may_reuse && !list_empty(&bundle->waiting_calls)) { 616 trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass); 617 rxrpc_activate_one_channel(conn, channel); |
863 goto out; | 618 return; |
864 } 865 866 /* Schedule the final ACK to be transmitted in a short while so that it 867 * can be skipped if we find a follow-on call. The first DATA packet 868 * of the follow on call will implicitly ACK this call. 869 */ 870 if (call->completion == RXRPC_CALL_SUCCEEDED && 871 test_bit(RXRPC_CALL_EXPOSED, &call->flags)) { 872 unsigned long final_ack_at = jiffies + 2; 873 874 WRITE_ONCE(chan->final_ack_at, final_ack_at); 875 smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */ 876 set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); 877 rxrpc_reduce_conn_timer(conn, final_ack_at); 878 } 879 880 /* Deactivate the channel. */ | 619 } 620 621 /* Schedule the final ACK to be transmitted in a short while so that it 622 * can be skipped if we find a follow-on call. The first DATA packet 623 * of the follow on call will implicitly ACK this call. 624 */ 625 if (call->completion == RXRPC_CALL_SUCCEEDED && 626 test_bit(RXRPC_CALL_EXPOSED, &call->flags)) { 627 unsigned long final_ack_at = jiffies + 2; 628 629 WRITE_ONCE(chan->final_ack_at, final_ack_at); 630 smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */ 631 set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); 632 rxrpc_reduce_conn_timer(conn, final_ack_at); 633 } 634 635 /* Deactivate the channel. */ |
881 rcu_assign_pointer(chan->call, NULL); | 636 chan->call = NULL; |
882 set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans); 883 conn->act_chans &= ~(1 << channel); 884 885 /* If no channels remain active, then put the connection on the idle 886 * list for a short while. Give it a ref to stop it going away if it 887 * becomes unbundled. 888 */ 889 if (!conn->act_chans) { 890 trace_rxrpc_client(conn, channel, rxrpc_client_to_idle); 891 conn->idle_timestamp = jiffies; 892 893 rxrpc_get_connection(conn, rxrpc_conn_get_idle); | 637 set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans); 638 conn->act_chans &= ~(1 << channel); 639 640 /* If no channels remain active, then put the connection on the idle 641 * list for a short while. Give it a ref to stop it going away if it 642 * becomes unbundled. 643 */ 644 if (!conn->act_chans) { 645 trace_rxrpc_client(conn, channel, rxrpc_client_to_idle); 646 conn->idle_timestamp = jiffies; 647 648 rxrpc_get_connection(conn, rxrpc_conn_get_idle); |
894 spin_lock(&local->client_conn_cache_lock); | |
895 list_move_tail(&conn->cache_link, &local->idle_client_conns); | 649 list_move_tail(&conn->cache_link, &local->idle_client_conns); |
896 spin_unlock(&local->client_conn_cache_lock); | |
897 898 rxrpc_set_client_reap_timer(local); 899 } | 650 651 rxrpc_set_client_reap_timer(local); 652 } |
900 901out: 902 spin_unlock(&bundle->channel_lock); | |
903} 904 905/* 906 * Remove a connection from a bundle. 907 */ 908static void rxrpc_unbundle_conn(struct rxrpc_connection *conn) 909{ 910 struct rxrpc_bundle *bundle = conn->bundle; 911 unsigned int bindex; | 653} 654 655/* 656 * Remove a connection from a bundle. 657 */ 658static void rxrpc_unbundle_conn(struct rxrpc_connection *conn) 659{ 660 struct rxrpc_bundle *bundle = conn->bundle; 661 unsigned int bindex; |
912 bool need_drop = false; | |
913 int i; 914 915 _enter("C=%x", conn->debug_id); 916 917 if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK) 918 rxrpc_process_delayed_final_acks(conn, true); 919 | 662 int i; 663 664 _enter("C=%x", conn->debug_id); 665 666 if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK) 667 rxrpc_process_delayed_final_acks(conn, true); 668 |
920 spin_lock(&bundle->channel_lock); | |
921 bindex = conn->bundle_shift / RXRPC_MAXCALLS; 922 if (bundle->conns[bindex] == conn) { 923 _debug("clear slot %u", bindex); 924 bundle->conns[bindex] = NULL; 925 for (i = 0; i < RXRPC_MAXCALLS; i++) 926 clear_bit(conn->bundle_shift + i, &bundle->avail_chans); | 669 bindex = conn->bundle_shift / RXRPC_MAXCALLS; 670 if (bundle->conns[bindex] == conn) { 671 _debug("clear slot %u", bindex); 672 bundle->conns[bindex] = NULL; 673 for (i = 0; i < RXRPC_MAXCALLS; i++) 674 clear_bit(conn->bundle_shift + i, &bundle->avail_chans); |
927 need_drop = true; 928 } 929 spin_unlock(&bundle->channel_lock); 930 931 if (need_drop) { | 675 rxrpc_put_client_connection_id(bundle->local, conn); |
932 rxrpc_deactivate_bundle(bundle); 933 rxrpc_put_connection(conn, rxrpc_conn_put_unbundle); 934 } 935} 936 937/* 938 * Drop the active count on a bundle. 939 */ --- 45 unchanged lines hidden (view full) --- 985void rxrpc_discard_expired_client_conns(struct rxrpc_local *local) 986{ 987 struct rxrpc_connection *conn; 988 unsigned long expiry, conn_expires_at, now; 989 unsigned int nr_conns; 990 991 _enter(""); 992 | 676 rxrpc_deactivate_bundle(bundle); 677 rxrpc_put_connection(conn, rxrpc_conn_put_unbundle); 678 } 679} 680 681/* 682 * Drop the active count on a bundle. 683 */ --- 45 unchanged lines hidden (view full) --- 729void rxrpc_discard_expired_client_conns(struct rxrpc_local *local) 730{ 731 struct rxrpc_connection *conn; 732 unsigned long expiry, conn_expires_at, now; 733 unsigned int nr_conns; 734 735 _enter(""); 736 |
993 if (list_empty(&local->idle_client_conns)) { 994 _leave(" [empty]"); 995 return; 996 } 997 | |
998 /* We keep an estimate of what the number of conns ought to be after 999 * we've discarded some so that we don't overdo the discarding. 1000 */ 1001 nr_conns = atomic_read(&local->rxnet->nr_client_conns); 1002 1003next: | 737 /* We keep an estimate of what the number of conns ought to be after 738 * we've discarded some so that we don't overdo the discarding. 739 */ 740 nr_conns = atomic_read(&local->rxnet->nr_client_conns); 741 742next: |
1004 spin_lock(&local->client_conn_cache_lock); | 743 conn = list_first_entry_or_null(&local->idle_client_conns, 744 struct rxrpc_connection, cache_link); 745 if (!conn) 746 return; |
1005 | 747 |
1006 if (list_empty(&local->idle_client_conns)) 1007 goto out; 1008 1009 conn = list_entry(local->idle_client_conns.next, 1010 struct rxrpc_connection, cache_link); 1011 | |
1012 if (!local->kill_all_client_conns) { 1013 /* If the number of connections is over the reap limit, we 1014 * expedite discard by reducing the expiry timeout. We must, 1015 * however, have at least a short grace period to be able to do 1016 * final-ACK or ABORT retransmission. 1017 */ 1018 expiry = rxrpc_conn_idle_client_expiry; 1019 if (nr_conns > rxrpc_reap_client_connections) --- 7 unchanged lines hidden (view full) --- 1027 if (time_after(conn_expires_at, now)) 1028 goto not_yet_expired; 1029 } 1030 1031 atomic_dec(&conn->active); 1032 trace_rxrpc_client(conn, -1, rxrpc_client_discard); 1033 list_del_init(&conn->cache_link); 1034 | 748 if (!local->kill_all_client_conns) { 749 /* If the number of connections is over the reap limit, we 750 * expedite discard by reducing the expiry timeout. We must, 751 * however, have at least a short grace period to be able to do 752 * final-ACK or ABORT retransmission. 753 */ 754 expiry = rxrpc_conn_idle_client_expiry; 755 if (nr_conns > rxrpc_reap_client_connections) --- 7 unchanged lines hidden (view full) --- 763 if (time_after(conn_expires_at, now)) 764 goto not_yet_expired; 765 } 766 767 atomic_dec(&conn->active); 768 trace_rxrpc_client(conn, -1, rxrpc_client_discard); 769 list_del_init(&conn->cache_link); 770 |
1035 spin_unlock(&local->client_conn_cache_lock); 1036 | |
1037 rxrpc_unbundle_conn(conn); 1038 /* Drop the ->cache_link ref */ 1039 rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle); 1040 1041 nr_conns--; 1042 goto next; 1043 1044not_yet_expired: 1045 /* The connection at the front of the queue hasn't yet expired, so 1046 * schedule the work item for that point if we discarded something. 1047 * 1048 * We don't worry if the work item is already scheduled - it can look 1049 * after rescheduling itself at a later time. We could cancel it, but 1050 * then things get messier. 1051 */ 1052 _debug("not yet"); 1053 if (!local->kill_all_client_conns) 1054 timer_reduce(&local->client_conn_reap_timer, conn_expires_at); 1055 | 771 rxrpc_unbundle_conn(conn); 772 /* Drop the ->cache_link ref */ 773 rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle); 774 775 nr_conns--; 776 goto next; 777 778not_yet_expired: 779 /* The connection at the front of the queue hasn't yet expired, so 780 * schedule the work item for that point if we discarded something. 781 * 782 * We don't worry if the work item is already scheduled - it can look 783 * after rescheduling itself at a later time. We could cancel it, but 784 * then things get messier. 785 */ 786 _debug("not yet"); 787 if (!local->kill_all_client_conns) 788 timer_reduce(&local->client_conn_reap_timer, conn_expires_at); 789 |
1056out: 1057 spin_unlock(&local->client_conn_cache_lock); | |
	_leave("");
}

/*
 * Clean up the client connections on a local endpoint.
 *
 * Discards every connection still sitting on the local endpoint's idle-conn
 * list.  Called with no other users of the list expected; the reap timer is
 * stopped first so it cannot race with the teardown loop.
 */
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;

	_enter("");

	/* Setting this flag stops the expiry path from rescheduling the reap
	 * timer (see rxrpc_discard_expired_client_conns), so after the
	 * del_timer_sync() below no further reaping can occur.
	 */
	local->kill_all_client_conns = true;

	del_timer_sync(&local->client_conn_reap_timer);

	/* Drain the idle list.  Each conn on it holds a ref for its place on
	 * the list (taken as rxrpc_conn_get_idle when it went idle) plus an
	 * active count; drop both and detach the conn from its bundle.
	 */
	while ((conn = list_first_entry_or_null(&local->idle_client_conns,
						struct rxrpc_connection, cache_link))) {
		list_del_init(&conn->cache_link);
		atomic_dec(&conn->active);
		trace_rxrpc_client(conn, -1, rxrpc_client_discard);

		rxrpc_unbundle_conn(conn);
		rxrpc_put_connection(conn, rxrpc_conn_put_local_dead);
	}

	_leave(" [culled]");
}