// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS cell and server record management
 *
 * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/key.h>
#include <linux/ctype.h>
#include <linux/dns_resolver.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <keys/rxrpc-type.h>
#include "internal.h"

/* Seconds an unused cell may sit idle before it becomes eligible for GC
 * (used as the expiry delay in afs_put_cell/afs_manage_cells).
 */
static unsigned __read_mostly afs_cell_gc_delay = 10;
/* Clamp applied to the TTL returned by the DNS in afs_update_cell. */
static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;

static void afs_manage_cell(struct work_struct *);

/*
 * Drop one count from net->cells_outstanding and, if it hits zero, wake
 * anyone waiting on it (see afs_cell_purge()).
 */
static void afs_dec_cells_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->cells_outstanding))
		wake_up_var(&net->cells_outstanding);
}

/*
 * Set the cell timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 *
 * The count taken on cells_outstanding is handed over to the timer; it is
 * dropped again immediately if timer_reduce() reports the timer was already
 * pending at an earlier or equal expiry.
 */
static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
{
	if (net->live) {
		atomic_inc(&net->cells_outstanding);
		if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
			afs_dec_cells_outstanding(net);
	}
}

/*
 * Look up and get an activation reference on a cell record under RCU
 * conditions.  The caller must hold the RCU read lock.
 *
 * A NULL name looks up the workstation ("ws") cell.  Returns the cell with
 * its usage count raised, or ERR_PTR(-ENOENT/-EDESTADDRREQ/-EINVAL/
 * -ENAMETOOLONG) on failure.
 */
struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
				     const char *name, unsigned int namesz)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n, seq = 0, ret = 0;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.  The first pass is lockless; on retry,
		 * read_seqbegin_or_lock() takes the lock exclusively.
		 */
		if (cell)
			afs_put_cell(net, cell);
		cell = NULL;
		ret = -ENOENT;

		read_seqbegin_or_lock(&net->cells_lock, &seq);

		if (!name) {
			/* No name means "the current workstation cell". */
			cell = rcu_dereference_raw(net->ws_cell);
			if (cell) {
				afs_get_cell(cell);
				ret = 0;
				break;
			}
			ret = -EDESTADDRREQ;
			continue;
		}

		p = rcu_dereference_raw(net->cells.rb_node);
		while (p) {
			cell = rb_entry(p, struct afs_cell, net_node);

			/* Compare case-insensitively on the shorter length,
			 * breaking ties by length so the tree has a total
			 * order.
			 */
			n = strncasecmp(cell->name, name,
					min_t(size_t, cell->name_len, namesz));
			if (n == 0)
				n = cell->name_len - namesz;
			if (n < 0) {
				p = rcu_dereference_raw(p->rb_left);
			} else if (n > 0) {
				p = rcu_dereference_raw(p->rb_right);
			} else {
				if (atomic_inc_not_zero(&cell->usage)) {
					ret = 0;
					break;
				}
				/* We want to repeat the search, this time with
				 * the lock properly locked.
				 */
			}
			cell = NULL;
		}

	} while (need_seqretry(&net->cells_lock, seq));

	done_seqretry(&net->cells_lock, seq);

	if (ret != 0 && cell)
		afs_put_cell(net, cell);

	return ret == 0 ? cell : ERR_PTR(ret);
}

/*
 * Set up a cell record and fill in its name, VL server address list and
 * allocate an anonymous key.
 *
 * Returns the new record with a usage count of 2 (one for the caller, one
 * for the manager work item), or an ERR_PTR.  The name is lowercased on
 * copy-in; "@cell" is rejected as it is reserved.
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
				       const char *name, unsigned int namelen,
				       const char *addresses)
{
	struct afs_vlserver_list *vllist;
	struct afs_cell *cell;
	int i, ret;

	ASSERT(name);
	if (namelen == 0)
		return ERR_PTR(-EINVAL);
	if (namelen > AFS_MAXCELLNAME) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}
	if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
		return ERR_PTR(-EINVAL);

	_enter("%*.*s,%s", namelen, namelen, name, addresses);

	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
	if (!cell) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	cell->net = net;
	cell->name_len = namelen;
	for (i = 0; i < namelen; i++)
		cell->name[i] = tolower(name[i]);

	atomic_set(&cell->usage, 2);
	INIT_WORK(&cell->manager, afs_manage_cell);
	INIT_LIST_HEAD(&cell->proc_volumes);
	rwlock_init(&cell->proc_lock);
	rwlock_init(&cell->vl_servers_lock);

	/* Provide a VL server list, filling it in if we were given a list of
	 * addresses to use.
	 */
	if (addresses) {
		vllist = afs_parse_text_addrs(net,
					      addresses, strlen(addresses), ':',
					      VL_SERVICE, AFS_VL_PORT);
		if (IS_ERR(vllist)) {
			ret = PTR_ERR(vllist);
			goto parse_failed;
		}

		/* Preconfigured addresses never expire. */
		vllist->source = DNS_RECORD_FROM_CONFIG;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = TIME64_MAX;
	} else {
		ret = -ENOMEM;
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist)
			goto error;
		/* Empty placeholder; expires immediately so the manager will
		 * kick off a DNS lookup.
		 */
		vllist->source = DNS_RECORD_UNAVAILABLE;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = ktime_get_real_seconds();
	}

	rcu_assign_pointer(cell->vl_servers, vllist);

	cell->dns_source = vllist->source;
	cell->dns_status = vllist->status;
	smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */

	_leave(" = %p", cell);
	return cell;

parse_failed:
	if (ret == -EINVAL)
		printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
	kfree(cell);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * afs_lookup_cell - Look up or create a cell record.
 * @net: The network namespace
 * @name: The name of the cell.
 * @namesz: The strlen of the cell name.
 * @vllist: A colon/comma separated list of numeric IP addresses or NULL.
 * @excl: T if an error should be given if the cell name already exists.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed.  Note that the actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted whilst allowing
 * cell records to be shared even if not yet fully constructed.
 */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
				 const char *name, unsigned int namesz,
				 const char *vllist, bool excl)
{
	struct afs_cell *cell, *candidate, *cursor;
	struct rb_node *parent, **pp;
	enum afs_cell_state state;
	int ret, n;

	_enter("%s,%s", name, vllist);

	/* Fast path: share an existing record unless exclusivity was asked
	 * for.
	 */
	if (!excl) {
		rcu_read_lock();
		cell = afs_lookup_cell_rcu(net, name, namesz);
		rcu_read_unlock();
		if (!IS_ERR(cell))
			goto wait_for_cell;
	}

	/* Assume we're probably going to create a cell and preallocate and
	 * mostly set up a candidate record.  We can then use this to stash the
	 * name, the net namespace and VL server addresses.
	 *
	 * We also want to do this before we hold any locks as it may involve
	 * upcalling to userspace to make DNS queries.
	 */
	candidate = afs_alloc_cell(net, name, namesz, vllist);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	/* Find the insertion point and check to see if someone else added a
	 * cell whilst we were allocating.
	 */
	write_seqlock(&net->cells_lock);

	pp = &net->cells.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent, struct afs_cell, net_node);

		/* Same ordering as afs_lookup_cell_rcu(): case-insensitive,
		 * ties broken by length.
		 */
		n = strncasecmp(cursor->name, name,
				min_t(size_t, cursor->name_len, namesz));
		if (n == 0)
			n = cursor->name_len - namesz;
		if (n < 0)
			pp = &(*pp)->rb_left;
		else if (n > 0)
			pp = &(*pp)->rb_right;
		else
			goto cell_already_exists;
	}

	cell = candidate;
	candidate = NULL;
	rb_link_node_rcu(&cell->net_node, parent, pp);
	rb_insert_color(&cell->net_node, &net->cells);
	/* The manager work item owns a count on cells_outstanding. */
	atomic_inc(&net->cells_outstanding);
	write_sequnlock(&net->cells_lock);

	queue_work(afs_wq, &cell->manager);

wait_for_cell:
	/* Block until the manager has driven the cell to a terminal state. */
	_debug("wait_for_cell");
	wait_var_event(&cell->state,
		       ({
			       state = smp_load_acquire(&cell->state); /* vs error */
			       state == AFS_CELL_ACTIVE || state == AFS_CELL_FAILED;
		       }));

	/* Check the state obtained from the wait check.
	 */
	if (state == AFS_CELL_FAILED) {
		ret = cell->error;
		goto error;
	}

	_leave(" = %p [cell]", cell);
	return cell;

cell_already_exists:
	_debug("cell exists");
	cell = cursor;
	if (excl) {
		ret = -EEXIST;
	} else {
		afs_get_cell(cursor);
		ret = 0;
	}
	write_sequnlock(&net->cells_lock);
	/* Discard the unused preallocated record. */
	kfree(candidate);
	if (ret == 0)
		goto wait_for_cell;
	goto error_noput;
error:
	afs_put_cell(net, cell);
error_noput:
	_leave(" = %d [error]", ret);
	return ERR_PTR(ret);
}

/*
 * set the root cell information
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 *
 * @rootcell has the form "<cellname>[:<vlserver-addrs>]"; a NULL pointer
 * means no root cell was configured and is not an error.
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
	struct afs_cell *old_root, *new_root;
	const char *cp, *vllist;
	size_t len;

	_enter("");

	if (!rootcell) {
		/* module is loaded with no parameters, or built statically.
		 * - in the future we might initialize cell DB here.
		 */
		_leave(" = 0 [no root]");
		return 0;
	}

	cp = strchr(rootcell, ':');
	if (!cp) {
		_debug("kAFS: no VL server IP addresses specified");
		vllist = NULL;
		len = strlen(rootcell);
	} else {
		vllist = cp + 1;
		len = cp - rootcell;
	}

	/* allocate a cell record for the root cell */
	new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
	if (IS_ERR(new_root)) {
		_leave(" = %ld", PTR_ERR(new_root));
		return PTR_ERR(new_root);
	}

	/* Pin the root cell: the NO_GC bit carries an extra ref, taken only
	 * by whoever sets the bit first.
	 */
	if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
		afs_get_cell(new_root);

	/* install the new cell */
	write_seqlock(&net->cells_lock);
	old_root = rcu_access_pointer(net->ws_cell);
	rcu_assign_pointer(net->ws_cell, new_root);
	write_sequnlock(&net->cells_lock);

	afs_put_cell(net, old_root);
	_leave(" = 0");
	return 0;
}

/*
 * Update a cell's VL server address list from the DNS.
371 */ 372 static int afs_update_cell(struct afs_cell *cell) 373 { 374 struct afs_vlserver_list *vllist, *old = NULL, *p; 375 unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl); 376 unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl); 377 time64_t now, expiry = 0; 378 int ret = 0; 379 380 _enter("%s", cell->name); 381 382 vllist = afs_dns_query(cell, &expiry); 383 if (IS_ERR(vllist)) { 384 ret = PTR_ERR(vllist); 385 386 _debug("%s: fail %d", cell->name, ret); 387 if (ret == -ENOMEM) 388 goto out_wake; 389 390 ret = -ENOMEM; 391 vllist = afs_alloc_vlserver_list(0); 392 if (!vllist) 393 goto out_wake; 394 395 switch (ret) { 396 case -ENODATA: 397 case -EDESTADDRREQ: 398 vllist->status = DNS_LOOKUP_GOT_NOT_FOUND; 399 break; 400 case -EAGAIN: 401 case -ECONNREFUSED: 402 vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE; 403 break; 404 default: 405 vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE; 406 break; 407 } 408 } 409 410 _debug("%s: got list %d %d", cell->name, vllist->source, vllist->status); 411 cell->dns_status = vllist->status; 412 413 now = ktime_get_real_seconds(); 414 if (min_ttl > max_ttl) 415 max_ttl = min_ttl; 416 if (expiry < now + min_ttl) 417 expiry = now + min_ttl; 418 else if (expiry > now + max_ttl) 419 expiry = now + max_ttl; 420 421 _debug("%s: status %d", cell->name, vllist->status); 422 if (vllist->source == DNS_RECORD_UNAVAILABLE) { 423 switch (vllist->status) { 424 case DNS_LOOKUP_GOT_NOT_FOUND: 425 /* The DNS said that the cell does not exist or there 426 * weren't any addresses to be had. 427 */ 428 cell->dns_expiry = expiry; 429 break; 430 431 case DNS_LOOKUP_BAD: 432 case DNS_LOOKUP_GOT_LOCAL_FAILURE: 433 case DNS_LOOKUP_GOT_TEMP_FAILURE: 434 case DNS_LOOKUP_GOT_NS_FAILURE: 435 default: 436 cell->dns_expiry = now + 10; 437 break; 438 } 439 } else { 440 cell->dns_expiry = expiry; 441 } 442 443 /* Replace the VL server list if the new record has servers or the old 444 * record doesn't. 
445 */ 446 write_lock(&cell->vl_servers_lock); 447 p = rcu_dereference_protected(cell->vl_servers, true); 448 if (vllist->nr_servers > 0 || p->nr_servers == 0) { 449 rcu_assign_pointer(cell->vl_servers, vllist); 450 cell->dns_source = vllist->source; 451 old = p; 452 } 453 write_unlock(&cell->vl_servers_lock); 454 afs_put_vlserverlist(cell->net, old); 455 456 out_wake: 457 smp_store_release(&cell->dns_lookup_count, 458 cell->dns_lookup_count + 1); /* vs source/status */ 459 wake_up_var(&cell->dns_lookup_count); 460 _leave(" = %d", ret); 461 return ret; 462 } 463 464 /* 465 * Destroy a cell record 466 */ 467 static void afs_cell_destroy(struct rcu_head *rcu) 468 { 469 struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu); 470 471 _enter("%p{%s}", cell, cell->name); 472 473 ASSERTCMP(atomic_read(&cell->usage), ==, 0); 474 475 afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers)); 476 key_put(cell->anonymous_key); 477 kfree(cell); 478 479 _leave(" [destroyed]"); 480 } 481 482 /* 483 * Queue the cell manager. 484 */ 485 static void afs_queue_cell_manager(struct afs_net *net) 486 { 487 int outstanding = atomic_inc_return(&net->cells_outstanding); 488 489 _enter("%d", outstanding); 490 491 if (!queue_work(afs_wq, &net->cells_manager)) 492 afs_dec_cells_outstanding(net); 493 } 494 495 /* 496 * Cell management timer. We have an increment on cells_outstanding that we 497 * need to pass along to the work item. 498 */ 499 void afs_cells_timer(struct timer_list *timer) 500 { 501 struct afs_net *net = container_of(timer, struct afs_net, cells_timer); 502 503 _enter(""); 504 if (!queue_work(afs_wq, &net->cells_manager)) 505 afs_dec_cells_outstanding(net); 506 } 507 508 /* 509 * Get a reference on a cell record. 510 */ 511 struct afs_cell *afs_get_cell(struct afs_cell *cell) 512 { 513 atomic_inc(&cell->usage); 514 return cell; 515 } 516 517 /* 518 * Drop a reference on a cell record. 
 */
void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
{
	time64_t now, expire_delay;

	if (!cell)
		return;

	_enter("%s", cell->name);

	/* Record when the cell last went (potentially) unused; the GC delay
	 * only applies if the cell still has VL servers to come back to.
	 * NOTE(review): vl_servers is read here without the vl_servers_lock
	 * or an RCU dereference - presumably tolerable for a heuristic
	 * expiry hint; verify against the locking rules in internal.h.
	 */
	now = ktime_get_real_seconds();
	cell->last_inactive = now;
	expire_delay = 0;
	if (cell->vl_servers->nr_servers)
		expire_delay = afs_cell_gc_delay;

	if (atomic_dec_return(&cell->usage) > 1)
		return;

	/* 'cell' may now be garbage collected. */
	afs_set_cell_timer(net, expire_delay);
}

/*
 * Allocate a key to use as a placeholder for anonymous user security.
 *
 * The key is named "afs@<cellname>" (lowercased) and stashed in
 * cell->anonymous_key.
 */
static int afs_alloc_anon_key(struct afs_cell *cell)
{
	struct key *key;
	char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;

	/* Create a key to represent an anonymous user. */
	memcpy(keyname, "afs@", 4);
	dp = keyname + 4;
	cp = cell->name;
	do {
		*dp++ = tolower(*cp);
	} while (*cp++);	/* copies the terminating NUL too */

	key = rxrpc_get_null_key(keyname);
	if (IS_ERR(key))
		return PTR_ERR(key);

	cell->anonymous_key = key;

	_debug("anon key %p{%x}",
	       cell->anonymous_key, key_serial(cell->anonymous_key));
	return 0;
}

/*
 * Activate a cell.
 *
 * Sets up the anonymous key, cache cookie and procfs entries, and links the
 * cell into net->proc_cells in name order.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
	struct hlist_node **p;
	struct afs_cell *pcell;
	int ret;

	if (!cell->anonymous_key) {
		ret = afs_alloc_anon_key(cell);
		if (ret < 0)
			return ret;
	}

#ifdef CONFIG_AFS_FSCACHE
	cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
					     &afs_cell_cache_index_def,
					     cell->name, strlen(cell->name),
					     NULL, 0,
					     cell, 0, true);
#endif
	ret = afs_proc_cell_setup(cell);
	if (ret < 0)
		return ret;

	/* Find the alphabetical insertion point in the proc list. */
	mutex_lock(&net->proc_cells_lock);
	for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
		pcell = hlist_entry(*p, struct afs_cell, proc_link);
		if (strcmp(cell->name, pcell->name) < 0)
			break;
	}

	/* Hand-rolled RCU hlist insertion.
	 * NOTE(review): the rcu_assign_pointer() publishes
	 * &cell->proc_link.next rather than &cell->proc_link - this appears
	 * to rely on ->next being the first member of struct hlist_node so
	 * the two addresses alias; confirm this is intentional.
	 */
	cell->proc_link.pprev = p;
	cell->proc_link.next = *p;
	rcu_assign_pointer(*p, &cell->proc_link.next);
	if (cell->proc_link.next)
		cell->proc_link.next->pprev = &cell->proc_link.next;

	afs_dynroot_mkdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);
	return 0;
}

/*
 * Deactivate a cell.  Reverses afs_activate_cell(): removes the procfs
 * entries, unlinks from proc_cells and relinquishes the cache cookie.
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(cell);

	mutex_lock(&net->proc_cells_lock);
	hlist_del_rcu(&cell->proc_link);
	afs_dynroot_rmdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);

#ifdef CONFIG_AFS_FSCACHE
	fscache_relinquish_cookie(cell->cache, NULL, false);
	cell->cache = NULL;
#endif

	_leave("");
}

/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 *
 * This is the per-cell work item; it drives the cell's state machine
 * (UNSET -> ACTIVATING -> ACTIVE -> DEACTIVATING -> INACTIVE/FAILED) and is
 * the only place a cell is unlinked from net->cells and destroyed.
 */
static void afs_manage_cell(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, manager);
	struct afs_net *net = cell->net;
	bool deleted;
	int ret, usage;

	_enter("%s", cell->name);

again:
	_debug("state %u", cell->state);
	switch (cell->state) {
	case AFS_CELL_INACTIVE:
	case AFS_CELL_FAILED:
		/* If we hold the only remaining reference, atomically drop
		 * the count to zero and unlink the cell; otherwise someone
		 * resurrected it, so start over from UNSET.
		 */
		write_seqlock(&net->cells_lock);
		usage = 1;
		deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0);
		if (deleted)
			rb_erase(&cell->net_node, &net->cells);
		write_sequnlock(&net->cells_lock);
		if (deleted)
			goto final_destruction;
		if (cell->state == AFS_CELL_FAILED)
			goto done;
		smp_store_release(&cell->state, AFS_CELL_UNSET);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_UNSET:
		smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVATING:
		ret = afs_activate_cell(net, cell);
		if (ret < 0)
			goto activation_failed;

		smp_store_release(&cell->state, AFS_CELL_ACTIVE);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVE:
		if (atomic_read(&cell->usage) > 1) {
			/* Still in use: just refresh the DNS records if a
			 * lookup was requested.
			 */
			if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
				ret = afs_update_cell(cell);
				if (ret < 0)
					cell->error = ret;
			}
			goto done;
		}
		smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_DEACTIVATING:
		/* A new user may have appeared since we decided to
		 * deactivate - back out if so.
		 */
		if (atomic_read(&cell->usage) > 1)
			goto reverse_deactivation;
		afs_deactivate_cell(net, cell);
		smp_store_release(&cell->state, AFS_CELL_INACTIVE);
		wake_up_var(&cell->state);
		goto again;

	default:
		break;
	}
	_debug("bad state %u", cell->state);
	BUG(); /* Unhandled state */

activation_failed:
	cell->error = ret;
	afs_deactivate_cell(net, cell);

	smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
	wake_up_var(&cell->state);
	goto again;

reverse_deactivation:
	smp_store_release(&cell->state, AFS_CELL_ACTIVE);
	wake_up_var(&cell->state);
	_leave(" [deact->act]");
	return;

done:
	_leave(" [done %u]", cell->state);
	return;

final_destruction:
	call_rcu(&cell->rcu, afs_cell_destroy);
	afs_dec_cells_outstanding(net);
	_leave(" [destruct %d]", atomic_read(&net->cells_outstanding));
}

/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that constructed cell records may only be removed from net->cells by
 * this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to caller (provided it skips cells that are
 * still under construction).
 *
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_cells(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, cells_manager);
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the cell database looking for cells that have expired from
	 * lack of use and cells whose DNS results have expired and dispatch
	 * their managers.
	 */
	read_seqlock_excl(&net->cells_lock);

	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell =
			rb_entry(cursor, struct afs_cell, net_node);
		unsigned usage;
		bool sched_cell = false;

		usage = atomic_read(&cell->usage);
		_debug("manage %s %u", cell->name, usage);

		ASSERTCMP(usage, >=, 1);

		if (purging) {
			/* Strip the anti-GC pin (and its ref) so everything
			 * can be torn down.
			 */
			if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
				usage = atomic_dec_return(&cell->usage);
			ASSERTCMP(usage, ==, 1);
		}

		if (usage == 1) {
			/* Only the tree holds a ref: candidate for GC once
			 * the grace period has elapsed.
			 */
			struct afs_vlserver_list *vllist;
			time64_t expire_at = cell->last_inactive;

			read_lock(&cell->vl_servers_lock);
			vllist = rcu_dereference_protected(
				cell->vl_servers,
				lockdep_is_held(&cell->vl_servers_lock));
			if (vllist->nr_servers > 0)
				expire_at += afs_cell_gc_delay;
			read_unlock(&cell->vl_servers_lock);
			if (purging || expire_at <= now)
				sched_cell = true;
			else if (expire_at < next_manage)
				next_manage = expire_at;
		}

		if (!purging) {
			if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
				sched_cell = true;
		}

		if (sched_cell)
			queue_work(afs_wq, &cell->manager);
	}

	read_sequnlock_excl(&net->cells_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * cells_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->cells_manager))
				atomic_inc(&net->cells_outstanding);
		} else {
			afs_set_cell_timer(net, next_manage - now);
		}
	}

	afs_dec_cells_outstanding(net);
	_leave(" [%d]", atomic_read(&net->cells_outstanding));
}

/*
 * Purge in-memory cell database.
 *
 * Called on namespace teardown (net->live is expected to be false so that
 * afs_manage_cells() runs in purge mode).  Drops the workstation cell,
 * cancels the GC timer, kicks the manager and then waits for all
 * outstanding cell work to drain.
 */
void afs_cell_purge(struct afs_net *net)
{
	struct afs_cell *ws;

	_enter("");

	/* Detach and release the workstation cell. */
	write_seqlock(&net->cells_lock);
	ws = rcu_access_pointer(net->ws_cell);
	RCU_INIT_POINTER(net->ws_cell, NULL);
	write_sequnlock(&net->cells_lock);
	afs_put_cell(net, ws);

	_debug("del timer");
	/* A pending timer owns a count on cells_outstanding; reclaim it. */
	if (del_timer_sync(&net->cells_timer))
		atomic_dec(&net->cells_outstanding);

	_debug("kick mgr");
	afs_queue_cell_manager(net);

	_debug("wait");
	wait_var_event(&net->cells_outstanding,
		       !atomic_read(&net->cells_outstanding));
	_leave("");
}