/* AFS cell and server record management
 *
 * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/key.h>
#include <linux/ctype.h>
#include <linux/dns_resolver.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <keys/rxrpc-type.h>
#include "internal.h"

static unsigned __read_mostly afs_cell_gc_delay = 10;
static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;

static void afs_manage_cell(struct work_struct *);

static void afs_dec_cells_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->cells_outstanding))
		wake_up_var(&net->cells_outstanding);
}

/*
 * Set the cell timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 */
static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
{
	if (net->live) {
		atomic_inc(&net->cells_outstanding);
		if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
			afs_dec_cells_outstanding(net);
	}
}

/*
 * Look up and get an activation reference on a cell record under RCU
 * conditions.  The caller must hold the RCU read lock.
 */
struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
				     const char *name, unsigned int namesz)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n, seq = 0, ret = 0;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		if (cell)
			afs_put_cell(net, cell);
		cell = NULL;
		ret = -ENOENT;

		read_seqbegin_or_lock(&net->cells_lock, &seq);

		if (!name) {
			cell = rcu_dereference_raw(net->ws_cell);
			if (cell) {
				afs_get_cell(cell);
				ret = 0;
				break;
			}
			ret = -EDESTADDRREQ;
			continue;
		}

		p = rcu_dereference_raw(net->cells.rb_node);
		while (p) {
			cell = rb_entry(p, struct afs_cell, net_node);

			n = strncasecmp(cell->name, name,
					min_t(size_t, cell->name_len, namesz));
			if (n == 0)
				n = cell->name_len - namesz;
			if (n < 0) {
				p = rcu_dereference_raw(p->rb_left);
			} else if (n > 0) {
				p = rcu_dereference_raw(p->rb_right);
			} else {
				if (atomic_inc_not_zero(&cell->usage)) {
					ret = 0;
					break;
				}
				/* We want to repeat the search, this time with
				 * the lock properly locked.
				 */
			}
			cell = NULL;
		}

	} while (need_seqretry(&net->cells_lock, seq));

	done_seqretry(&net->cells_lock, seq);

	return ret == 0 ? cell : ERR_PTR(ret);
}
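
/*
 * Illustrative sketch only (not part of the original code): the expected
 * calling convention for afs_lookup_cell_rcu().  The lookup must be done
 * under the RCU read lock, and the activation reference it returns must be
 * dropped with afs_put_cell() once the caller has finished with the record.
 * The helper name below is hypothetical.
 */
static inline struct afs_cell *afs_example_find_cell(struct afs_net *net,
						     const char *name,
						     unsigned int namesz)
{
	struct afs_cell *cell;

	rcu_read_lock();
	cell = afs_lookup_cell_rcu(net, name, namesz);
	rcu_read_unlock();

	/* On success the caller now holds a reference that it must release
	 * with afs_put_cell(net, cell) when done.
	 */
	return cell;
}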

/*
 * Set up a cell record and fill in its name, VL server address list and
 * allocate an anonymous key
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
				       const char *name, unsigned int namelen,
				       const char *addresses)
{
	struct afs_cell *cell;
	int i, ret;

	ASSERT(name);
	if (namelen == 0)
		return ERR_PTR(-EINVAL);
	if (namelen > AFS_MAXCELLNAME) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}
	if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
		return ERR_PTR(-EINVAL);

	_enter("%*.*s,%s", namelen, namelen, name, addresses);

	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
	if (!cell) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	cell->net = net;
	cell->name_len = namelen;
	for (i = 0; i < namelen; i++)
		cell->name[i] = tolower(name[i]);

	atomic_set(&cell->usage, 2);
	INIT_WORK(&cell->manager, afs_manage_cell);
	cell->flags = ((1 << AFS_CELL_FL_NOT_READY) |
		       (1 << AFS_CELL_FL_NO_LOOKUP_YET));
	INIT_LIST_HEAD(&cell->proc_volumes);
	rwlock_init(&cell->proc_lock);
	rwlock_init(&cell->vl_servers_lock);

	/* Fill in the VL server list if we were given a list of addresses to
	 * use.
	 */
	if (addresses) {
		struct afs_vlserver_list *vllist;

		vllist = afs_parse_text_addrs(net,
					      addresses, strlen(addresses), ':',
					      VL_SERVICE, AFS_VL_PORT);
		if (IS_ERR(vllist)) {
			ret = PTR_ERR(vllist);
			goto parse_failed;
		}

		rcu_assign_pointer(cell->vl_servers, vllist);
		cell->dns_expiry = TIME64_MAX;
	} else {
		cell->dns_expiry = ktime_get_real_seconds();
	}

	_leave(" = %p", cell);
	return cell;

parse_failed:
	if (ret == -EINVAL)
		printk(KERN_ERR "kAFS: bad VL server IP address\n");
	kfree(cell);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * afs_lookup_cell - Look up or create a cell record.
 * @net:	The network namespace
 * @name:	The name of the cell.
 * @namesz:	The strlen of the cell name.
 * @vllist:	A colon/comma separated list of numeric IP addresses or NULL.
 * @excl:	T if an error should be given if the cell name already exists.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed.  Note that the actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted, whilst allowing
 * cell records to be shared even if not yet fully constructed.
 */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
				 const char *name, unsigned int namesz,
				 const char *vllist, bool excl)
{
	struct afs_cell *cell, *candidate, *cursor;
	struct rb_node *parent, **pp;
	int ret, n;

	_enter("%s,%s", name, vllist);

	if (!excl) {
		rcu_read_lock();
		cell = afs_lookup_cell_rcu(net, name, namesz);
		rcu_read_unlock();
		if (!IS_ERR(cell))
			goto wait_for_cell;
	}

	/* Assume we're probably going to create a cell and preallocate and
	 * mostly set up a candidate record.  We can then use this to stash the
	 * name, the net namespace and VL server addresses.
	 *
	 * We also want to do this before we hold any locks as it may involve
	 * upcalling to userspace to make DNS queries.
	 */
	candidate = afs_alloc_cell(net, name, namesz, vllist);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	/* Find the insertion point and check to see if someone else added a
	 * cell whilst we were allocating.
	 */
	write_seqlock(&net->cells_lock);

	pp = &net->cells.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent, struct afs_cell, net_node);

		n = strncasecmp(cursor->name, name,
				min_t(size_t, cursor->name_len, namesz));
		if (n == 0)
			n = cursor->name_len - namesz;
		if (n < 0)
			pp = &(*pp)->rb_left;
		else if (n > 0)
			pp = &(*pp)->rb_right;
		else
			goto cell_already_exists;
	}

	cell = candidate;
	candidate = NULL;
	rb_link_node_rcu(&cell->net_node, parent, pp);
	rb_insert_color(&cell->net_node, &net->cells);
	atomic_inc(&net->cells_outstanding);
	write_sequnlock(&net->cells_lock);

	queue_work(afs_wq, &cell->manager);

wait_for_cell:
	_debug("wait_for_cell");
	ret = wait_on_bit(&cell->flags, AFS_CELL_FL_NOT_READY, TASK_INTERRUPTIBLE);
	smp_rmb();

	switch (READ_ONCE(cell->state)) {
	case AFS_CELL_FAILED:
		ret = cell->error;
		goto error;
	default:
		_debug("weird %u %d", cell->state, cell->error);
		goto error;
	case AFS_CELL_ACTIVE:
		break;
	}

	_leave(" = %p [cell]", cell);
	return cell;

cell_already_exists:
	_debug("cell exists");
	cell = cursor;
	if (excl) {
		ret = -EEXIST;
	} else {
		afs_get_cell(cursor);
		ret = 0;
	}
	write_sequnlock(&net->cells_lock);
	kfree(candidate);
	if (ret == 0)
		goto wait_for_cell;
	goto error_noput;
error:
	afs_put_cell(net, cell);
error_noput:
	_leave(" = %d [error]", ret);
	return ERR_PTR(ret);
}
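
/*
 * Illustrative sketch only: a hypothetical caller using afs_lookup_cell() to
 * add a cell with a preconfigured, colon-separated VL server address list.
 * With @excl set, -EEXIST is returned if the cell is already known; on
 * success the reference obtained here must eventually be dropped with
 * afs_put_cell().  The cell name and addresses are example values.
 */
static inline int afs_example_add_cell(struct afs_net *net)
{
	struct afs_cell *cell;

	cell = afs_lookup_cell(net, "example.org", 11,
			       "203.0.113.1:203.0.113.2", true);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	/* ... use the cell ... */

	afs_put_cell(net, cell);
	return 0;
}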

/*
 * set the root cell information
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
	struct afs_cell *old_root, *new_root;
	const char *cp, *vllist;
	size_t len;

	_enter("");

	if (!rootcell) {
		/* module is loaded with no parameters, or built statically.
		 * - in the future we might initialize cell DB here.
		 */
		_leave(" = 0 [no root]");
		return 0;
	}

	cp = strchr(rootcell, ':');
	if (!cp) {
		_debug("kAFS: no VL server IP addresses specified");
		vllist = NULL;
		len = strlen(rootcell);
	} else {
		vllist = cp + 1;
		len = cp - rootcell;
	}

	/* allocate a cell record for the root cell */
	new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
	if (IS_ERR(new_root)) {
		_leave(" = %ld", PTR_ERR(new_root));
		return PTR_ERR(new_root);
	}

	if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
		afs_get_cell(new_root);

	/* install the new cell */
	write_seqlock(&net->cells_lock);
	old_root = rcu_access_pointer(net->ws_cell);
	rcu_assign_pointer(net->ws_cell, new_root);
	write_sequnlock(&net->cells_lock);

	afs_put_cell(net, old_root);
	_leave(" = 0");
	return 0;
}
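
/*
 * For illustration (example values only): the string handed to
 * afs_cell_init() takes the form "<cellname>[:<VL server addresses>]", so
 * writing "example.org:203.0.113.1:203.0.113.2" to /proc/fs/afs/rootcell
 * names the root cell "example.org" and seeds its VL server list with the
 * two addresses given, whereas a bare "example.org" leaves the VL server
 * addresses to be obtained from the DNS by the cell manager.
 */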

/*
 * Update a cell's VL server address list from the DNS.
 */
static void afs_update_cell(struct afs_cell *cell)
{
	struct afs_vlserver_list *vllist, *old;
	unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
	unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
	time64_t now, expiry = 0;

	_enter("%s", cell->name);

	vllist = afs_dns_query(cell, &expiry);

	now = ktime_get_real_seconds();
	if (min_ttl > max_ttl)
		max_ttl = min_ttl;
	if (expiry < now + min_ttl)
		expiry = now + min_ttl;
	else if (expiry > now + max_ttl)
		expiry = now + max_ttl;

	if (IS_ERR(vllist)) {
		switch (PTR_ERR(vllist)) {
		case -ENODATA:
		case -EDESTADDRREQ:
			/* The DNS said that the cell does not exist or there
			 * weren't any addresses to be had.
			 */
			set_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags);
			clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
			cell->dns_expiry = expiry;
			break;

		case -EAGAIN:
		case -ECONNREFUSED:
		default:
			set_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
			cell->dns_expiry = now + 10;
			break;
		}

		cell->error = -EDESTADDRREQ;
	} else {
		clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
		clear_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags);

		/* Exclusion on changing vl_addrs is achieved by a
		 * non-reentrant work item.
		 */
		old = rcu_dereference_protected(cell->vl_servers, true);
		rcu_assign_pointer(cell->vl_servers, vllist);
		cell->dns_expiry = expiry;

		if (old)
			afs_put_vlserverlist(cell->net, old);
	}

	if (test_and_clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags))
		wake_up_bit(&cell->flags, AFS_CELL_FL_NO_LOOKUP_YET);

	now = ktime_get_real_seconds();
	afs_set_cell_timer(cell->net, cell->dns_expiry - now);
	_leave("");
}

/*
 * Destroy a cell record
 */
static void afs_cell_destroy(struct rcu_head *rcu)
{
	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);

	_enter("%p{%s}", cell, cell->name);

	ASSERTCMP(atomic_read(&cell->usage), ==, 0);

	afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
	key_put(cell->anonymous_key);
	kfree(cell);

	_leave(" [destroyed]");
}

/*
 * Queue the cell manager.
 */
static void afs_queue_cell_manager(struct afs_net *net)
{
	int outstanding = atomic_inc_return(&net->cells_outstanding);

	_enter("%d", outstanding);

	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}

/*
 * Cell management timer.  We have an increment on cells_outstanding that we
 * need to pass along to the work item.
 */
void afs_cells_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, cells_timer);

	_enter("");
	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}

/*
 * Get a reference on a cell record.
 */
struct afs_cell *afs_get_cell(struct afs_cell *cell)
{
	atomic_inc(&cell->usage);
	return cell;
}

/*
 * Drop a reference on a cell record.
 */
void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
{
	time64_t now, expire_delay;

	if (!cell)
		return;

	_enter("%s", cell->name);

	now = ktime_get_real_seconds();
	cell->last_inactive = now;
	expire_delay = 0;
	if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) &&
	    !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags))
		expire_delay = afs_cell_gc_delay;

	if (atomic_dec_return(&cell->usage) > 1)
		return;

	/* 'cell' may now be garbage collected. */
	afs_set_cell_timer(net, expire_delay);
}

/*
 * Allocate a key to use as a placeholder for anonymous user security.
 */
static int afs_alloc_anon_key(struct afs_cell *cell)
{
	struct key *key;
	char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;

	/* Create a key to represent an anonymous user. */
	memcpy(keyname, "afs@", 4);
	dp = keyname + 4;
	cp = cell->name;
	do {
		*dp++ = tolower(*cp);
	} while (*cp++);

	key = rxrpc_get_null_key(keyname);
	if (IS_ERR(key))
		return PTR_ERR(key);

	cell->anonymous_key = key;

	_debug("anon key %p{%x}",
	       cell->anonymous_key, key_serial(cell->anonymous_key));
	return 0;
}

/*
 * Activate a cell.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
	struct hlist_node **p;
	struct afs_cell *pcell;
	int ret;

	if (!cell->anonymous_key) {
		ret = afs_alloc_anon_key(cell);
		if (ret < 0)
			return ret;
	}

#ifdef CONFIG_AFS_FSCACHE
	cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
					     &afs_cell_cache_index_def,
					     cell->name, strlen(cell->name),
					     NULL, 0,
					     cell, 0, true);
#endif
	ret = afs_proc_cell_setup(cell);
	if (ret < 0)
		return ret;

	mutex_lock(&net->proc_cells_lock);
	for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
		pcell = hlist_entry(*p, struct afs_cell, proc_link);
		if (strcmp(cell->name, pcell->name) < 0)
			break;
	}

	cell->proc_link.pprev = p;
	cell->proc_link.next = *p;
	rcu_assign_pointer(*p, &cell->proc_link.next);
	if (cell->proc_link.next)
		cell->proc_link.next->pprev = &cell->proc_link.next;

	afs_dynroot_mkdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);
	return 0;
}

/*
 * Deactivate a cell.
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(cell);

	mutex_lock(&net->proc_cells_lock);
	hlist_del_rcu(&cell->proc_link);
	afs_dynroot_rmdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);

#ifdef CONFIG_AFS_FSCACHE
	fscache_relinquish_cookie(cell->cache, NULL, false);
	cell->cache = NULL;
#endif

	_leave("");
}
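
/*
 * Overview of the cell state machine driven by afs_manage_cell() below
 * (summarised from the switch statement; for orientation only):
 *
 *	UNSET -> ACTIVATING -> ACTIVE -> DEACTIVATING -> INACTIVE -> UNSET ...
 *	             |                       |
 *	             v                       +-> ACTIVE again, if the cell
 *	          FAILED                         gained users in the meantime
 *
 * A cell that reaches INACTIVE or FAILED with a usage count of 1 is removed
 * from the cell tree and freed via RCU; otherwise a FAILED cell stays failed
 * and an INACTIVE cell loops back to UNSET to be reactivated.
 */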

/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 */
static void afs_manage_cell(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, manager);
	struct afs_net *net = cell->net;
	bool deleted;
	int ret, usage;

	_enter("%s", cell->name);

again:
	_debug("state %u", cell->state);
	switch (cell->state) {
	case AFS_CELL_INACTIVE:
	case AFS_CELL_FAILED:
		write_seqlock(&net->cells_lock);
		usage = 1;
		deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0);
		if (deleted)
			rb_erase(&cell->net_node, &net->cells);
		write_sequnlock(&net->cells_lock);
		if (deleted)
			goto final_destruction;
		if (cell->state == AFS_CELL_FAILED)
			goto done;
		cell->state = AFS_CELL_UNSET;
		goto again;

	case AFS_CELL_UNSET:
		cell->state = AFS_CELL_ACTIVATING;
		goto again;

	case AFS_CELL_ACTIVATING:
		ret = afs_activate_cell(net, cell);
		if (ret < 0)
			goto activation_failed;

		cell->state = AFS_CELL_ACTIVE;
		smp_wmb();
		clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
		wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
		goto again;

	case AFS_CELL_ACTIVE:
		if (atomic_read(&cell->usage) > 1) {
			time64_t now = ktime_get_real_seconds();
			if (cell->dns_expiry <= now && net->live)
				afs_update_cell(cell);
			goto done;
		}
		cell->state = AFS_CELL_DEACTIVATING;
		goto again;

	case AFS_CELL_DEACTIVATING:
		set_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
		if (atomic_read(&cell->usage) > 1)
			goto reverse_deactivation;
		afs_deactivate_cell(net, cell);
		cell->state = AFS_CELL_INACTIVE;
		goto again;

	default:
		break;
	}
	_debug("bad state %u", cell->state);
	BUG(); /* Unhandled state */

activation_failed:
	cell->error = ret;
	afs_deactivate_cell(net, cell);

	cell->state = AFS_CELL_FAILED;
	smp_wmb();
	if (test_and_clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags))
		wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
	goto again;

reverse_deactivation:
	cell->state = AFS_CELL_ACTIVE;
	smp_wmb();
	clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
	wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
	_leave(" [deact->act]");
	return;

done:
	_leave(" [done %u]", cell->state);
	return;

final_destruction:
	call_rcu(&cell->rcu, afs_cell_destroy);
	afs_dec_cells_outstanding(net);
	_leave(" [destruct %d]", atomic_read(&net->cells_outstanding));
}

/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that constructed cell records may only be removed from net->cells by
 * this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to caller (provided it skips cells that are
 * still under construction).
 *
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_cells(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, cells_manager);
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the cell database looking for cells that have expired from
	 * lack of use and cells whose DNS results have expired and dispatch
	 * their managers.
	 */
	read_seqlock_excl(&net->cells_lock);

	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell =
			rb_entry(cursor, struct afs_cell, net_node);
		unsigned usage;
		bool sched_cell = false;

		usage = atomic_read(&cell->usage);
		_debug("manage %s %u", cell->name, usage);

		ASSERTCMP(usage, >=, 1);

		if (purging) {
			if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
				usage = atomic_dec_return(&cell->usage);
			ASSERTCMP(usage, ==, 1);
		}

		if (usage == 1) {
			time64_t expire_at = cell->last_inactive;

			if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) &&
			    !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags))
				expire_at += afs_cell_gc_delay;
			if (purging || expire_at <= now)
				sched_cell = true;
			else if (expire_at < next_manage)
				next_manage = expire_at;
		}

		if (!purging) {
			if (cell->dns_expiry <= now)
				sched_cell = true;
			else if (cell->dns_expiry <= next_manage)
				next_manage = cell->dns_expiry;
		}

		if (sched_cell)
			queue_work(afs_wq, &cell->manager);
	}

	read_sequnlock_excl(&net->cells_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * cells_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->cells_manager))
				atomic_inc(&net->cells_outstanding);
		} else {
			afs_set_cell_timer(net, next_manage - now);
		}
	}

	afs_dec_cells_outstanding(net);
	_leave(" [%d]", atomic_read(&net->cells_outstanding));
}

/*
 * Purge in-memory cell database.
 */
void afs_cell_purge(struct afs_net *net)
{
	struct afs_cell *ws;

	_enter("");

	write_seqlock(&net->cells_lock);
	ws = rcu_access_pointer(net->ws_cell);
	RCU_INIT_POINTER(net->ws_cell, NULL);
	write_sequnlock(&net->cells_lock);
	afs_put_cell(net, ws);

	_debug("del timer");
	if (del_timer_sync(&net->cells_timer))
		atomic_dec(&net->cells_outstanding);

	_debug("kick mgr");
	afs_queue_cell_manager(net);

	_debug("wait");
	wait_var_event(&net->cells_outstanding,
		       !atomic_read(&net->cells_outstanding));
	_leave("");
}