xref: /openbmc/linux/fs/afs/cell.c (revision e0f6d1a5)
/* AFS cell and server record management
 *
 * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/key.h>
#include <linux/ctype.h>
#include <linux/dns_resolver.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <keys/rxrpc-type.h>
#include "internal.h"

static unsigned __read_mostly afs_cell_gc_delay = 10;

static void afs_manage_cell(struct work_struct *);

static void afs_dec_cells_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->cells_outstanding))
		wake_up_var(&net->cells_outstanding);
}

/*
 * Set the cell timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 */
static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
{
	if (net->live) {
		atomic_inc(&net->cells_outstanding);
		if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
			afs_dec_cells_outstanding(net);
	}
}
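
/* Note on the accounting above: net->cells_outstanding holds one count for
 * each pending cells_timer or queued cells_manager run.  timer_reduce()
 * reports whether the timer was already pending (as mod_timer() does); if it
 * was, that timer already accounts for one count, so the count taken just
 * above is dropped again.
 */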

/*
 * Look up and get an activation reference on a cell record under RCU
 * conditions.  The caller must hold the RCU read lock.
 */
struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
				     const char *name, unsigned int namesz)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n, seq = 0, ret = 0;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		if (cell)
			afs_put_cell(net, cell);
		cell = NULL;
		ret = -ENOENT;

		read_seqbegin_or_lock(&net->cells_lock, &seq);

		if (!name) {
			cell = rcu_dereference_raw(net->ws_cell);
			if (cell) {
				afs_get_cell(cell);
				break;
			}
			ret = -EDESTADDRREQ;
			continue;
		}

		p = rcu_dereference_raw(net->cells.rb_node);
		while (p) {
			cell = rb_entry(p, struct afs_cell, net_node);

			n = strncasecmp(cell->name, name,
					min_t(size_t, cell->name_len, namesz));
			if (n == 0)
				n = cell->name_len - namesz;
			if (n < 0) {
				p = rcu_dereference_raw(p->rb_left);
			} else if (n > 0) {
				p = rcu_dereference_raw(p->rb_right);
			} else {
				if (atomic_inc_not_zero(&cell->usage)) {
					ret = 0;
					break;
				}
				/* We want to repeat the search, this time with
				 * the lock properly locked.
				 */
			}
			cell = NULL;
		}

	} while (need_seqretry(&net->cells_lock, seq));

	done_seqretry(&net->cells_lock, seq);

	return ret == 0 ? cell : ERR_PTR(ret);
}
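
/* A minimal usage sketch for the lookup above (hypothetical caller, error
 * handling elided); the RCU read lock must be held across the call and the
 * returned reference dropped with afs_put_cell() when finished:
 *
 *	rcu_read_lock();
 *	cell = afs_lookup_cell_rcu(net, "example.org", 11);
 *	rcu_read_unlock();
 *	if (!IS_ERR(cell)) {
 *		... use cell ...
 *		afs_put_cell(net, cell);
 *	}
 */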

/*
 * Set up a cell record and fill in its name, VL server address list and
 * allocate an anonymous key
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
				       const char *name, unsigned int namelen,
				       const char *vllist)
{
	struct afs_cell *cell;
	int i, ret;

	ASSERT(name);
	if (namelen == 0)
		return ERR_PTR(-EINVAL);
	if (namelen > AFS_MAXCELLNAME) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}
	if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
		return ERR_PTR(-EINVAL);

	_enter("%*.*s,%s", namelen, namelen, name, vllist);

	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
	if (!cell) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	cell->net = net;
	cell->name_len = namelen;
	for (i = 0; i < namelen; i++)
		cell->name[i] = tolower(name[i]);

	atomic_set(&cell->usage, 2);
	INIT_WORK(&cell->manager, afs_manage_cell);
	cell->flags = ((1 << AFS_CELL_FL_NOT_READY) |
		       (1 << AFS_CELL_FL_NO_LOOKUP_YET));
	INIT_LIST_HEAD(&cell->proc_volumes);
	rwlock_init(&cell->proc_lock);
	rwlock_init(&cell->vl_addrs_lock);

	/* Fill in the VL server list if we were given a list of addresses to
	 * use.
	 */
	if (vllist) {
		struct afs_addr_list *alist;

		alist = afs_parse_text_addrs(vllist, strlen(vllist), ':',
					     VL_SERVICE, AFS_VL_PORT);
		if (IS_ERR(alist)) {
			ret = PTR_ERR(alist);
			goto parse_failed;
		}

		rcu_assign_pointer(cell->vl_addrs, alist);
		cell->dns_expiry = TIME64_MAX;
	}

	_leave(" = %p", cell);
	return cell;

parse_failed:
	if (ret == -EINVAL)
		printk(KERN_ERR "kAFS: bad VL server IP address\n");
	kfree(cell);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
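
/* A few notes on the allocation above: the initial usage count of 2 appears
 * to cover one reference for the caller and one for the cell's presence in
 * the net->cells tree; kzalloc() zero-fills the record, so the lowercased
 * name copy ends up NUL-terminated (afs_alloc_anon_key() below relies on
 * this); and when a static VL server list is supplied, dns_expiry is set to
 * TIME64_MAX so the cell manager never schedules a DNS refresh for it.
 */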

/*
 * afs_lookup_cell - Look up or create a cell record.
 * @net:	The network namespace
 * @name:	The name of the cell.
 * @namesz:	The strlen of the cell name.
 * @vllist:	A colon/comma separated list of numeric IP addresses or NULL.
 * @excl:	T if an error should be given if the cell name already exists.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed.  Note that the actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted, whilst still
 * allowing cell records to be shared even if not yet fully constructed.
 */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
				 const char *name, unsigned int namesz,
				 const char *vllist, bool excl)
{
	struct afs_cell *cell, *candidate, *cursor;
	struct rb_node *parent, **pp;
	int ret, n;

	_enter("%s,%s", name, vllist);

	if (!excl) {
		rcu_read_lock();
		cell = afs_lookup_cell_rcu(net, name, namesz);
		rcu_read_unlock();
		if (!IS_ERR(cell))
			goto wait_for_cell;
	}

	/* Assume we're probably going to create a cell and preallocate and
	 * mostly set up a candidate record.  We can then use this to stash the
	 * name, the net namespace and VL server addresses.
	 *
	 * We also want to do this before we hold any locks as it may involve
	 * upcalling to userspace to make DNS queries.
	 */
	candidate = afs_alloc_cell(net, name, namesz, vllist);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	/* Find the insertion point and check to see if someone else added a
	 * cell whilst we were allocating.
	 */
	write_seqlock(&net->cells_lock);

	pp = &net->cells.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent, struct afs_cell, net_node);

		n = strncasecmp(cursor->name, name,
				min_t(size_t, cursor->name_len, namesz));
		if (n == 0)
			n = cursor->name_len - namesz;
		if (n < 0)
			pp = &(*pp)->rb_left;
		else if (n > 0)
			pp = &(*pp)->rb_right;
		else
			goto cell_already_exists;
	}

	cell = candidate;
	candidate = NULL;
	rb_link_node_rcu(&cell->net_node, parent, pp);
	rb_insert_color(&cell->net_node, &net->cells);
	atomic_inc(&net->cells_outstanding);
	write_sequnlock(&net->cells_lock);

	queue_work(afs_wq, &cell->manager);

wait_for_cell:
	_debug("wait_for_cell");
	ret = wait_on_bit(&cell->flags, AFS_CELL_FL_NOT_READY, TASK_INTERRUPTIBLE);
	smp_rmb();

	switch (READ_ONCE(cell->state)) {
	case AFS_CELL_FAILED:
		ret = cell->error;
		goto error;
	default:
		_debug("weird %u %d", cell->state, cell->error);
		goto error;
	case AFS_CELL_ACTIVE:
		break;
	}

	_leave(" = %p [cell]", cell);
	return cell;

cell_already_exists:
	_debug("cell exists");
	cell = cursor;
	if (excl) {
		ret = -EEXIST;
	} else {
		afs_get_cell(cursor);
		ret = 0;
	}
	write_sequnlock(&net->cells_lock);
	kfree(candidate);
	if (ret == 0)
		goto wait_for_cell;
	goto error_noput;
error:
	afs_put_cell(net, cell);
error_noput:
	_leave(" = %d [error]", ret);
	return ERR_PTR(ret);
}
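
/* A minimal usage sketch (hypothetical caller, error handling elided):
 *
 *	cell = afs_lookup_cell(net, "example.org", 11, NULL, false);
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *	... use cell ...
 *	afs_put_cell(net, cell);
 */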

/*
 * set the root cell information
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
	struct afs_cell *old_root, *new_root;
	const char *cp, *vllist;
	size_t len;

	_enter("");

	if (!rootcell) {
		/* module is loaded with no parameters, or built statically.
		 * - in the future we might initialize cell DB here.
		 */
		_leave(" = 0 [no root]");
		return 0;
	}

	cp = strchr(rootcell, ':');
	if (!cp) {
		_debug("kAFS: no VL server IP addresses specified");
		vllist = NULL;
		len = strlen(rootcell);
	} else {
		vllist = cp + 1;
		len = cp - rootcell;
	}

	/* allocate a cell record for the root cell */
	new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
	if (IS_ERR(new_root)) {
		_leave(" = %ld", PTR_ERR(new_root));
		return PTR_ERR(new_root);
	}

	if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
		afs_get_cell(new_root);

	/* install the new cell */
	write_seqlock(&net->cells_lock);
	old_root = net->ws_cell;
	net->ws_cell = new_root;
	write_sequnlock(&net->cells_lock);

	afs_put_cell(net, old_root);
	_leave(" = 0");
	return 0;
}
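
/* The rootcell string is of the form "<cellname>[:<colon-separated VL server
 * addresses>]"; everything before the first colon is the cell name and the
 * remainder, if any, is passed to afs_lookup_cell() as the vllist.  For
 * example (hypothetical values):
 *
 *	rootcell=example.org:203.0.113.1:203.0.113.2
 */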

/*
 * Update a cell's VL server address list from the DNS.
 */
static void afs_update_cell(struct afs_cell *cell)
{
	struct afs_addr_list *alist, *old;
	time64_t now, expiry;

	_enter("%s", cell->name);

	alist = afs_dns_query(cell, &expiry);
	if (IS_ERR(alist)) {
		switch (PTR_ERR(alist)) {
		case -ENODATA:
			/* The DNS said that the cell does not exist */
			set_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags);
			clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
			cell->dns_expiry = ktime_get_real_seconds() + 61;
			break;

		case -EAGAIN:
		case -ECONNREFUSED:
		default:
			set_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
			cell->dns_expiry = ktime_get_real_seconds() + 10;
			break;
		}

		cell->error = -EDESTADDRREQ;
	} else {
		clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
		clear_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags);

		/* Exclusion on changing vl_addrs is achieved by a
		 * non-reentrant work item.
		 */
		old = rcu_dereference_protected(cell->vl_addrs, true);
		rcu_assign_pointer(cell->vl_addrs, alist);
		cell->dns_expiry = expiry;

		if (old)
			afs_put_addrlist(old);
	}

	if (test_and_clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags))
		wake_up_bit(&cell->flags, AFS_CELL_FL_NO_LOOKUP_YET);

	now = ktime_get_real_seconds();
	afs_set_cell_timer(cell->net, cell->dns_expiry - now);
	_leave("");
}
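
/* Scheduling note for the update above: a cell that the DNS says does not
 * exist is rechecked roughly a minute later, other DNS failures are retried
 * after about ten seconds, and a successful lookup is refreshed when the
 * expiry time returned by the query is reached.  afs_dns_query() itself is
 * implemented elsewhere in this module and is presumed to go through the
 * dns_resolver upcall.
 */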

/*
 * Destroy a cell record
 */
static void afs_cell_destroy(struct rcu_head *rcu)
{
	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);

	_enter("%p{%s}", cell, cell->name);

	ASSERTCMP(atomic_read(&cell->usage), ==, 0);

	afs_put_addrlist(rcu_access_pointer(cell->vl_addrs));
	key_put(cell->anonymous_key);
	kfree(cell);

	_leave(" [destroyed]");
}

/*
 * Queue the cell manager.
 */
static void afs_queue_cell_manager(struct afs_net *net)
{
	int outstanding = atomic_inc_return(&net->cells_outstanding);

	_enter("%d", outstanding);

	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}

/*
 * Cell management timer.  We have an increment on cells_outstanding that we
 * need to pass along to the work item.
 */
void afs_cells_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, cells_timer);

	_enter("");
	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}
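
/* Both paths above hand an implicit count on cells_outstanding to the
 * cells_manager work item: afs_queue_cell_manager() takes its own count,
 * whereas the timer inherits the one taken in afs_set_cell_timer().  If
 * queue_work() reports the work item as already queued, the count is dropped
 * here, since the already-queued run will only consume one count when it
 * completes.  The timer itself is presumably initialised with timer_setup()
 * during network-namespace setup, outside this file.
 */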

/*
 * Get a reference on a cell record.
 */
struct afs_cell *afs_get_cell(struct afs_cell *cell)
{
	atomic_inc(&cell->usage);
	return cell;
}

/*
 * Drop a reference on a cell record.
 */
void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
{
	time64_t now, expire_delay;

	if (!cell)
		return;

	_enter("%s", cell->name);

	now = ktime_get_real_seconds();
	cell->last_inactive = now;
	expire_delay = 0;
	if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) &&
	    !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags))
		expire_delay = afs_cell_gc_delay;

	if (atomic_dec_return(&cell->usage) > 1)
		return;

	/* 'cell' may now be garbage collected. */
	afs_set_cell_timer(net, expire_delay);
}
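
/* Dropping to a usage count of 1 means that only the reference apparently
 * held for the cell's membership of net->cells remains; arming the cell
 * timer here lets afs_manage_cells() notice the now-idle cell and, after
 * afs_cell_gc_delay seconds of inactivity (or straight away if its DNS
 * lookup failed), queue its manager to reap it.
 */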

/*
 * Allocate a key to use as a placeholder for anonymous user security.
 */
static int afs_alloc_anon_key(struct afs_cell *cell)
{
	struct key *key;
	char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;

	/* Create a key to represent an anonymous user. */
	memcpy(keyname, "afs@", 4);
	dp = keyname + 4;
	cp = cell->name;
	do {
		*dp++ = tolower(*cp);
	} while (*cp++);

	key = rxrpc_get_null_key(keyname);
	if (IS_ERR(key))
		return PTR_ERR(key);

	cell->anonymous_key = key;

	_debug("anon key %p{%x}",
	       cell->anonymous_key, key_serial(cell->anonymous_key));
	return 0;
}
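
/* The anonymous key created above is named "afs@<CELLNAME>"; the buffer
 * allows for the four-character prefix, the maximum cell name and a trailing
 * NUL.  rxrpc_get_null_key() appears to yield a key with no security
 * payload, the idea presumably being that RPCs issued with it run as the
 * anonymous user.
 */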

/*
 * Activate a cell.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
	int ret;

	if (!cell->anonymous_key) {
		ret = afs_alloc_anon_key(cell);
		if (ret < 0)
			return ret;
	}

#ifdef CONFIG_AFS_FSCACHE
	cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
					     &afs_cell_cache_index_def,
					     cell->name, strlen(cell->name),
					     NULL, 0,
					     cell, 0, true);
#endif
	ret = afs_proc_cell_setup(net, cell);
	if (ret < 0)
		return ret;
	spin_lock(&net->proc_cells_lock);
	list_add_tail(&cell->proc_link, &net->proc_cells);
	spin_unlock(&net->proc_cells_lock);
	return 0;
}

/*
 * Deactivate a cell.
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(net, cell);

	spin_lock(&net->proc_cells_lock);
	list_del_init(&cell->proc_link);
	spin_unlock(&net->proc_cells_lock);

#ifdef CONFIG_AFS_FSCACHE
	fscache_relinquish_cookie(cell->cache, NULL, false);
	cell->cache = NULL;
#endif

	_leave("");
}

/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 */
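
/* The state machine driven below, summarised:
 *
 *	UNSET -> ACTIVATING -> ACTIVE -> DEACTIVATING -> INACTIVE
 *	ACTIVATING -> FAILED (on activation error)
 *
 * An INACTIVE or FAILED cell whose usage count has fallen to 1 is unlinked
 * from net->cells and freed via RCU; a DEACTIVATING cell that picks up new
 * users is flipped back to ACTIVE.
 */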
static void afs_manage_cell(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, manager);
	struct afs_net *net = cell->net;
	bool deleted;
	int ret, usage;

	_enter("%s", cell->name);

again:
	_debug("state %u", cell->state);
	switch (cell->state) {
	case AFS_CELL_INACTIVE:
	case AFS_CELL_FAILED:
		write_seqlock(&net->cells_lock);
		usage = 1;
		deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0);
		if (deleted)
			rb_erase(&cell->net_node, &net->cells);
		write_sequnlock(&net->cells_lock);
		if (deleted)
			goto final_destruction;
		if (cell->state == AFS_CELL_FAILED)
			goto done;
		cell->state = AFS_CELL_UNSET;
		goto again;

	case AFS_CELL_UNSET:
		cell->state = AFS_CELL_ACTIVATING;
		goto again;

	case AFS_CELL_ACTIVATING:
		ret = afs_activate_cell(net, cell);
		if (ret < 0)
			goto activation_failed;

		cell->state = AFS_CELL_ACTIVE;
		smp_wmb();
		clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
		wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
		goto again;

	case AFS_CELL_ACTIVE:
		if (atomic_read(&cell->usage) > 1) {
			time64_t now = ktime_get_real_seconds();
			if (cell->dns_expiry <= now && net->live)
				afs_update_cell(cell);
			goto done;
		}
		cell->state = AFS_CELL_DEACTIVATING;
		goto again;

	case AFS_CELL_DEACTIVATING:
		set_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
		if (atomic_read(&cell->usage) > 1)
			goto reverse_deactivation;
		afs_deactivate_cell(net, cell);
		cell->state = AFS_CELL_INACTIVE;
		goto again;

	default:
		break;
	}
	_debug("bad state %u", cell->state);
	BUG(); /* Unhandled state */

activation_failed:
	cell->error = ret;
	afs_deactivate_cell(net, cell);

	cell->state = AFS_CELL_FAILED;
	smp_wmb();
	if (test_and_clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags))
		wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
	goto again;

reverse_deactivation:
	cell->state = AFS_CELL_ACTIVE;
	smp_wmb();
	clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
	wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
	_leave(" [deact->act]");
	return;

done:
	_leave(" [done %u]", cell->state);
	return;

final_destruction:
	call_rcu(&cell->rcu, afs_cell_destroy);
	afs_dec_cells_outstanding(net);
	_leave(" [destruct %d]", atomic_read(&net->cells_outstanding));
}
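
/* The call_rcu() in final_destruction pairs with the lockless walk in
 * afs_lookup_cell_rcu(): once the usage count has been dropped to zero under
 * the cells_lock, a concurrent lookup can no longer take a reference
 * (atomic_inc_not_zero() fails), so an erased cell is never resurrected, and
 * the RCU grace period keeps the record itself valid for such walkers.
 */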

/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that constructed cell records may only be removed from net->cells by
 * this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to the caller (provided it skips cells that
 * are still under construction).
 *
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_cells(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, cells_manager);
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the cell database looking for cells that have expired from
	 * lack of use and cells whose DNS results have expired and dispatch
	 * their managers.
	 */
	read_seqlock_excl(&net->cells_lock);

	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell =
			rb_entry(cursor, struct afs_cell, net_node);
		unsigned usage;
		bool sched_cell = false;

		usage = atomic_read(&cell->usage);
		_debug("manage %s %u", cell->name, usage);

		ASSERTCMP(usage, >=, 1);

		if (purging) {
			if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
				usage = atomic_dec_return(&cell->usage);
			ASSERTCMP(usage, ==, 1);
		}

		if (usage == 1) {
			time64_t expire_at = cell->last_inactive;

			if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) &&
			    !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags))
				expire_at += afs_cell_gc_delay;
			if (purging || expire_at <= now)
				sched_cell = true;
			else if (expire_at < next_manage)
				next_manage = expire_at;
		}

		if (!purging) {
			if (cell->dns_expiry <= now)
				sched_cell = true;
			else if (cell->dns_expiry <= next_manage)
				next_manage = cell->dns_expiry;
		}

		if (sched_cell)
			queue_work(afs_wq, &cell->manager);
	}

	read_sequnlock_excl(&net->cells_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * cells_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->cells_manager))
				atomic_inc(&net->cells_outstanding);
		} else {
			afs_set_cell_timer(net, next_manage - now);
		}
	}

	afs_dec_cells_outstanding(net);
	_leave(" [%d]", atomic_read(&net->cells_outstanding));
}
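
/* Garbage collection policy, as implemented above: a cell whose usage count
 * has dropped to 1 is scheduled for its manager once afs_cell_gc_delay
 * seconds have passed since it went idle (immediately if its DNS lookup
 * failed, or if the namespace is being purged), and the manager then unlinks
 * and destroys it.  Otherwise the timer is re-armed for the earliest pending
 * expiry or DNS refresh.
 */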

/*
 * Purge in-memory cell database.
 */
void afs_cell_purge(struct afs_net *net)
{
	struct afs_cell *ws;

	_enter("");

	write_seqlock(&net->cells_lock);
	ws = net->ws_cell;
	net->ws_cell = NULL;
	write_sequnlock(&net->cells_lock);
	afs_put_cell(net, ws);

	_debug("del timer");
	if (del_timer_sync(&net->cells_timer))
		atomic_dec(&net->cells_outstanding);

	_debug("kick mgr");
	afs_queue_cell_manager(net);

	_debug("wait");
	wait_var_event(&net->cells_outstanding,
		       !atomic_read(&net->cells_outstanding));
	_leave("");
}
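
/* Purge sequence, in brief: drop the workstation cell reference, cancel any
 * pending timer (and the count it holds), kick the manager one final time so
 * that idle cells are reaped, then wait for cells_outstanding to fall to
 * zero, which implies that every cell record has been destroyed.
 */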
775