xref: /openbmc/linux/fs/afs/cell.c (revision 4a3fad70)
1 /* AFS cell and server record management
2  *
3  * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 
12 #include <linux/slab.h>
13 #include <linux/key.h>
14 #include <linux/ctype.h>
15 #include <linux/dns_resolver.h>
16 #include <linux/sched.h>
17 #include <linux/inet.h>
18 #include <keys/rxrpc-type.h>
19 #include "internal.h"
20 
21 unsigned __read_mostly afs_cell_gc_delay = 10;
22 
23 static void afs_manage_cell(struct work_struct *);
24 
25 static void afs_dec_cells_outstanding(struct afs_net *net)
26 {
27 	if (atomic_dec_and_test(&net->cells_outstanding))
28 		wake_up_atomic_t(&net->cells_outstanding);
29 }
30 
31 /*
32  * Set the cell timer to fire after a given delay, assuming it's not already
33  * set for an earlier time.
34  */
35 static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
36 {
37 	if (net->live) {
38 		atomic_inc(&net->cells_outstanding);
39 		if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
40 			afs_dec_cells_outstanding(net);
41 	}
42 }
43 
44 /*
45  * Look up and get an activation reference on a cell record under RCU
46  * conditions.  The caller must hold the RCU read lock.
47  */
48 struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
49 				     const char *name, unsigned int namesz)
50 {
51 	struct afs_cell *cell = NULL;
52 	struct rb_node *p;
53 	int n, seq = 0, ret = 0;
54 
55 	_enter("%*.*s", namesz, namesz, name);
56 
	/* A zero-length name is invalid; a NULL name selects the
	 * workstation cell (net->ws_cell) rather than a tree lookup.
	 */
57 	if (name && namesz == 0)
58 		return ERR_PTR(-EINVAL);
59 	if (namesz > AFS_MAXCELLNAME)
60 		return ERR_PTR(-ENAMETOOLONG);
61 
62 	do {
63 		/* Unfortunately, rbtree walking doesn't give reliable results
64 		 * under just the RCU read lock, so we have to check for
65 		 * changes.
66 		 */
67 		if (cell)
68 			afs_put_cell(net, cell);
69 		cell = NULL;
70 		ret = -ENOENT;
71 
		/* First pass is lockless under the seqlock; if a writer raced
		 * with us, need_seqretry() makes the second pass take the
		 * lock exclusively.
		 */
72 		read_seqbegin_or_lock(&net->cells_lock, &seq);
73 
74 		if (!name) {
75 			cell = rcu_dereference_raw(net->ws_cell);
76 			if (cell) {
77 				afs_get_cell(cell);
78 				continue;
79 			}
80 			ret = -EDESTADDRREQ;
81 			continue;
82 		}
83 
84 		p = rcu_dereference_raw(net->cells.rb_node);
85 		while (p) {
86 			cell = rb_entry(p, struct afs_cell, net_node);
87 
			/* Case-insensitive comparison on the common prefix;
			 * equal prefixes are ordered by name length.
			 */
88 			n = strncasecmp(cell->name, name,
89 					min_t(size_t, cell->name_len, namesz));
90 			if (n == 0)
91 				n = cell->name_len - namesz;
92 			if (n < 0) {
93 				p = rcu_dereference_raw(p->rb_left);
94 			} else if (n > 0) {
95 				p = rcu_dereference_raw(p->rb_right);
96 			} else {
				/* Only take the cell if it still has refs: a
				 * zero usage count means it's being torn down.
				 */
97 				if (atomic_inc_not_zero(&cell->usage)) {
98 					ret = 0;
99 					break;
100 				}
101 				/* We want to repeat the search, this time with
102 				 * the lock properly locked.
103 				 */
104 			}
105 			cell = NULL;
106 		}
107 
108 	} while (need_seqretry(&net->cells_lock, seq));
109 
110 	done_seqretry(&net->cells_lock, seq);
111 
112 	return ret == 0 ? cell : ERR_PTR(ret);
113 }
114 
115 /*
116  * Set up a cell record and fill in its name, VL server address list and
117  * allocate an anonymous key
118  */
119 static struct afs_cell *afs_alloc_cell(struct afs_net *net,
120 				       const char *name, unsigned int namelen,
121 				       const char *vllist)
122 {
123 	struct afs_cell *cell;
124 	int i, ret;
125 
126 	ASSERT(name);
127 	if (namelen == 0)
128 		return ERR_PTR(-EINVAL);
129 	if (namelen > AFS_MAXCELLNAME) {
130 		_leave(" = -ENAMETOOLONG");
131 		return ERR_PTR(-ENAMETOOLONG);
132 	}
133 
134 	_enter("%*.*s,%s", namelen, namelen, name, vllist);
135 
136 	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
137 	if (!cell) {
138 		_leave(" = -ENOMEM");
139 		return ERR_PTR(-ENOMEM);
140 	}
141 
142 	cell->net = net;
143 	cell->name_len = namelen;
	/* Cell names are stored in lower-case canonical form. */
144 	for (i = 0; i < namelen; i++)
145 		cell->name[i] = tolower(name[i]);
146 
	/* Two refs: one for the caller and one held by the cell tree (the
	 * latter is released by afs_manage_cell() on garbage collection).
	 */
147 	atomic_set(&cell->usage, 2);
148 	INIT_WORK(&cell->manager, afs_manage_cell);
	/* NOT_READY blocks lookups until the manager has activated the cell;
	 * NO_LOOKUP_YET marks that no DNS query has completed yet.
	 */
149 	cell->flags = ((1 << AFS_CELL_FL_NOT_READY) |
150 		       (1 << AFS_CELL_FL_NO_LOOKUP_YET));
151 	INIT_LIST_HEAD(&cell->proc_volumes);
152 	rwlock_init(&cell->proc_lock);
153 	rwlock_init(&cell->vl_addrs_lock);
154 
155 	/* Fill in the VL server list if we were given a list of addresses to
156 	 * use.
157 	 */
158 	if (vllist) {
159 		struct afs_addr_list *alist;
160 
161 		alist = afs_parse_text_addrs(vllist, strlen(vllist), ':',
162 					     VL_SERVICE, AFS_VL_PORT);
163 		if (IS_ERR(alist)) {
164 			ret = PTR_ERR(alist);
165 			goto parse_failed;
166 		}
167 
168 		rcu_assign_pointer(cell->vl_addrs, alist);
		/* A statically-configured address list never expires, so no
		 * DNS refresh will be scheduled for this cell.
		 */
169 		cell->dns_expiry = TIME64_MAX;
170 	}
171 
172 	_leave(" = %p", cell);
173 	return cell;
174 
175 parse_failed:
176 	if (ret == -EINVAL)
177 		printk(KERN_ERR "kAFS: bad VL server IP address\n");
178 	kfree(cell);
179 	_leave(" = %d", ret);
180 	return ERR_PTR(ret);
181 }
182 
183 /*
184  * afs_lookup_cell - Look up or create a cell record.
185  * @net:	The network namespace
186  * @name:	The name of the cell.
187  * @namesz:	The strlen of the cell name.
188  * @vllist:	A colon/comma separated list of numeric IP addresses or NULL.
189  * @excl:	T if an error should be given if the cell name already exists.
190  *
191  * Look up a cell record by name and query the DNS for VL server addresses if
192  * needed.  Note that the actual DNS query is punted off to the manager thread
193  * so that this function can return immediately if interrupted whilst allowing
194  * cell records to be shared even if not yet fully constructed.
195  */
196 struct afs_cell *afs_lookup_cell(struct afs_net *net,
197 				 const char *name, unsigned int namesz,
198 				 const char *vllist, bool excl)
199 {
200 	struct afs_cell *cell, *candidate, *cursor;
201 	struct rb_node *parent, **pp;
202 	int ret, n;
203 
204 	_enter("%s,%s", name, vllist);
205 
206 	if (!excl) {
207 		rcu_read_lock();
208 		cell = afs_lookup_cell_rcu(net, name, namesz);
209 		rcu_read_unlock();
210 		if (!IS_ERR(cell))
211 			goto wait_for_cell;
212 	}
213 
214 	/* Assume we're probably going to create a cell and preallocate and
215 	 * mostly set up a candidate record.  We can then use this to stash the
216 	 * name, the net namespace and VL server addresses.
217 	 *
218 	 * We also want to do this before we hold any locks as it may involve
219 	 * upcalling to userspace to make DNS queries.
220 	 */
221 	candidate = afs_alloc_cell(net, name, namesz, vllist);
222 	if (IS_ERR(candidate)) {
223 		_leave(" = %ld", PTR_ERR(candidate));
224 		return candidate;
225 	}
226 
227 	/* Find the insertion point and check to see if someone else added a
228 	 * cell whilst we were allocating.
229 	 */
230 	write_seqlock(&net->cells_lock);
231 
232 	pp = &net->cells.rb_node;
233 	parent = NULL;
234 	while (*pp) {
235 		parent = *pp;
236 		cursor = rb_entry(parent, struct afs_cell, net_node);
237 
238 		n = strncasecmp(cursor->name, name,
239 				min_t(size_t, cursor->name_len, namesz));
240 		if (n == 0)
241 			n = cursor->name_len - namesz;
242 		if (n < 0)
243 			pp = &(*pp)->rb_left;
244 		else if (n > 0)
245 			pp = &(*pp)->rb_right;
246 		else
247 			goto cell_already_exists;
248 	}
249 
250 	cell = candidate;
251 	candidate = NULL;
252 	rb_link_node_rcu(&cell->net_node, parent, pp);
253 	rb_insert_color(&cell->net_node, &net->cells);
254 	atomic_inc(&net->cells_outstanding);
255 	write_sequnlock(&net->cells_lock);
256 
257 	queue_work(afs_wq, &cell->manager);
258 
259 wait_for_cell:
260 	_debug("wait_for_cell");
261 	ret = wait_on_bit(&cell->flags, AFS_CELL_FL_NOT_READY, TASK_INTERRUPTIBLE);
262 	smp_rmb();
263 
264 	switch (READ_ONCE(cell->state)) {
265 	case AFS_CELL_FAILED:
266 		ret = cell->error;
267 		goto error;
268 	default:
269 		_debug("weird %u %d", cell->state, cell->error);
270 		goto error;
271 	case AFS_CELL_ACTIVE:
272 		break;
273 	}
274 
275 	_leave(" = %p [cell]", cell);
276 	return cell;
277 
278 cell_already_exists:
279 	_debug("cell exists");
280 	cell = cursor;
281 	if (excl) {
282 		ret = -EEXIST;
283 	} else {
284 		afs_get_cell(cursor);
285 		ret = 0;
286 	}
287 	write_sequnlock(&net->cells_lock);
288 	kfree(candidate);
289 	if (ret == 0)
290 		goto wait_for_cell;
291 	goto error_noput;
292 error:
293 	afs_put_cell(net, cell);
294 error_noput:
295 	_leave(" = %d [error]", ret);
296 	return ERR_PTR(ret);
297 }
298 
299 /*
300  * set the root cell information
301  * - can be called with a module parameter string
302  * - can be called from a write to /proc/fs/afs/rootcell
303  */
304 int afs_cell_init(struct afs_net *net, const char *rootcell)
305 {
306 	struct afs_cell *old_root, *new_root;
307 	const char *cp, *vllist;
308 	size_t len;
309 
310 	_enter("");
311 
312 	if (!rootcell) {
313 		/* module is loaded with no parameters, or built statically.
314 		 * - in the future we might initialize cell DB here.
315 		 */
316 		_leave(" = 0 [no root]");
317 		return 0;
318 	}
319 
	/* The parameter is "<cellname>[:<colon-separated VL addresses>]". */
320 	cp = strchr(rootcell, ':');
321 	if (!cp) {
322 		_debug("kAFS: no VL server IP addresses specified");
323 		vllist = NULL;
324 		len = strlen(rootcell);
325 	} else {
326 		vllist = cp + 1;
327 		len = cp - rootcell;
328 	}
329 
330 	/* allocate a cell record for the root cell */
331 	new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
332 	if (IS_ERR(new_root)) {
333 		_leave(" = %ld", PTR_ERR(new_root));
334 		return PTR_ERR(new_root);
335 	}
336 
	/* The NO_GC flag pins an extra ref on the cell; both the flag and
	 * the ref taken here are dropped together by afs_manage_cells()
	 * when the namespace is purged.
	 */
337 	set_bit(AFS_CELL_FL_NO_GC, &new_root->flags);
338 	afs_get_cell(new_root);
339 
340 	/* install the new cell */
341 	write_seqlock(&net->cells_lock);
342 	old_root = net->ws_cell;
343 	net->ws_cell = new_root;
344 	write_sequnlock(&net->cells_lock);
345 
	/* The ref from afs_lookup_cell() now belongs to net->ws_cell; drop
	 * the displaced cell's ref, if there was one.
	 */
346 	afs_put_cell(net, old_root);
347 	_leave(" = 0");
348 	return 0;
349 }
350 
351 /*
352  * Update a cell's VL server address list from the DNS.
353  */
354 static void afs_update_cell(struct afs_cell *cell)
355 {
356 	struct afs_addr_list *alist, *old;
357 	time64_t now, expiry;
358 
359 	_enter("%s", cell->name);
360 
361 	alist = afs_dns_query(cell, &expiry);
362 	if (IS_ERR(alist)) {
363 		switch (PTR_ERR(alist)) {
364 		case -ENODATA:
365 			/* The DNS said that the cell does not exist */
366 			set_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags);
367 			clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
			/* Recheck in about a minute. */
368 			cell->dns_expiry = ktime_get_real_seconds() + 61;
369 			break;
370 
371 		case -EAGAIN:
372 		case -ECONNREFUSED:
373 		default:
			/* Transient lookup failure: retry sooner. */
374 			set_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
375 			cell->dns_expiry = ktime_get_real_seconds() + 10;
376 			break;
377 		}
378 
379 		cell->error = -EDESTADDRREQ;
380 	} else {
381 		clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
382 		clear_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags);
383 
384 		/* Exclusion on changing vl_addrs is achieved by a
385 		 * non-reentrant work item.
386 		 */
387 		old = rcu_dereference_protected(cell->vl_addrs, true);
388 		rcu_assign_pointer(cell->vl_addrs, alist);
389 		cell->dns_expiry = expiry;
390 
391 		if (old)
392 			afs_put_addrlist(old);
393 	}
394 
	/* Wake anyone waiting for the first DNS lookup to complete, whether
	 * or not it succeeded.
	 */
395 	if (test_and_clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags))
396 		wake_up_bit(&cell->flags, AFS_CELL_FL_NO_LOOKUP_YET);
397 
	/* Rearm the per-namespace management timer for the next expiry. */
398 	now = ktime_get_real_seconds();
399 	afs_set_cell_timer(cell->net, cell->dns_expiry - now);
400 	_leave("");
401 }
402 
403 /*
404  * Destroy a cell record
405  */
406 static void afs_cell_destroy(struct rcu_head *rcu)
407 {
408 	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
409 
410 	_enter("%p{%s}", cell, cell->name);
411 
	/* No refs may remain by the time the RCU callback runs (the last one
	 * was swapped to zero in afs_manage_cell() before call_rcu()).
	 */
412 	ASSERTCMP(atomic_read(&cell->usage), ==, 0);
413 
414 	afs_put_addrlist(cell->vl_addrs);
415 	key_put(cell->anonymous_key);
416 	kfree(cell);
417 
418 	_leave(" [destroyed]");
419 }
420 
421 /*
422  * Queue the cell manager.
423  */
424 static void afs_queue_cell_manager(struct afs_net *net)
425 {
426 	int outstanding = atomic_inc_return(&net->cells_outstanding);
427 
428 	_enter("%d", outstanding);
429 
430 	if (!queue_work(afs_wq, &net->cells_manager))
431 		afs_dec_cells_outstanding(net);
432 }
433 
434 /*
435  * Cell management timer.  We have an increment on cells_outstanding that we
436  * need to pass along to the work item.
437  */
438 void afs_cells_timer(struct timer_list *timer)
439 {
440 	struct afs_net *net = container_of(timer, struct afs_net, cells_timer);
441 
442 	_enter("");
443 	if (!queue_work(afs_wq, &net->cells_manager))
444 		afs_dec_cells_outstanding(net);
445 }
446 
447 /*
448  * Get a reference on a cell record.
449  */
450 struct afs_cell *afs_get_cell(struct afs_cell *cell)
451 {
	/* Plain increment: the caller must already hold a ref or otherwise
	 * know the cell can't go away (contrast the atomic_inc_not_zero()
	 * used in afs_lookup_cell_rcu()).
	 */
452 	atomic_inc(&cell->usage);
453 	return cell;
454 }
455 
456 /*
457  * Drop a reference on a cell record.
458  */
459 void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
460 {
461 	time64_t now, expire_delay;
462 
463 	if (!cell)
464 		return;
465 
466 	_enter("%s", cell->name);
467 
	/* Note the time of last use for the garbage collector.  A cell whose
	 * DNS lookup failed gets no grace period; otherwise it may linger
	 * for afs_cell_gc_delay seconds.
	 */
468 	now = ktime_get_real_seconds();
469 	cell->last_inactive = now;
470 	expire_delay = 0;
471 	if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) &&
472 	    !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags))
473 		expire_delay = afs_cell_gc_delay;
474 
	/* If only the tree's ref remains after this drop, arm the management
	 * timer so the cell can be considered for garbage collection.
	 */
475 	if (atomic_dec_return(&cell->usage) > 1)
476 		return;
477 
478 	/* 'cell' may now be garbage collected. */
479 	afs_set_cell_timer(net, expire_delay);
480 }
481 
482 /*
483  * Allocate a key to use as a placeholder for anonymous user security.
484  */
485 static int afs_alloc_anon_key(struct afs_cell *cell)
486 {
487 	struct key *key;
488 	char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;
489 
490 	/* Create a key to represent an anonymous user. */
491 	memcpy(keyname, "afs@", 4);
492 	dp = keyname + 4;
493 	cp = cell->name;
494 	do {
495 		*dp++ = tolower(*cp);
496 	} while (*cp++);
497 
498 	key = rxrpc_get_null_key(keyname);
499 	if (IS_ERR(key))
500 		return PTR_ERR(key);
501 
502 	cell->anonymous_key = key;
503 
504 	_debug("anon key %p{%x}",
505 	       cell->anonymous_key, key_serial(cell->anonymous_key));
506 	return 0;
507 }
508 
509 /*
510  * Activate a cell.
511  */
512 static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
513 {
514 	int ret;
515 
	/* The anonymous key is created once and kept for the cell's
	 * lifetime (freed in afs_cell_destroy()).
	 */
516 	if (!cell->anonymous_key) {
517 		ret = afs_alloc_anon_key(cell);
518 		if (ret < 0)
519 			return ret;
520 	}
521 
522 #ifdef CONFIG_AFS_FSCACHE
523 	cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
524 					     &afs_cell_cache_index_def,
525 					     cell, true);
526 #endif
	/* On failure here the fscache cookie acquired above is released by
	 * afs_deactivate_cell(), reached via the activation_failed path in
	 * afs_manage_cell().
	 */
527 	ret = afs_proc_cell_setup(net, cell);
528 	if (ret < 0)
529 		return ret;
530 	spin_lock(&net->proc_cells_lock);
531 	list_add_tail(&cell->proc_link, &net->proc_cells);
532 	spin_unlock(&net->proc_cells_lock);
533 	return 0;
534 }
535 
536 /*
537  * Deactivate a cell.
538  */
539 static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
540 {
541 	_enter("%s", cell->name);
542 
	/* Undo what afs_activate_cell() set up: /proc entries first, then
	 * the proc_cells list link, then the cache cookie.
	 */
543 	afs_proc_cell_remove(net, cell);
544 
545 	spin_lock(&net->proc_cells_lock);
546 	list_del_init(&cell->proc_link);
547 	spin_unlock(&net->proc_cells_lock);
548 
549 #ifdef CONFIG_AFS_FSCACHE
	/* Second argument is the retire flag: 0 here, so the cached data is
	 * kept rather than discarded.
	 */
550 	fscache_relinquish_cookie(cell->cache, 0);
551 	cell->cache = NULL;
552 #endif
553 
554 	_leave("");
555 }
556 
557 /*
558  * Manage a cell record, initialising and destroying it, maintaining its DNS
559  * records.
560  */
561 static void afs_manage_cell(struct work_struct *work)
562 {
563 	struct afs_cell *cell = container_of(work, struct afs_cell, manager);
564 	struct afs_net *net = cell->net;
565 	bool deleted;
566 	int ret, usage;
567 
568 	_enter("%s", cell->name);
569 
570 again:
571 	_debug("state %u", cell->state);
572 	switch (cell->state) {
573 	case AFS_CELL_INACTIVE:
574 	case AFS_CELL_FAILED:
		/* If only the tree's ref remains, swap the count to zero and
		 * unlink the cell so it can be destroyed; if the cmpxchg
		 * fails, someone took a new ref in the meantime.
		 */
575 		write_seqlock(&net->cells_lock);
576 		usage = 1;
577 		deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0);
578 		if (deleted)
579 			rb_erase(&cell->net_node, &net->cells);
580 		write_sequnlock(&net->cells_lock);
581 		if (deleted)
582 			goto final_destruction;
583 		if (cell->state == AFS_CELL_FAILED)
584 			goto done;
		/* Still in use: restart the lifecycle from scratch. */
585 		cell->state = AFS_CELL_UNSET;
586 		goto again;
587 
588 	case AFS_CELL_UNSET:
589 		cell->state = AFS_CELL_ACTIVATING;
590 		goto again;
591 
592 	case AFS_CELL_ACTIVATING:
593 		ret = afs_activate_cell(net, cell);
594 		if (ret < 0)
595 			goto activation_failed;
596 
		/* Publish the new state before clearing NOT_READY so that
		 * waiters in afs_lookup_cell() observe ACTIVE (the barrier
		 * pairs with the smp_rmb() there).
		 */
597 		cell->state = AFS_CELL_ACTIVE;
598 		smp_wmb();
599 		clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
600 		wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
601 		goto again;
602 
603 	case AFS_CELL_ACTIVE:
604 		if (atomic_read(&cell->usage) > 1) {
			/* Still in use: just refresh stale DNS records. */
605 			time64_t now = ktime_get_real_seconds();
606 			if (cell->dns_expiry <= now && net->live)
607 				afs_update_cell(cell);
608 			goto done;
609 		}
610 		cell->state = AFS_CELL_DEACTIVATING;
611 		goto again;
612 
613 	case AFS_CELL_DEACTIVATING:
		/* Bar new users, but back out if someone took a ref between
		 * the usage check above and here.
		 */
614 		set_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
615 		if (atomic_read(&cell->usage) > 1)
616 			goto reverse_deactivation;
617 		afs_deactivate_cell(net, cell);
618 		cell->state = AFS_CELL_INACTIVE;
619 		goto again;
620 
621 	default:
622 		break;
623 	}
624 	_debug("bad state %u", cell->state);
625 	BUG(); /* Unhandled state */
626 
627 activation_failed:
	/* Record the error and park the cell in FAILED so waiters can pick
	 * the error up in afs_lookup_cell().
	 */
628 	cell->error = ret;
629 	afs_deactivate_cell(net, cell);
630 
631 	cell->state = AFS_CELL_FAILED;
632 	smp_wmb();
633 	if (test_and_clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags))
634 		wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
635 	goto again;
636 
637 reverse_deactivation:
638 	cell->state = AFS_CELL_ACTIVE;
639 	smp_wmb();
640 	clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
641 	wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
642 	_leave(" [deact->act]");
643 	return;
644 
645 done:
646 	_leave(" [done %u]", cell->state);
647 	return;
648 
649 final_destruction:
	/* Defer freeing until RCU readers (afs_lookup_cell_rcu()) are done
	 * with the tree node, then release the tree's outstanding count.
	 */
650 	call_rcu(&cell->rcu, afs_cell_destroy);
651 	afs_dec_cells_outstanding(net);
652 	_leave(" [destruct %d]", atomic_read(&net->cells_outstanding));
653 }
654 
655 /*
656  * Manage the records of cells known to a network namespace.  This includes
657  * updating the DNS records and garbage collecting unused cells that were
658  * automatically added.
659  *
660  * Note that constructed cell records may only be removed from net->cells by
661  * this work item, so it is safe for this work item to stash a cursor pointing
662  * into the tree and then return to caller (provided it skips cells that are
663  * still under construction).
664  *
665  * Note also that we were given an increment on net->cells_outstanding by
666  * whoever queued us that we need to deal with before returning.
667  */
668 void afs_manage_cells(struct work_struct *work)
669 {
670 	struct afs_net *net = container_of(work, struct afs_net, cells_manager);
671 	struct rb_node *cursor;
672 	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
673 	bool purging = !net->live;
674 
675 	_enter("");
676 
677 	/* Trawl the cell database looking for cells that have expired from
678 	 * lack of use and cells whose DNS results have expired and dispatch
679 	 * their managers.
680 	 */
681 	read_seqlock_excl(&net->cells_lock);
682 
683 	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
684 		struct afs_cell *cell =
685 			rb_entry(cursor, struct afs_cell, net_node);
686 		unsigned usage;
687 		bool sched_cell = false;
688 
689 		usage = atomic_read(&cell->usage);
690 		_debug("manage %s %u", cell->name, usage);
691 
692 		ASSERTCMP(usage, >=, 1);
693 
694 		if (purging) {
			/* Drop the ref pinned by the NO_GC flag (see
			 * afs_cell_init()) so that every cell can go.
			 */
695 			if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
696 				usage = atomic_dec_return(&cell->usage);
697 			ASSERTCMP(usage, ==, 1);
698 		}
699 
700 		if (usage == 1) {
			/* Unused: schedule the cell's manager once its GC
			 * grace period has elapsed (failed DNS lookups get
			 * no grace period).
			 */
701 			time64_t expire_at = cell->last_inactive;
702 
703 			if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) &&
704 			    !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags))
705 				expire_at += afs_cell_gc_delay;
706 			if (purging || expire_at <= now)
707 				sched_cell = true;
708 			else if (expire_at < next_manage)
709 				next_manage = expire_at;
710 		}
711 
		/* Also schedule cells whose DNS records need refreshing. */
712 		if (!purging) {
713 			if (cell->dns_expiry <= now)
714 				sched_cell = true;
715 			else if (cell->dns_expiry <= next_manage)
716 				next_manage = cell->dns_expiry;
717 		}
718 
719 		if (sched_cell)
720 			queue_work(afs_wq, &cell->manager);
721 	}
722 
723 	read_sequnlock_excl(&net->cells_lock);
724 
725 	/* Update the timer on the way out.  We have to pass an increment on
726 	 * cells_outstanding in the namespace that we are in to the timer or
727 	 * the work scheduler.
728 	 */
729 	if (!purging && next_manage < TIME64_MAX) {
730 		now = ktime_get_real_seconds();
731 
732 		if (next_manage - now <= 0) {
733 			if (queue_work(afs_wq, &net->cells_manager))
734 				atomic_inc(&net->cells_outstanding);
735 		} else {
736 			afs_set_cell_timer(net, next_manage - now);
737 		}
738 	}
739 
	/* Release the count handed to us by whoever queued this work item. */
740 	afs_dec_cells_outstanding(net);
741 	_leave(" [%d]", atomic_read(&net->cells_outstanding));
742 }
743 
744 /*
745  * Purge in-memory cell database.
746  */
747 void afs_cell_purge(struct afs_net *net)
748 {
749 	struct afs_cell *ws;
750 
751 	_enter("");
752 
	/* Unpublish the workstation cell and drop the ref it held. */
753 	write_seqlock(&net->cells_lock);
754 	ws = net->ws_cell;
755 	net->ws_cell = NULL;
756 	write_sequnlock(&net->cells_lock);
757 	afs_put_cell(net, ws);
758 
	/* If the timer was pending it owned a count on cells_outstanding;
	 * reclaim it now that the timer can't fire.
	 */
759 	_debug("del timer");
760 	if (del_timer_sync(&net->cells_timer))
761 		atomic_dec(&net->cells_outstanding);
762 
763 	_debug("kick mgr");
764 	afs_queue_cell_manager(net);
765 
	/* Wait for all cell records to be torn down and the counter to drop
	 * to zero (woken by afs_dec_cells_outstanding()).
	 */
766 	_debug("wait");
767 	wait_on_atomic_t(&net->cells_outstanding, atomic_t_wait,
768 			 TASK_UNINTERRUPTIBLE);
769 	_leave("");
770 }
771