xref: /openbmc/linux/fs/afs/cell.c (revision bf070bb0)
1 /* AFS cell and server record management
2  *
3  * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 
12 #include <linux/slab.h>
13 #include <linux/key.h>
14 #include <linux/ctype.h>
15 #include <linux/dns_resolver.h>
16 #include <linux/sched.h>
17 #include <linux/inet.h>
18 #include <keys/rxrpc-type.h>
19 #include "internal.h"
20 
21 unsigned __read_mostly afs_cell_gc_delay = 10;
22 
23 static void afs_manage_cell(struct work_struct *);
24 
25 static void afs_dec_cells_outstanding(struct afs_net *net)
26 {
27 	if (atomic_dec_and_test(&net->cells_outstanding))
28 		wake_up_atomic_t(&net->cells_outstanding);
29 }
30 
31 /*
32  * Set the cell timer to fire after a given delay, assuming it's not already
33  * set for an earlier time.
34  */
35 static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
36 {
37 	if (net->live) {
38 		atomic_inc(&net->cells_outstanding);
39 		if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
40 			afs_dec_cells_outstanding(net);
41 	}
42 }
43 
/*
 * Look up and get an activation reference on a cell record under RCU
 * conditions.  The caller must hold the RCU read lock.
 *
 * Pass name == NULL to look up the default "workstation" cell (net->ws_cell).
 * On success the cell is returned with its usage count raised; on failure a
 * negative error pointer is returned (-ENOENT if no such cell exists,
 * -EDESTADDRREQ if no workstation cell is configured).
 */
struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
				     const char *name, unsigned int namesz)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n, seq = 0, ret = 0;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		if (cell)
			afs_put_cell(net, cell);
		cell = NULL;
		ret = -ENOENT;

		/* First pass is a lockless seqcount read; if it raced with a
		 * writer, need_seqretry() makes the second pass take the lock
		 * exclusively.
		 */
		read_seqbegin_or_lock(&net->cells_lock, &seq);

		if (!name) {
			cell = rcu_dereference_raw(net->ws_cell);
			if (cell) {
				afs_get_cell(cell);
				continue;
			}
			ret = -EDESTADDRREQ;
			continue;
		}

		p = rcu_dereference_raw(net->cells.rb_node);
		while (p) {
			cell = rb_entry(p, struct afs_cell, net_node);

			/* Compare case-insensitively on the common prefix,
			 * breaking ties by name length.
			 */
			n = strncasecmp(cell->name, name,
					min_t(size_t, cell->name_len, namesz));
			if (n == 0)
				n = cell->name_len - namesz;
			if (n < 0) {
				p = rcu_dereference_raw(p->rb_left);
			} else if (n > 0) {
				p = rcu_dereference_raw(p->rb_right);
			} else {
				/* Only take a ref if the cell isn't already
				 * being destroyed (usage would be 0).
				 */
				if (atomic_inc_not_zero(&cell->usage)) {
					ret = 0;
					break;
				}
				/* We want to repeat the search, this time with
				 * the lock properly locked.
				 */
			}
			cell = NULL;
		}

	} while (need_seqretry(&net->cells_lock, seq));

	done_seqretry(&net->cells_lock, seq);

	return ret == 0 ? cell : ERR_PTR(ret);
}
114 
115 /*
116  * Set up a cell record and fill in its name, VL server address list and
117  * allocate an anonymous key
118  */
119 static struct afs_cell *afs_alloc_cell(struct afs_net *net,
120 				       const char *name, unsigned int namelen,
121 				       const char *vllist)
122 {
123 	struct afs_cell *cell;
124 	int i, ret;
125 
126 	ASSERT(name);
127 	if (namelen == 0)
128 		return ERR_PTR(-EINVAL);
129 	if (namelen > AFS_MAXCELLNAME) {
130 		_leave(" = -ENAMETOOLONG");
131 		return ERR_PTR(-ENAMETOOLONG);
132 	}
133 
134 	_enter("%*.*s,%s", namelen, namelen, name, vllist);
135 
136 	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
137 	if (!cell) {
138 		_leave(" = -ENOMEM");
139 		return ERR_PTR(-ENOMEM);
140 	}
141 
142 	cell->net = net;
143 	cell->name_len = namelen;
144 	for (i = 0; i < namelen; i++)
145 		cell->name[i] = tolower(name[i]);
146 
147 	atomic_set(&cell->usage, 2);
148 	INIT_WORK(&cell->manager, afs_manage_cell);
149 	cell->flags = ((1 << AFS_CELL_FL_NOT_READY) |
150 		       (1 << AFS_CELL_FL_NO_LOOKUP_YET));
151 	INIT_LIST_HEAD(&cell->proc_volumes);
152 	rwlock_init(&cell->proc_lock);
153 	rwlock_init(&cell->vl_addrs_lock);
154 
155 	/* Fill in the VL server list if we were given a list of addresses to
156 	 * use.
157 	 */
158 	if (vllist) {
159 		struct afs_addr_list *alist;
160 
161 		alist = afs_parse_text_addrs(vllist, strlen(vllist), ':',
162 					     VL_SERVICE, AFS_VL_PORT);
163 		if (IS_ERR(alist)) {
164 			ret = PTR_ERR(alist);
165 			goto parse_failed;
166 		}
167 
168 		rcu_assign_pointer(cell->vl_addrs, alist);
169 		cell->dns_expiry = TIME64_MAX;
170 	}
171 
172 	_leave(" = %p", cell);
173 	return cell;
174 
175 parse_failed:
176 	if (ret == -EINVAL)
177 		printk(KERN_ERR "kAFS: bad VL server IP address\n");
178 	kfree(cell);
179 	_leave(" = %d", ret);
180 	return ERR_PTR(ret);
181 }
182 
183 /*
184  * afs_lookup_cell - Look up or create a cell record.
185  * @net:	The network namespace
186  * @name:	The name of the cell.
187  * @namesz:	The strlen of the cell name.
188  * @vllist:	A colon/comma separated list of numeric IP addresses or NULL.
189  * @excl:	T if an error should be given if the cell name already exists.
190  *
191  * Look up a cell record by name and query the DNS for VL server addresses if
192  * needed.  Note that that actual DNS query is punted off to the manager thread
193  * so that this function can return immediately if interrupted whilst allowing
194  * cell records to be shared even if not yet fully constructed.
195  */
196 struct afs_cell *afs_lookup_cell(struct afs_net *net,
197 				 const char *name, unsigned int namesz,
198 				 const char *vllist, bool excl)
199 {
200 	struct afs_cell *cell, *candidate, *cursor;
201 	struct rb_node *parent, **pp;
202 	int ret, n;
203 
204 	_enter("%s,%s", name, vllist);
205 
206 	if (!excl) {
207 		rcu_read_lock();
208 		cell = afs_lookup_cell_rcu(net, name, namesz);
209 		rcu_read_unlock();
210 		if (!IS_ERR(cell)) {
211 			if (excl) {
212 				afs_put_cell(net, cell);
213 				return ERR_PTR(-EEXIST);
214 			}
215 			goto wait_for_cell;
216 		}
217 	}
218 
219 	/* Assume we're probably going to create a cell and preallocate and
220 	 * mostly set up a candidate record.  We can then use this to stash the
221 	 * name, the net namespace and VL server addresses.
222 	 *
223 	 * We also want to do this before we hold any locks as it may involve
224 	 * upcalling to userspace to make DNS queries.
225 	 */
226 	candidate = afs_alloc_cell(net, name, namesz, vllist);
227 	if (IS_ERR(candidate)) {
228 		_leave(" = %ld", PTR_ERR(candidate));
229 		return candidate;
230 	}
231 
232 	/* Find the insertion point and check to see if someone else added a
233 	 * cell whilst we were allocating.
234 	 */
235 	write_seqlock(&net->cells_lock);
236 
237 	pp = &net->cells.rb_node;
238 	parent = NULL;
239 	while (*pp) {
240 		parent = *pp;
241 		cursor = rb_entry(parent, struct afs_cell, net_node);
242 
243 		n = strncasecmp(cursor->name, name,
244 				min_t(size_t, cursor->name_len, namesz));
245 		if (n == 0)
246 			n = cursor->name_len - namesz;
247 		if (n < 0)
248 			pp = &(*pp)->rb_left;
249 		else if (n > 0)
250 			pp = &(*pp)->rb_right;
251 		else
252 			goto cell_already_exists;
253 	}
254 
255 	cell = candidate;
256 	candidate = NULL;
257 	rb_link_node_rcu(&cell->net_node, parent, pp);
258 	rb_insert_color(&cell->net_node, &net->cells);
259 	atomic_inc(&net->cells_outstanding);
260 	write_sequnlock(&net->cells_lock);
261 
262 	queue_work(afs_wq, &cell->manager);
263 
264 wait_for_cell:
265 	_debug("wait_for_cell");
266 	ret = wait_on_bit(&cell->flags, AFS_CELL_FL_NOT_READY, TASK_INTERRUPTIBLE);
267 	smp_rmb();
268 
269 	switch (READ_ONCE(cell->state)) {
270 	case AFS_CELL_FAILED:
271 		ret = cell->error;
272 		goto error;
273 	default:
274 		_debug("weird %u %d", cell->state, cell->error);
275 		goto error;
276 	case AFS_CELL_ACTIVE:
277 		break;
278 	}
279 
280 	_leave(" = %p [cell]", cell);
281 	return cell;
282 
283 cell_already_exists:
284 	_debug("cell exists");
285 	cell = cursor;
286 	if (excl) {
287 		ret = -EEXIST;
288 	} else {
289 		afs_get_cell(cursor);
290 		ret = 0;
291 	}
292 	write_sequnlock(&net->cells_lock);
293 	kfree(candidate);
294 	if (ret == 0)
295 		goto wait_for_cell;
296 	goto error_noput;
297 error:
298 	afs_put_cell(net, cell);
299 error_noput:
300 	_leave(" = %d [error]", ret);
301 	return ERR_PTR(ret);
302 }
303 
304 /*
305  * set the root cell information
306  * - can be called with a module parameter string
307  * - can be called from a write to /proc/fs/afs/rootcell
308  */
309 int afs_cell_init(struct afs_net *net, const char *rootcell)
310 {
311 	struct afs_cell *old_root, *new_root;
312 	const char *cp, *vllist;
313 	size_t len;
314 
315 	_enter("");
316 
317 	if (!rootcell) {
318 		/* module is loaded with no parameters, or built statically.
319 		 * - in the future we might initialize cell DB here.
320 		 */
321 		_leave(" = 0 [no root]");
322 		return 0;
323 	}
324 
325 	cp = strchr(rootcell, ':');
326 	if (!cp) {
327 		_debug("kAFS: no VL server IP addresses specified");
328 		vllist = NULL;
329 		len = strlen(rootcell);
330 	} else {
331 		vllist = cp + 1;
332 		len = cp - rootcell;
333 	}
334 
335 	/* allocate a cell record for the root cell */
336 	new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
337 	if (IS_ERR(new_root)) {
338 		_leave(" = %ld", PTR_ERR(new_root));
339 		return PTR_ERR(new_root);
340 	}
341 
342 	set_bit(AFS_CELL_FL_NO_GC, &new_root->flags);
343 	afs_get_cell(new_root);
344 
345 	/* install the new cell */
346 	write_seqlock(&net->cells_lock);
347 	old_root = net->ws_cell;
348 	net->ws_cell = new_root;
349 	write_sequnlock(&net->cells_lock);
350 
351 	afs_put_cell(net, old_root);
352 	_leave(" = 0");
353 	return 0;
354 }
355 
/*
 * Update a cell's VL server address list from the DNS.
 *
 * On lookup failure the old list (if any) is kept and the cell is marked so
 * that a retry gets scheduled; on success the new list is swapped in and the
 * old one released.  Runs only from the cell's non-reentrant manager work
 * item, which is what excludes concurrent updates of vl_addrs.
 */
static void afs_update_cell(struct afs_cell *cell)
{
	struct afs_addr_list *alist, *old;
	time64_t now, expiry;

	_enter("%s", cell->name);

	alist = afs_dns_query(cell, &expiry);
	if (IS_ERR(alist)) {
		switch (PTR_ERR(alist)) {
		case -ENODATA:
			/* The DNS said that the cell does not exist */
			set_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags);
			clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
			/* Recheck in about a minute. */
			cell->dns_expiry = ktime_get_real_seconds() + 61;
			break;

		case -EAGAIN:
		case -ECONNREFUSED:
		default:
			/* Transient DNS failure: retry sooner. */
			set_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
			cell->dns_expiry = ktime_get_real_seconds() + 10;
			break;
		}

		cell->error = -EDESTADDRREQ;
	} else {
		clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
		clear_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags);

		/* Exclusion on changing vl_addrs is achieved by a
		 * non-reentrant work item.
		 */
		old = rcu_dereference_protected(cell->vl_addrs, true);
		rcu_assign_pointer(cell->vl_addrs, alist);
		cell->dns_expiry = expiry;

		if (old)
			afs_put_addrlist(old);
	}

	/* Let anyone waiting on the first DNS lookup know it has completed,
	 * whether or not it succeeded.
	 */
	if (test_and_clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags))
		wake_up_bit(&cell->flags, AFS_CELL_FL_NO_LOOKUP_YET);

	/* Arrange for the next refresh when this result expires. */
	now = ktime_get_real_seconds();
	afs_set_cell_timer(cell->net, cell->dns_expiry - now);
	_leave("");
}
407 
408 /*
409  * Destroy a cell record
410  */
411 static void afs_cell_destroy(struct rcu_head *rcu)
412 {
413 	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
414 
415 	_enter("%p{%s}", cell, cell->name);
416 
417 	ASSERTCMP(atomic_read(&cell->usage), ==, 0);
418 
419 	afs_put_addrlist(cell->vl_addrs);
420 	key_put(cell->anonymous_key);
421 	kfree(cell);
422 
423 	_leave(" [destroyed]");
424 }
425 
426 /*
427  * Queue the cell manager.
428  */
429 static void afs_queue_cell_manager(struct afs_net *net)
430 {
431 	int outstanding = atomic_inc_return(&net->cells_outstanding);
432 
433 	_enter("%d", outstanding);
434 
435 	if (!queue_work(afs_wq, &net->cells_manager))
436 		afs_dec_cells_outstanding(net);
437 }
438 
/*
 * Cell management timer.  We have an increment on cells_outstanding that we
 * need to pass along to the work item.
 */
void afs_cells_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, cells_timer);

	_enter("");
	/* The timer's count passes to the work item; if the item was already
	 * queued, drop the count here instead.
	 */
	if (!queue_work(afs_wq, &net->cells_manager))
		afs_dec_cells_outstanding(net);
}
451 
/*
 * Get a reference on a cell record.
 *
 * The count is incremented unconditionally, so the caller must already hold
 * a reference or otherwise know the cell can't go away.  Returns the cell
 * for call chaining.
 */
struct afs_cell *afs_get_cell(struct afs_cell *cell)
{
	atomic_inc(&cell->usage);
	return cell;
}
460 
/*
 * Drop a reference on a cell record.
 *
 * NULL is tolerated as a no-op.  When the count falls back to the baseline,
 * the cell becomes eligible for garbage collection, so the cell timer is
 * prodded with the appropriate grace delay.
 */
void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
{
	time64_t now, expire_delay;

	if (!cell)
		return;

	_enter("%s", cell->name);

	/* Record when the cell last went (potentially) unused and decide how
	 * long the GC should wait: a cell whose DNS lookup failed or that was
	 * not found gets no grace period.
	 */
	now = ktime_get_real_seconds();
	cell->last_inactive = now;
	expire_delay = 0;
	if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) &&
	    !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags))
		expire_delay = afs_cell_gc_delay;

	if (atomic_dec_return(&cell->usage) > 1)
		return;

	/* 'cell' may now be garbage collected. */
	afs_set_cell_timer(net, expire_delay);
}
486 
487 /*
488  * Allocate a key to use as a placeholder for anonymous user security.
489  */
490 static int afs_alloc_anon_key(struct afs_cell *cell)
491 {
492 	struct key *key;
493 	char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;
494 
495 	/* Create a key to represent an anonymous user. */
496 	memcpy(keyname, "afs@", 4);
497 	dp = keyname + 4;
498 	cp = cell->name;
499 	do {
500 		*dp++ = tolower(*cp);
501 	} while (*cp++);
502 
503 	key = rxrpc_get_null_key(keyname);
504 	if (IS_ERR(key))
505 		return PTR_ERR(key);
506 
507 	cell->anonymous_key = key;
508 
509 	_debug("anon key %p{%x}",
510 	       cell->anonymous_key, key_serial(cell->anonymous_key));
511 	return 0;
512 }
513 
/*
 * Activate a cell.
 *
 * Gives the cell an anonymous key (if it doesn't already have one), an
 * fscache cookie and its /proc entries, and links it onto the namespace's
 * proc list.  Returns 0 or a negative error code.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
	int ret;

	if (!cell->anonymous_key) {
		ret = afs_alloc_anon_key(cell);
		if (ret < 0)
			return ret;
	}

#ifdef CONFIG_AFS_FSCACHE
	cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
					     &afs_cell_cache_index_def,
					     cell, true);
#endif
	/* NOTE(review): if afs_proc_cell_setup() fails, the fscache cookie is
	 * not relinquished here — presumably afs_deactivate_cell() does that
	 * on the caller's failure path; verify against afs_manage_cell().
	 */
	ret = afs_proc_cell_setup(net, cell);
	if (ret < 0)
		return ret;
	spin_lock(&net->proc_cells_lock);
	list_add_tail(&cell->proc_link, &net->proc_cells);
	spin_unlock(&net->proc_cells_lock);
	return 0;
}
540 
/*
 * Deactivate a cell.
 *
 * Reverses afs_activate_cell(): removes the /proc entries, unlinks the cell
 * from the namespace's proc list and relinquishes the fscache cookie.
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(net, cell);

	spin_lock(&net->proc_cells_lock);
	list_del_init(&cell->proc_link);
	spin_unlock(&net->proc_cells_lock);

#ifdef CONFIG_AFS_FSCACHE
	fscache_relinquish_cookie(cell->cache, 0);
	cell->cache = NULL;
#endif

	_leave("");
}
561 
/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 *
 * This is the cell's state machine, run from a non-reentrant work item.  It
 * walks the cell through activation, keeps the DNS records fresh whilst the
 * cell is in use, and deactivates and destroys the cell once the refcount
 * falls back to the baseline of 1.
 */
static void afs_manage_cell(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, manager);
	struct afs_net *net = cell->net;
	bool deleted;
	int ret, usage;

	_enter("%s", cell->name);

again:
	_debug("state %u", cell->state);
	switch (cell->state) {
	case AFS_CELL_INACTIVE:
	case AFS_CELL_FAILED:
		/* If the refcount is still at the baseline of 1, swap it to 0
		 * and unlink the cell from the tree, after which it can only
		 * be destroyed.  Otherwise someone regained interest, so the
		 * cell is resurrected (unless it had failed).
		 */
		write_seqlock(&net->cells_lock);
		usage = 1;
		deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0);
		if (deleted)
			rb_erase(&cell->net_node, &net->cells);
		write_sequnlock(&net->cells_lock);
		if (deleted)
			goto final_destruction;
		if (cell->state == AFS_CELL_FAILED)
			goto done;
		cell->state = AFS_CELL_UNSET;
		goto again;

	case AFS_CELL_UNSET:
		cell->state = AFS_CELL_ACTIVATING;
		goto again;

	case AFS_CELL_ACTIVATING:
		ret = afs_activate_cell(net, cell);
		if (ret < 0)
			goto activation_failed;

		/* Publish the state change before clearing NOT_READY so a
		 * woken waiter sees AFS_CELL_ACTIVE (pairs with the smp_rmb()
		 * in afs_lookup_cell()).
		 */
		cell->state = AFS_CELL_ACTIVE;
		smp_wmb();
		clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
		wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
		goto again;

	case AFS_CELL_ACTIVE:
		if (atomic_read(&cell->usage) > 1) {
			/* Still in use: just refresh expired DNS records. */
			time64_t now = ktime_get_real_seconds();
			if (cell->dns_expiry <= now && net->live)
				afs_update_cell(cell);
			goto done;
		}
		cell->state = AFS_CELL_DEACTIVATING;
		goto again;

	case AFS_CELL_DEACTIVATING:
		/* Mark not-ready first; if someone grabbed a ref in the
		 * meantime, back out of the deactivation.
		 */
		set_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
		if (atomic_read(&cell->usage) > 1)
			goto reverse_deactivation;
		afs_deactivate_cell(net, cell);
		cell->state = AFS_CELL_INACTIVE;
		goto again;

	default:
		break;
	}
	_debug("bad state %u", cell->state);
	BUG(); /* Unhandled state */

activation_failed:
	cell->error = ret;
	afs_deactivate_cell(net, cell);

	/* As above, order the state change before waking waiters. */
	cell->state = AFS_CELL_FAILED;
	smp_wmb();
	if (test_and_clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags))
		wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
	goto again;

reverse_deactivation:
	/* Someone took a new reference whilst we were deactivating: return
	 * to the active state and wake any waiters.
	 */
	cell->state = AFS_CELL_ACTIVE;
	smp_wmb();
	clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
	wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
	_leave(" [deact->act]");
	return;

done:
	_leave(" [done %u]", cell->state);
	return;

final_destruction:
	/* The cell is out of the tree: free it after an RCU grace period and
	 * release the count taken when it was inserted.
	 */
	call_rcu(&cell->rcu, afs_cell_destroy);
	afs_dec_cells_outstanding(net);
	_leave(" [destruct %d]", atomic_read(&net->cells_outstanding));
}
659 
/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that constructed cell records may only be removed from net->cells by
 * this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to caller (provided it skips cells that are
 * still under construction).
 *
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_cells(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, cells_manager);
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the cell database looking for cells that have expired from
	 * lack of use and cells whose DNS results have expired and dispatch
	 * their managers.
	 */
	read_seqlock_excl(&net->cells_lock);

	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell =
			rb_entry(cursor, struct afs_cell, net_node);
		unsigned usage;
		bool sched_cell = false;

		usage = atomic_read(&cell->usage);
		_debug("manage %s %u", cell->name, usage);

		ASSERTCMP(usage, >=, 1);

		if (purging) {
			/* On purge, drop the pin that exempted the cell from
			 * GC; every cell is then expected to be down to the
			 * baseline ref of 1.
			 */
			if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
				usage = atomic_dec_return(&cell->usage);
			ASSERTCMP(usage, ==, 1);
		}

		if (usage == 1) {
			/* Unused cell: schedule its manager once the GC grace
			 * period has elapsed (immediately if purging or its
			 * DNS lookup failed / cell wasn't found).
			 */
			time64_t expire_at = cell->last_inactive;

			if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) &&
			    !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags))
				expire_at += afs_cell_gc_delay;
			if (purging || expire_at <= now)
				sched_cell = true;
			else if (expire_at < next_manage)
				next_manage = expire_at;
		}

		if (!purging) {
			/* Schedule a DNS refresh for a cell whose records
			 * have expired.
			 */
			if (cell->dns_expiry <= now)
				sched_cell = true;
			else if (cell->dns_expiry <= next_manage)
				next_manage = cell->dns_expiry;
		}

		if (sched_cell)
			queue_work(afs_wq, &cell->manager);
	}

	read_sequnlock_excl(&net->cells_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * cells_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->cells_manager))
				atomic_inc(&net->cells_outstanding);
		} else {
			afs_set_cell_timer(net, next_manage - now);
		}
	}

	afs_dec_cells_outstanding(net);
	_leave(" [%d]", atomic_read(&net->cells_outstanding));
}
748 
/*
 * Purge in-memory cell database.
 *
 * Called at namespace teardown: drops the workstation cell, stops the cell
 * timer, kicks the manager to garbage collect every remaining cell, then
 * waits for all outstanding cell-management operations to complete.
 */
void afs_cell_purge(struct afs_net *net)
{
	struct afs_cell *ws;

	_enter("");

	/* Detach and release the workstation cell. */
	write_seqlock(&net->cells_lock);
	ws = net->ws_cell;
	net->ws_cell = NULL;
	write_sequnlock(&net->cells_lock);
	afs_put_cell(net, ws);

	_debug("del timer");
	/* If the timer was pending, give back the count it held. */
	if (del_timer_sync(&net->cells_timer))
		atomic_dec(&net->cells_outstanding);

	_debug("kick mgr");
	afs_queue_cell_manager(net);

	_debug("wait");
	wait_on_atomic_t(&net->cells_outstanding, atomic_t_wait,
			 TASK_UNINTERRUPTIBLE);
	_leave("");
}
776