/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

#define	 RPCDBG_FACILITY RPCDBG_CACHE

static int cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h)
{
	time_t now = get_seconds();
	h->next = NULL;
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	h->last_refresh = now;
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head **head, **hp;
	struct cache_head *new = NULL;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);
	/* Didn't find anything, insert an empty entry */

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', since
	 * we might need to cache_put it soon.
	 */
	cache_init(new);
	detail->init(new, key);

	write_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}
	new->next = *head;
	*head = new;
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
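
/*
 * Illustrative sketch (not part of the original file): a cache
 * implementation typically embeds a struct cache_head at the start of
 * its own entry type and wraps sunrpc_cache_lookup() in a type-safe
 * helper.  "demo_ent", "demo_hash" and "demo_lookup" below are all
 * hypothetical names:
 *
 *	struct demo_ent {
 *		struct cache_head h;
 *		int key;
 *	};
 *
 *	static struct demo_ent *demo_lookup(struct cache_detail *cd, int key)
 *	{
 *		struct demo_ent probe = { .key = key };
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_lookup(cd, &probe.h, demo_hash(key));
 *		if (ch == NULL)
 *			return NULL;
 *		return container_of(ch, struct demo_ent, h);
 *	}
 *
 * The probe only needs the fields that ->match() inspects; ->init()
 * copies them into the freshly allocated entry if one gets inserted.
 */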


static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
	head->expiry_time = expiry;
	head->last_refresh = get_seconds();
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head **head;
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp);
	detail->init(tmp, old);
	head = &detail->hash_table[hash];

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	tmp->next = *head;
	*head = tmp;
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time);
	cache_fresh_locked(old, 0);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
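
/*
 * Illustrative sketch continuing the hypothetical "demo" cache above:
 * 'new' is a template entry carrying the fresh content (or the
 * CACHE_NEGATIVE flag) and the desired expiry time, and the entry to
 * keep using is whatever sunrpc_cache_update() returns:
 *
 *	static struct demo_ent *demo_update(struct cache_detail *cd,
 *					    struct demo_ent *new,
 *					    struct demo_ent *old)
 *	{
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_update(cd, &new->h, &old->h,
 *					 demo_hash(old->key));
 *		if (ch == NULL)
 *			return NULL;
 *		return container_of(ch, struct demo_ent, h);
 *	}
 *
 * A NULL return means allocation failed; note that in that case the
 * reference to 'old' has already been dropped.
 */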

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (!cd->cache_upcall)
		return -EINVAL;
	return cd->cache_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags) ||
	    h->expiry_time < get_seconds())
		return -EAGAIN;
	else if (detail->flush_time > h->last_refresh)
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else
			return 0;
	}
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used; otherwise it cache_puts
 * the item and returns
 * -EAGAIN if the upcall is pending and the request has been queued
 * -ETIMEDOUT if the upcall failed, or the request could not be queued,
 *           or the upcall completed but the item is still invalid
 *           (implying that the cache item has been replaced with a
 *           newer one).
 * -ENOENT if the cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		    struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(detail, h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = get_seconds() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN || age > refresh_age/2) {
		dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
				refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				if (rv == -EAGAIN) {
					set_bit(CACHE_NEGATIVE, &h->flags);
					cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY);
					cache_fresh_unlocked(h, detail);
					rv = -ENOENT;
				}
				break;

			case -EAGAIN:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				break;
			}
		}
	}

	if (rv == -EAGAIN) {
		if (cache_defer_req(rqstp, h) < 0) {
			/* Request is not deferred */
			rv = cache_is_valid(detail, h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
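
/*
 * Illustrative sketch: a typical server-side caller passes the
 * cache_req embedded in its svc_rqst so the request can be deferred
 * while the upcall to user-space runs.  -EAGAIN means the request was
 * queued (drop it for now), -ENOENT means a negative entry, and 0
 * means 'ent' is valid and the caller still holds its reference.  The
 * names follow the hypothetical "demo" cache sketched earlier:
 *
 *	struct demo_ent *ent = demo_lookup(cd, key);
 *
 *	if (ent == NULL)
 *		return SVC_DROP;
 *	switch (cache_check(cd, &ent->h, &rqstp->rq_chandle)) {
 *	case 0:
 *		break;
 *	case -ENOENT:
 *		return SVC_DENIED;
 *	default:
 *		return SVC_DROP;
 *	}
 *
 * Remember that on any non-zero return cache_check() has already
 * dropped the reference that the lookup took.
 */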

/*
 * Caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before the current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);

static void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_delayed_work(&cache_cleaner, 0);
}

static void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		goto out;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
	return;
out:
	printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
}

/* cache_clean tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	    current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > get_seconds())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = get_seconds()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;
		struct cache_detail *d;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		cp = &current_detail->hash_table[current_index];
		ch = *cp;
		for (; ch; cp = &ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (ch->expiry_time >= get_seconds() &&
			    ch->last_refresh >= current_detail->flush_time)
				continue;
			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
				cache_dequeue(current_detail, ch);

			if (atomic_read(&ch->ref.refcount) == 1)
				break;
		}
		if (ch) {
			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
		}
		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			cache_revisit_request(ch);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}


/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned.
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = LONG_MAX;
	detail->nextcheck = get_seconds();
	cache_flush();
	detail->flush_time = 1;
}
EXPORT_SYMBOL_GPL(cache_purge);


/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'.
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available.
 */

#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct list_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static int cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq, *discard;
	int hash = DFR_HASH(item);

	if (cache_defer_cnt >= DFR_MAX) {
		/* too much in the cache, randomly drop this one,
		 * or continue and drop the oldest below
		 */
		if (net_random()&1)
			return -ENOMEM;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return -ENOMEM;

	dreq->item = item;

	spin_lock(&cache_defer_lock);

	list_add(&dreq->recent, &cache_defer_list);

	if (cache_defer_hash[hash].next == NULL)
		INIT_LIST_HEAD(&cache_defer_hash[hash]);
	list_add(&dreq->hash, &cache_defer_hash[hash]);

	/* it is in, now maybe clean up */
	discard = NULL;
	if (++cache_defer_cnt > DFR_MAX) {
		discard = list_entry(cache_defer_list.prev,
				     struct cache_deferred_req, recent);
		list_del_init(&discard->recent);
		list_del_init(&discard->hash);
		cache_defer_cnt--;
	}
	spin_unlock(&cache_defer_lock);

	if (discard)
		/* there was one too many */
		discard->revisit(discard, 1);

	if (!test_bit(CACHE_PENDING, &item->flags)) {
		/* must have just been validated... */
		cache_revisit_request(item);
		return -EAGAIN;
	}
	return 0;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;

	struct list_head *lp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	lp = cache_defer_hash[hash].next;
	if (lp) {
		while (lp != &cache_defer_hash[hash]) {
			dreq = list_entry(lp, struct cache_deferred_req, hash);
			lp = lp->next;
			if (dreq->item == item) {
				list_del_init(&dreq->hash);
				list_move(&dreq->recent, &pending);
				cache_defer_cnt--;
			}
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			list_del_init(&dreq->hash);
			list_move(&dreq->recent, &pending);
			cache_defer_cnt--;
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if there is anything to read, and always allows write.
 *
 * Implemented by a linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */
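
/*
 * Illustrative sketch of the user-space side (not kernel code): an
 * upcall daemon typically polls the channel file, reads one request
 * per read() call, and writes back one complete reply line per
 * write().  The "demo" path and the line format are cache-specific
 * and hypothetical here:
 *
 *	int fd = open("/proc/net/rpc/demo/channel", O_RDWR);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char request[8192], reply[8192];
 *
 *	for (;;) {
 *		poll(&pfd, 1, -1);
 *		ssize_t n = read(fd, request, sizeof(request));
 *		if (n <= 0)
 *			continue;
 *		... parse the request, compute the reply line ...
 *		write(fd, reply, strlen(reply));
 *	}
 *
 * A read() with a large enough buffer returns a single request, and a
 * write() must contain exactly one complete update message.
 */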

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int err;

	if (count == 0)
		return 0;

	mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
				      * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		mutex_unlock(&inode->i_mutex);
		BUG_ON(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	BUG_ON(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	mutex_unlock(&inode->i_mutex);
	return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_CACHE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	page_cache_release(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = filp->f_path.dentry->d_inode;
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	mutex_lock(&inode->i_mutex);
	ret = cache_downcall(mapping, buf, count, cd);
	mutex_unlock(&inode->i_mutex);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
			       struct cache_detail *cd)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLLOUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = get_seconds();
		atomic_dec(&cd->readers);
	}
	module_put(cd->owner);
	return 0;
}



static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq;
	spin_lock(&queue_lock);
	list_for_each_entry(cq, &detail->queue, list)
		if (!cq->reader) {
			struct cache_request *cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (cr->readers != 0)
				continue;
			list_del(&cr->q.list);
			spin_unlock(&queue_lock);
			cache_put(cr->item, detail);
			kfree(cr->buf);
			kfree(cr);
			return;
		}
	spin_unlock(&queue_lock);
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space, tab, newline and backslash
 * with \ooo octal escapes, or hexified with a leading \x.
 * Each record is terminated with a newline.
 *
 */

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	char c;

	if (len < 0) return;

	while ((c = *str++) && len)
		switch (c) {
		case ' ':
		case '\t':
		case '\n':
		case '\\':
			if (len >= 4) {
				*bp++ = '\\';
				*bp++ = '0' + ((c & 0300)>>6);
				*bp++ = '0' + ((c & 0070)>>3);
				*bp++ = '0' + ((c & 0007)>>0);
			}
			len -= 4;
			break;
		default:
			*bp++ = c;
			len--;
		}
	if (c || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			unsigned char c = *buf++;
			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);
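
/*
 * Illustrative sketch: building one upcall line with the helpers
 * above.  The caller hands in a buffer pointer and a remaining-length
 * counter; each helper advances the pointer, decrements the length,
 * and leaves a trailing space, and a negative final length signals
 * overflow.  The convention is to overwrite the last space with the
 * terminating newline ("addr" here is a hypothetical 4-byte key):
 *
 *	char *bp = buf;
 *	int len = PAGE_SIZE;
 *
 *	qword_add(&bp, &len, "demo");
 *	qword_addhex(&bp, &len, addr, 4);
 *	if (len <= 0)
 *		return;
 *	bp[-1] = '\n';
 */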

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
		void (*cache_request)(struct cache_detail *,
				      struct cache_head *,
				      char **,
				      int *))
{
	char *buf;
	struct cache_request *crq;
	char *bp;
	int len;

	if (atomic_read(&detail->readers) == 0 &&
	    detail->last_close < get_seconds() - 30) {
		warn_no_listener(detail);
		return -EINVAL;
	}

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	bp = buf; len = PAGE_SIZE;

	cache_request(detail, h, &bp, &len);

	if (len < 0) {
		kfree(buf);
		kfree(crq);
		return -EAGAIN;
	}
	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = PAGE_SIZE - len;
	crq->readers = 0;
	spin_lock(&queue_lock);
	list_add_tail(&crq->q.list, &detail->queue);
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
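
/*
 * Illustrative sketch: the cache_request callback handed to
 * sunrpc_cache_pipe_upcall() just formats the entry's key fields into
 * the supplied buffer with the qword helpers, exactly as sketched
 * after qword_addhex() above ("demo" names are hypothetical):
 *
 *	static void demo_request(struct cache_detail *cd,
 *				 struct cache_head *h,
 *				 char **bpp, int *blen)
 *	{
 *		struct demo_ent *ent = container_of(h, struct demo_ent, h);
 *		char kbuf[16];
 *
 *		snprintf(kbuf, sizeof(kbuf), "%d", ent->key);
 *		qword_add(bpp, blen, kbuf);
 *		(*bpp)[-1] = '\n';
 *	}
 *
 * A cache_upcall method is then little more than
 *
 *	return sunrpc_cache_pipe_upcall(cd, h, demo_request);
 */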

/*
 * parse a message from user-space and pass it
 * to an appropriate cache.
 * Messages are, like requests, separated into fields by
 * spaces, and dequoted from \xHEXSTRING or embedded \ooo octal.
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by the cache.
 */

#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
			int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			bp++;
			byte <<= 4;
			byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			*dest++ = byte;
			bp++;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);
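
/*
 * Illustrative sketch: a cache_parse method walks the downcall line
 * with qword_get().  This assumes a hypothetical "demo" cache whose
 * reply lines look like "<key> <expiry> <value>", and uses the
 * get_expiry() helper from <linux/sunrpc/cache.h>:
 *
 *	static int demo_parse(struct cache_detail *cd, char *mesg, int mlen)
 *	{
 *		char buf[128];
 *		int key;
 *		time_t expiry;
 *
 *		if (mesg[mlen - 1] != '\n')
 *			return -EINVAL;
 *		mesg[mlen - 1] = '\0';
 *
 *		if (qword_get(&mesg, buf, sizeof(buf)) <= 0)
 *			return -EINVAL;
 *		key = simple_strtol(buf, NULL, 0);
 *
 *		expiry = get_expiry(&mesg);
 *		if (!expiry)
 *			return -EINVAL;
 *
 *		... qword_get() the value, then demo_lookup() and
 *		    sunrpc_cache_update() as sketched earlier ...
 *		return 0;
 *	}
 */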


/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache.
 */

struct handle {
	struct cache_detail *cd;
};

static void *c_start(struct seq_file *m, loff_t *pos)
	__acquires(cd->hash_lock)
{
	loff_t n = *pos;
	unsigned hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	for (ch = cd->hash_table[hash]; ch; ch = ch->next)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 cd->hash_table[hash] == NULL);
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return cd->hash_table[hash];
}

static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return ch->next;
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       cd->hash_table[hash] == NULL) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return cd->hash_table[hash];
}

static void c_stop(struct seq_file *m, void *p)
	__releases(cd->hash_lock)
{
	struct cache_detail *cd = ((struct handle *)m->private)->cd;
	read_unlock(&cd->hash_lock);
}

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else
		cache_put(cp, cd);

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct handle *han;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	han = __seq_open_private(file, &cache_content_op, sizeof(*han));
	if (han == NULL) {
		module_put(cd->owner);
		return -ENOMEM;
	}

	han->cd = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
		struct cache_detail *cd)
{
	int ret = seq_release_private(inode, file);
	module_put(cd->owner);
	return ret;
}

static int open_flush(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[20];
	unsigned long p = *ppos;
	size_t len;

	sprintf(tbuf, "%lu\n", cd->flush_time);
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count)
		len = count;
	if (copy_to_user(buf, (void *)(tbuf+p), len))
		return -EFAULT;
	*ppos += len;
	return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *ep;
	long flushtime;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	flushtime = simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	cd->flush_time = flushtime;
	cd->nextcheck = get_seconds();
	cache_flush();

	*ppos += count;
	return count;
}

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_poll(filp, wait, cd);
}

static int cache_ioctl_procfs(struct inode *inode, struct file *filp,
			      unsigned int cmd, unsigned long arg)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_release(inode, filp, cd);
}

static const struct file_operations cache_file_operations_procfs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_procfs,
	.write		= cache_write_procfs,
	.poll		= cache_poll_procfs,
	.ioctl		= cache_ioctl_procfs, /* for FIONREAD */
	.open		= cache_open_procfs,
	.release	= cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return content_release(inode, filp, cd);
}

static const struct file_operations content_file_operations_procfs = {
	.open		= content_open_procfs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return write_flush(filp, buf, count, ppos, cd);
}

static const struct file_operations cache_flush_operations_procfs = {
	.open		= open_flush_procfs,
	.read		= read_flush_procfs,
	.write		= write_flush_procfs,
	.release	= release_flush_procfs,
};

static void remove_cache_proc_entries(struct cache_detail *cd)
{
	if (cd->u.procfs.proc_ent == NULL)
		return;
	if (cd->u.procfs.flush_ent)
		remove_proc_entry("flush", cd->u.procfs.proc_ent);
	if (cd->u.procfs.channel_ent)
		remove_proc_entry("channel", cd->u.procfs.proc_ent);
	if (cd->u.procfs.content_ent)
		remove_proc_entry("content", cd->u.procfs.proc_ent);
	cd->u.procfs.proc_ent = NULL;
	remove_proc_entry(cd->name, proc_net_rpc);
}

#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd)
{
	struct proc_dir_entry *p;

	cd->u.procfs.proc_ent = proc_mkdir(cd->name, proc_net_rpc);
	if (cd->u.procfs.proc_ent == NULL)
		goto out_nomem;
	cd->u.procfs.channel_ent = NULL;
	cd->u.procfs.content_ent = NULL;

	p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
			     cd->u.procfs.proc_ent,
			     &cache_flush_operations_procfs, cd);
	cd->u.procfs.flush_ent = p;
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_upcall || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &cache_file_operations_procfs, cd);
		cd->u.procfs.channel_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
				cd->u.procfs.proc_ent,
				&content_file_operations_procfs, cd);
		cd->u.procfs.content_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd)
{
	return 0;
}
#endif

int cache_register(struct cache_detail *cd)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register);
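
/*
 * Illustrative sketch: registering a cache.  A user of this code fills
 * in a cache_detail with its hash table and the methods used
 * throughout this file, then calls cache_register() on module init and
 * cache_unregister() on exit ("demo" names are hypothetical):
 *
 *	#define DEMO_HASHSIZE 16
 *	static struct cache_head *demo_table[DEMO_HASHSIZE];
 *
 *	static struct cache_detail demo_cache = {
 *		.owner		= THIS_MODULE,
 *		.hash_size	= DEMO_HASHSIZE,
 *		.hash_table	= demo_table,
 *		.name		= "demo",
 *		.cache_put	= demo_put,
 *		.cache_upcall	= demo_upcall,
 *		.cache_parse	= demo_parse,
 *		.cache_show	= demo_show,
 *		.match		= demo_match,
 *		.init		= demo_init,
 *		.update		= demo_update_item,
 *		.alloc		= demo_alloc,
 *	};
 *
 *	ret = cache_register(&demo_cache);
 *
 * This creates /proc/net/rpc/demo/ with "flush", "channel" and
 * "content" entries wired to the file_operations defined above.
 */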

void cache_unregister(struct cache_detail *cd)
{
	remove_cache_proc_entries(cd);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister);

static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_poll(filp, wait, cd);
}

static int cache_ioctl_pipefs(struct inode *inode, struct file *filp,
			      unsigned int cmd, unsigned long arg)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.ioctl		= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, mode_t umode,
				 struct cache_detail *cd)
{
	struct qstr q;
	struct dentry *dir;
	int ret = 0;

	sunrpc_init_cache_detail(cd);
	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	dir = rpc_create_cache_dir(parent, &q, umode, cd);
	if (!IS_ERR(dir))
		cd->u.pipefs.dir = dir;
	else {
		sunrpc_destroy_cache_detail(cd);
		ret = PTR_ERR(dir);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	rpc_remove_cache_dir(cd->u.pipefs.dir);
	cd->u.pipefs.dir = NULL;
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);