/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
	time_t now = seconds_since_boot();
	INIT_HLIST_NODE(&h->cache_list);
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
	h->last_refresh = now;
}

static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
						struct cache_head *key,
						int hash)
{
	struct hlist_head *head = &detail->hash_table[hash];
	struct cache_head *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, head, cache_list) {
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp))
				continue;
			tmp = cache_get_rcu(tmp);
			rcu_read_unlock();
			return tmp;
		}
	}
	rcu_read_unlock();
	return NULL;
}
static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
						 struct cache_head *key,
						 int hash)
{
	struct cache_head *new, *tmp, *freeme = NULL;
	struct hlist_head *head = &detail->hash_table[hash];

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might get into trouble if we need to
	 * cache_put it soon.
	 */
	cache_init(new, detail);
	detail->init(new, key);

	spin_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	hlist_for_each_entry_rcu(tmp, head, cache_list) {
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp)) {
				hlist_del_init_rcu(&tmp->cache_list);
				detail->entries--;
				freeme = tmp;
				break;
			}
			cache_get(tmp);
			spin_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}

	hlist_add_head_rcu(&new->cache_list, head);
	detail->entries++;
	cache_get(new);
	spin_unlock(&detail->hash_lock);

	if (freeme)
		cache_put(freeme, detail);
	return new;
}

struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
					   struct cache_head *key, int hash)
{
	struct cache_head *ret;

	ret = sunrpc_cache_find_rcu(detail, key, hash);
	if (ret)
		return ret;
	/* Didn't find anything, insert an empty entry */
	return sunrpc_cache_add_entry(detail, key, hash);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
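/*
 * Illustrative sketch (not part of the original file): a concrete cache
 * wraps sunrpc_cache_lookup_rcu() in its own typed helper, along the
 * lines of the users in svcauth_unix.c.  'struct my_item', my_hash()
 * and the embedded cache_head member 'h' are assumed names:
 *
 *	static struct my_item *my_lookup(struct cache_detail *cd,
 *					 struct my_item *key)
 *	{
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_lookup_rcu(cd, &key->h, my_hash(key));
 *		if (ch)
 *			return container_of(ch, struct my_item, h);
 *		return NULL;
 *	}
 *
 * The entry comes back with a reference held; the caller eventually
 * drops it with cache_put().
 */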
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry,
			       struct cache_detail *detail)
{
	time_t now = seconds_since_boot();
	if (now <= detail->flush_time)
		/* ensure it isn't immediately treated as expired */
		now = detail->flush_time + 1;
	head->expiry_time = expiry;
	head->last_refresh = now;
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		spin_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time, detail);
			spin_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		spin_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp, detail);
	detail->init(tmp, old);

	spin_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time, detail);
	cache_fresh_locked(old, 0, detail);
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
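/*
 * Illustrative sketch (not part of the original file): after a downcall
 * has been parsed, a cache folds the new data into the hashed entry via
 * sunrpc_cache_update(); 'my_item'/'my_hash' are the same assumed names
 * as above:
 *
 *	static int my_update(struct cache_detail *cd, struct my_item *old,
 *			     struct my_item *new)
 *	{
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_update(cd, &new->h, &old->h, my_hash(old));
 *		if (!ch)
 *			return -ENOMEM;	// 'old' was already put for us
 *		cache_put(ch, cd);
 *		return 0;
 *	}
 */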
static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (cd->cache_upcall)
		return cd->cache_upcall(cd, h);
	return sunrpc_cache_pipe_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with write barrier in
			 * sunrpc_cache_update, ensures that anyone
			 * using the cache entry after this sees the
			 * updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	spin_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		set_bit(CACHE_NEGATIVE, &h->flags);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
				   detail);
		rv = -ENOENT;
	}
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 *	-EAGAIN if upcall is pending and request has been queued
 *	-ETIMEDOUT if upcall failed or request could not be queued or
 *		   upcall completed but item is still invalid (implying that
 *		   the cache item has been replaced with a newer one).
 *	-ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN ||
		   (h->expiry_time != 0 && age > refresh_age/2)) {
		dprintk("RPC: Want update, refage=%ld, age=%ld\n",
			refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				rv = try_to_negate_entry(detail, h);
				break;
			case -EAGAIN:
				cache_fresh_unlocked(h, detail);
				break;
			}
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
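/*
 * Illustrative sketch (not part of the original file): a server-side
 * consumer typically calls cache_check() with the svc_rqst's deferral
 * handle and maps the result onto svcauth verdicts; the surrounding
 * svc_rqst plumbing is only assumed here:
 *
 *	switch (cache_check(cd, &ipm->h, &rqstp->rq_chandle)) {
 *	case 0:			// entry valid, reference still held
 *		break;
 *	case -EAGAIN:		// request was deferred pending an upcall
 *	case -ETIMEDOUT:
 *		return SVC_DROP;
 *	case -ENOENT:		// negative entry
 *	default:
 *		return SVC_DENIED;
 *	}
 */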
/*
 * Caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before the current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * Later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'.
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	spin_lock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	spin_lock(&cd->hash_lock);
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	spin_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       hlist_empty(&current_detail->hash_table[current_index]))
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch = NULL;
		struct cache_detail *d;
		struct hlist_head *head;
		struct hlist_node *tmp;

		spin_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		head = &current_detail->hash_table[current_index];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			hlist_del_init_rcu(&ch->cache_list);
			current_detail->entries--;
			rv = 1;
			break;
		}

		spin_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			set_bit(CACHE_CLEANED, &ch->flags);
			cache_fresh_unlocked(ch, d);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}
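/*
 * Worked example of the nextcheck scheduling implemented above
 * (illustrative numbers only): a scan starting at time 1000 first sets
 * nextcheck to 1000 + 30*60.  If the scan then walks past entries
 * expiring at times 1200 and 1100, nextcheck is pulled down to 1201 and
 * then to 1101, so the table is left unscanned until time 1101 unless
 * flush_time is moved earlier in the meantime.
 */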
/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		queue_delayed_work(system_power_efficient_wq,
				   &cache_cleaner, delay);
}


/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	struct cache_head *ch = NULL;
	struct hlist_head *head = NULL;
	struct hlist_node *tmp = NULL;
	int i = 0;

	spin_lock(&detail->hash_lock);
	if (!detail->entries) {
		spin_unlock(&detail->hash_lock);
		return;
	}

	dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
	for (i = 0; i < detail->hash_size; i++) {
		head = &detail->hash_table[i];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			hlist_del_init_rcu(&ch->cache_list);
			detail->entries--;

			set_bit(CACHE_CLEANED, &ch->flags);
			spin_unlock(&detail->hash_lock);
			cache_fresh_unlocked(ch, detail);
			cache_put(ch, detail);
			spin_lock(&detail->hash_lock);
		}
	}
	spin_unlock(&detail->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_purge);


/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'.
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available.
 */
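/*
 * Illustrative sketch (not part of the original file): a request provider
 * supplies a cache_deferred_req whose ->revisit() either requeues or drops
 * the request once the blocking cache entry is resolved, much as the
 * svc_xprt deferral code does.  The 'my_*' names are assumed:
 *
 *	static void my_revisit(struct cache_deferred_req *dreq, int too_many)
 *	{
 *		struct my_deferred *md =
 *			container_of(dreq, struct my_deferred, handle);
 *
 *		if (too_many)
 *			my_drop(md);	// hypothetical
 *		else
 *			my_requeue(md);	// hypothetical
 *	}
 */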
#define DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{

	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);

}

struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (prandom_u32() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}
/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll reports readable if there is anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */
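/*
 * Illustrative channel traffic (not part of the original file; the field
 * values are made up): for an address-class cache, a daemon's read()
 * might return a request line such as
 *
 *	nfsd 192.0.2.1
 *
 * and the daemon would answer with one complete write(), e.g.
 *
 *	nfsd 192.0.2.1 2147483647 mydomain
 *
 * The exact record layout is private to each cache's ->cache_request
 * and ->cache_parse methods.
 */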
static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static int cache_request(struct cache_detail *detail,
			 struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -EAGAIN;
	return PAGE_SIZE - len;
}

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	inode_lock(inode); /* protect against multiple concurrent
			    * readers on this file */
again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		inode_unlock(inode);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	inode_unlock(inode);
	return err ? err : count;
}
static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	put_page(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	inode_lock(inode);
	ret = cache_downcall(mapping, buf, count, cd);
	inode_unlock(inode);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static __poll_t cache_poll(struct file *filp, poll_table *wait,
			   struct cache_detail *cd)
{
	__poll_t mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = EPOLLOUT | EPOLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= EPOLLIN | EPOLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}
static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = seconds_since_boot();
		atomic_dec(&cd->readers);
	}
	module_put(cd->owner);
	return 0;
}



static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq, *tmp;
	struct cache_request *cr;
	struct list_head dequeued;

	INIT_LIST_HEAD(&dequeued);
	spin_lock(&queue_lock);
	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
		if (!cq->reader) {
			cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
			if (cr->readers != 0)
				continue;
			list_move(&cr->q.list, &dequeued);
		}
	spin_unlock(&queue_lock);
	while (!list_empty(&dequeued)) {
		cr = list_entry(dequeued.next, struct cache_request, q.list);
		list_del(&cr->q.list);
		cache_put(cr->item, detail);
		kfree(cr->buf);
		kfree(cr);
	}
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space, tab, newline and backslash
 * with a backslash, or hexified with a leading \x.
 * Record is terminated with newline.
 */
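/*
 * Illustrative sketch (not part of the original file): a ->cache_request
 * method builds its record with these helpers and finishes it with a
 * newline; 'my_item' and its fields are assumed:
 *
 *	static void my_request(struct cache_detail *cd, struct cache_head *h,
 *			       char **bpp, int *blen)
 *	{
 *		struct my_item *im = container_of(h, struct my_item, h);
 *
 *		qword_add(bpp, blen, im->class);		// quoted text field
 *		qword_addhex(bpp, blen, (char *)&im->addr, 4);	// \x-prefixed hex
 *		(*bpp)[-1] = '\n';				// replace trailing space
 *	}
 */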
void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	int ret;

	if (len < 0)
		return;

	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
	if (ret >= len) {
		bp += len;
		len = -1;
	} else {
		bp += ret;
		len -= ret;
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0)
		return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			bp = hex_byte_pack(bp, *buf++);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->readers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by
 * the upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (!detail->cache_request)
		return -EINVAL;

	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		return -EINVAL;
	}
	if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
		return -EAGAIN;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	crq->q.reader = 0;
	crq->buf = buf;
	crq->len = 0;
	crq->readers = 0;
	spin_lock(&queue_lock);
	if (test_bit(CACHE_PENDING, &h->flags)) {
		crq->item = cache_get(h);
		list_add_tail(&crq->q.list, &detail->queue);
	} else
		/* Lost a race, no longer PENDING, so don't enqueue */
		ret = -EAGAIN;
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	if (ret == -EAGAIN) {
		kfree(buf);
		kfree(crq);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *	reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */
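/*
 * Illustrative sketch (not part of the original file): a ->cache_parse
 * method usually walks the message with qword_get(), e.g. for a made-up
 * line "client \x0a000001 2147483647 mydomain\n":
 *
 *	char buf[128];
 *
 *	if (qword_get(&mesg, buf, sizeof(buf)) <= 0)	// "client"
 *		return -EINVAL;
 *	if (qword_get(&mesg, buf, sizeof(buf)) != 4)	// 4 address bytes
 *		return -EINVAL;
 *
 * Helpers such as get_int() and get_expiry() in sunrpc/cache.h layer the
 * numeric conversions on top of this same routine.
 */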
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize - 1) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp - '0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);


/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	unsigned int hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = m->private;

	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 hlist_empty(&cd->hash_table[hash]));
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}

static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->cache_list.next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return hlist_entry_safe(rcu_dereference_raw(
					hlist_next_rcu(&ch->cache_list)),
					struct cache_head, cache_list);
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       hlist_empty(&cd->hash_table[hash])) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}

void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return __cache_seq_start(m, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_start_rcu);

void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
{
	return cache_seq_next(file, p, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_next_rcu);
void cache_seq_stop_rcu(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   kref_read(&cp->ref), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else {
		if (cache_is_expired(cd, cp))
			seq_printf(m, "# ");
		cache_put(cp, cd);
	}

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= cache_seq_start_rcu,
	.next	= cache_seq_next_rcu,
	.stop	= cache_seq_stop_rcu,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct seq_file *seq;
	int err;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;

	err = seq_open(file, &cache_content_op);
	if (err) {
		module_put(cd->owner);
		return err;
	}

	seq = file->private_data;
	seq->private = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
			   struct cache_detail *cd)
{
	int ret = seq_release(inode, file);
	module_put(cd->owner);
	return ret;
}

static int open_flush(struct inode *inode, struct file *file,
		      struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			 struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[22];
	size_t len;

	len = snprintf(tbuf, sizeof(tbuf), "%lu\n",
		       convert_to_wallclock(cd->flush_time));
	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
}
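/*
 * Illustrative usage (not part of the original file): writing any valid
 * number to a flush file invalidates the whole cache, e.g.
 *
 *	echo 1 > /proc/net/rpc/auth.unix.ip/flush
 *
 * As the comments in write_flush() below note, the value itself is
 * checked but otherwise ignored.
 */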
1504 */ 1505 1506 if (cd->flush_time >= now) 1507 now = cd->flush_time + 1; 1508 1509 cd->flush_time = now; 1510 cd->nextcheck = now; 1511 cache_flush(); 1512 1513 *ppos += count; 1514 return count; 1515 } 1516 1517 static ssize_t cache_read_procfs(struct file *filp, char __user *buf, 1518 size_t count, loff_t *ppos) 1519 { 1520 struct cache_detail *cd = PDE_DATA(file_inode(filp)); 1521 1522 return cache_read(filp, buf, count, ppos, cd); 1523 } 1524 1525 static ssize_t cache_write_procfs(struct file *filp, const char __user *buf, 1526 size_t count, loff_t *ppos) 1527 { 1528 struct cache_detail *cd = PDE_DATA(file_inode(filp)); 1529 1530 return cache_write(filp, buf, count, ppos, cd); 1531 } 1532 1533 static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait) 1534 { 1535 struct cache_detail *cd = PDE_DATA(file_inode(filp)); 1536 1537 return cache_poll(filp, wait, cd); 1538 } 1539 1540 static long cache_ioctl_procfs(struct file *filp, 1541 unsigned int cmd, unsigned long arg) 1542 { 1543 struct inode *inode = file_inode(filp); 1544 struct cache_detail *cd = PDE_DATA(inode); 1545 1546 return cache_ioctl(inode, filp, cmd, arg, cd); 1547 } 1548 1549 static int cache_open_procfs(struct inode *inode, struct file *filp) 1550 { 1551 struct cache_detail *cd = PDE_DATA(inode); 1552 1553 return cache_open(inode, filp, cd); 1554 } 1555 1556 static int cache_release_procfs(struct inode *inode, struct file *filp) 1557 { 1558 struct cache_detail *cd = PDE_DATA(inode); 1559 1560 return cache_release(inode, filp, cd); 1561 } 1562 1563 static const struct file_operations cache_file_operations_procfs = { 1564 .owner = THIS_MODULE, 1565 .llseek = no_llseek, 1566 .read = cache_read_procfs, 1567 .write = cache_write_procfs, 1568 .poll = cache_poll_procfs, 1569 .unlocked_ioctl = cache_ioctl_procfs, /* for FIONREAD */ 1570 .open = cache_open_procfs, 1571 .release = cache_release_procfs, 1572 }; 1573 1574 static int content_open_procfs(struct inode *inode, struct file *filp) 1575 { 1576 struct cache_detail *cd = PDE_DATA(inode); 1577 1578 return content_open(inode, filp, cd); 1579 } 1580 1581 static int content_release_procfs(struct inode *inode, struct file *filp) 1582 { 1583 struct cache_detail *cd = PDE_DATA(inode); 1584 1585 return content_release(inode, filp, cd); 1586 } 1587 1588 static const struct file_operations content_file_operations_procfs = { 1589 .open = content_open_procfs, 1590 .read = seq_read, 1591 .llseek = seq_lseek, 1592 .release = content_release_procfs, 1593 }; 1594 1595 static int open_flush_procfs(struct inode *inode, struct file *filp) 1596 { 1597 struct cache_detail *cd = PDE_DATA(inode); 1598 1599 return open_flush(inode, filp, cd); 1600 } 1601 1602 static int release_flush_procfs(struct inode *inode, struct file *filp) 1603 { 1604 struct cache_detail *cd = PDE_DATA(inode); 1605 1606 return release_flush(inode, filp, cd); 1607 } 1608 1609 static ssize_t read_flush_procfs(struct file *filp, char __user *buf, 1610 size_t count, loff_t *ppos) 1611 { 1612 struct cache_detail *cd = PDE_DATA(file_inode(filp)); 1613 1614 return read_flush(filp, buf, count, ppos, cd); 1615 } 1616 1617 static ssize_t write_flush_procfs(struct file *filp, 1618 const char __user *buf, 1619 size_t count, loff_t *ppos) 1620 { 1621 struct cache_detail *cd = PDE_DATA(file_inode(filp)); 1622 1623 return write_flush(filp, buf, count, ppos, cd); 1624 } 1625 1626 static const struct file_operations cache_flush_operations_procfs = { 1627 .open = open_flush_procfs, 1628 .read = read_flush_procfs, 1629 .write = 
static void remove_cache_proc_entries(struct cache_detail *cd)
{
	if (cd->procfs) {
		proc_remove(cd->procfs);
		cd->procfs = NULL;
	}
}

#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct proc_dir_entry *p;
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
	if (cd->procfs == NULL)
		goto out_nomem;

	p = proc_create_data("flush", S_IFREG | 0600,
			     cd->procfs, &cache_flush_operations_procfs, cd);
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_request || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
				     &cache_file_operations_procfs, cd);
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
				     &content_file_operations_procfs, cd);
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	return 0;
}
#endif

void __init cache_initialize(void)
{
	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}

int cache_register_net(struct cache_detail *cd, struct net *net)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd, net);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
	remove_cache_proc_entries(cd);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);

struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
{
	struct cache_detail *cd;
	int i;

	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
				 GFP_KERNEL);
	if (cd->hash_table == NULL) {
		kfree(cd);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < cd->hash_size; i++)
		INIT_HLIST_HEAD(&cd->hash_table[i]);
	cd->net = net;
	return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);

void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
	kfree(cd->hash_table);
	kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);
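/*
 * Illustrative sketch (not part of the original file): per-net caches are
 * usually stamped out from a template and registered, roughly as the
 * svcauth users do ('my_cache_template' is an assumed name):
 *
 *	struct cache_detail *cd = cache_create_net(&my_cache_template, net);
 *	int err;
 *
 *	if (IS_ERR(cd))
 *		return PTR_ERR(cd);
 *	err = cache_register_net(cd, net);
 *	if (err) {
 *		cache_destroy_net(cd, net);
 *		return err;
 *	}
 *
 * Teardown is cache_unregister_net() followed by cache_destroy_net().
 */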
static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
	.llseek		= no_llseek,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, umode_t umode,
				 struct cache_detail *cd)
{
	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	cd->pipefs = dir;
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	if (cd->pipefs) {
		rpc_remove_cache_dir(cd->pipefs);
		cd->pipefs = NULL;
	}
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
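/*
 * Illustrative sketch (not part of the original file): an rpc_pipefs user
 * can expose the same channel under a pipefs directory instead of (or as
 * well as) procfs.  'rpc_root_dentry' stands in for a pipefs dentry the
 * caller already holds:
 *
 *	err = sunrpc_cache_register_pipefs(rpc_root_dentry, cd->name,
 *					   0600, cd);
 *	if (err)
 *		goto out_err;
 *
 * sunrpc_cache_unregister_pipefs() undoes the registration.
 */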
void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
{
	spin_lock(&cd->hash_lock);
	if (!hlist_unhashed(&h->cache_list)) {
		hlist_del_init_rcu(&h->cache_list);
		cd->entries--;
		spin_unlock(&cd->hash_lock);
		cache_put(h, cd);
	} else
		spin_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);