// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <trace/events/sunrpc.h>
#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
	time64_t now = seconds_since_boot();

	INIT_HLIST_NODE(&h->cache_list);
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
	h->last_refresh = now;
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail);

static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
						struct cache_head *key,
						int hash)
{
	struct hlist_head *head = &detail->hash_table[hash];
	struct cache_head *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, head, cache_list) {
		if (!detail->match(tmp, key))
			continue;
		if (test_bit(CACHE_VALID, &tmp->flags) &&
		    cache_is_expired(detail, tmp))
			continue;
		tmp = cache_get_rcu(tmp);
		rcu_read_unlock();
		return tmp;
	}
	rcu_read_unlock();
	return NULL;
}

static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
					    struct cache_detail *cd)
{
	/* Must be called under cd->hash_lock */
	hlist_del_init_rcu(&ch->cache_list);
	set_bit(CACHE_CLEANED, &ch->flags);
	cd->entries--;
}

static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
					  struct cache_detail *cd)
{
	cache_fresh_unlocked(ch, cd);
	cache_put(ch, cd);
}
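/* A minimal usage sketch for the pair above ('my_detail' and 'victim'
 * are hypothetical): the "begin" half must run under cd->hash_lock,
 * while the "end" half (which may wake waiters and drop the final
 * reference) must run after the lock has been released:
 *
 *	spin_lock(&my_detail->hash_lock);
 *	sunrpc_begin_cache_remove_entry(victim, my_detail);
 *	spin_unlock(&my_detail->hash_lock);
 *	sunrpc_end_cache_remove_entry(victim, my_detail);
 */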
static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
						 struct cache_head *key,
						 int hash)
{
	struct cache_head *new, *tmp, *freeme = NULL;
	struct hlist_head *head = &detail->hash_table[hash];

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new, detail);
	detail->init(new, key);

	spin_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	hlist_for_each_entry_rcu(tmp, head, cache_list,
				 lockdep_is_held(&detail->hash_lock)) {
		if (!detail->match(tmp, key))
			continue;
		if (test_bit(CACHE_VALID, &tmp->flags) &&
		    cache_is_expired(detail, tmp)) {
			sunrpc_begin_cache_remove_entry(tmp, detail);
			trace_cache_entry_expired(detail, tmp);
			freeme = tmp;
			break;
		}
		cache_get(tmp);
		spin_unlock(&detail->hash_lock);
		cache_put(new, detail);
		return tmp;
	}

	hlist_add_head_rcu(&new->cache_list, head);
	detail->entries++;
	cache_get(new);
	spin_unlock(&detail->hash_lock);

	if (freeme)
		sunrpc_end_cache_remove_entry(freeme, detail);
	return new;
}

struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
					   struct cache_head *key, int hash)
{
	struct cache_head *ret;

	ret = sunrpc_cache_find_rcu(detail, key, hash);
	if (ret)
		return ret;
	/* Didn't find anything, insert an empty entry */
	return sunrpc_cache_add_entry(detail, key, hash);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
			       struct cache_detail *detail)
{
	time64_t now = seconds_since_boot();

	if (now <= detail->flush_time)
		/* ensure it isn't immediately treated as expired */
		now = detail->flush_time + 1;
	head->expiry_time = expiry;
	head->last_refresh = now;
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

static void cache_make_negative(struct cache_detail *detail,
				struct cache_head *h)
{
	set_bit(CACHE_NEGATIVE, &h->flags);
	trace_cache_entry_make_negative(detail, h);
}

static void cache_entry_update(struct cache_detail *detail,
			       struct cache_head *h,
			       struct cache_head *new)
{
	if (!test_bit(CACHE_NEGATIVE, &new->flags)) {
		detail->update(h, new);
		trace_cache_entry_update(detail, h);
	} else {
		cache_make_negative(detail, h);
	}
}
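/* Sketch of the typical caller pattern (illustrative only; 'demo_detail',
 * 'struct demo_entry' with an embedded cache_head 'h', and demo_hash()
 * are hypothetical).  A lookup may return an entry that is not yet
 * VALID; once the authoritative data is known, sunrpc_cache_update()
 * fills it in or replaces it:
 *
 *	struct cache_head *ch;
 *
 *	ch = sunrpc_cache_lookup_rcu(demo_detail, &key->h, demo_hash(key));
 *	if (!ch)
 *		return -ENOMEM;
 *	... later, when an answer is available in 'new' ...
 *	ch = sunrpc_cache_update(demo_detail, &new->h, ch, demo_hash(key));
 */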
struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		spin_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			cache_entry_update(detail, old, new);
			cache_fresh_locked(old, new->expiry_time, detail);
			spin_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		spin_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp, detail);
	detail->init(tmp, old);

	spin_lock(&detail->hash_lock);
	cache_entry_update(detail, tmp, new);
	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time, detail);
	cache_fresh_locked(old, 0, detail);
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);

static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with write barrier in
			 * sunrpc_cache_update, ensures that anyone
			 * using the cache entry after this sees the
			 * updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	spin_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		cache_make_negative(detail, h);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
				   detail);
		rv = -ENOENT;
	}
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 *	-EAGAIN	   if upcall is pending and request has been queued
 *	-ETIMEDOUT if upcall failed or request could not be queued or
 *		   upcall completed but item is still invalid (implying that
 *		   the cache item has been replaced with a newer one).
 *	-ENOENT	   if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	time64_t refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN ||
		   (h->expiry_time != 0 && age > refresh_age/2)) {
		dprintk("RPC: Want update, refage=%lld, age=%lld\n",
			refresh_age, age);
		switch (detail->cache_upcall(detail, h)) {
		case -EINVAL:
			rv = try_to_negate_entry(detail, h);
			break;
		case -EAGAIN:
			cache_fresh_unlocked(h, detail);
			break;
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
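/* Sketch of a consumer, e.g. a server-side authorization path
 * (hypothetical names).  Note that cache_check() consumes the caller's
 * reference on any non-zero return, so the entry is only used and put
 * on success:
 *
 *	switch (cache_check(demo_detail, &item->h, &rqstp->rq_chandle)) {
 *	case 0:
 *		... use item, then cache_put(&item->h, demo_detail) ...
 *	case -EAGAIN:
 *		... request was deferred; it will be revisited ...
 *	case -ENOENT:
 *		... negative entry: reject ...
 *	case -ETIMEDOUT:
 *		... upcall failed: drop or reject ...
 *	}
 */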
/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	spin_lock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->writers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	spin_lock(&cd->hash_lock);
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	spin_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill
		 * the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       hlist_empty(&current_detail->hash_table[current_index]))
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */
	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch = NULL;
		struct cache_detail *d;
		struct hlist_head *head;
		struct hlist_node *tmp;

		spin_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */
		head = &current_detail->hash_table[current_index];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			sunrpc_begin_cache_remove_entry(ch, current_detail);
			trace_cache_entry_expired(current_detail, ch);
			rv = 1;
			break;
		}

		spin_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch)
			sunrpc_end_cache_remove_entry(ch, d);
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}
/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay;

	if (list_empty(&cache_list))
		return;

	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);
	else
		delay = 5;

	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, delay);
}

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	struct cache_head *ch = NULL;
	struct hlist_head *head = NULL;
	int i = 0;

	spin_lock(&detail->hash_lock);
	if (!detail->entries) {
		spin_unlock(&detail->hash_lock);
		return;
	}

	dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
	for (i = 0; i < detail->hash_size; i++) {
		head = &detail->hash_table[i];
		while (!hlist_empty(head)) {
			ch = hlist_entry(head->first, struct cache_head,
					 cache_list);
			sunrpc_begin_cache_remove_entry(ch, detail);
			spin_unlock(&detail->hash_lock);
			sunrpc_end_cache_remove_entry(ch, detail);
			spin_lock(&detail->hash_lock);
		}
	}
	spin_unlock(&detail->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_purge);

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'.
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available.
 */
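/* Sketch of a deferred form as described above (hypothetical names).
 * The owner embeds a struct cache_deferred_req and supplies a ->revisit
 * method that either requeues or discards the saved request:
 *
 *	struct demo_deferred {
 *		struct cache_deferred_req handle;
 *		... enough state to restart the request ...
 *	};
 *
 *	static void demo_revisit(struct cache_deferred_req *dreq, int too_many)
 *	{
 *		struct demo_deferred *dd =
 *			container_of(dreq, struct demo_deferred, handle);
 *
 *		if (too_many)
 *			demo_drop(dd);
 *		else
 *			demo_requeue(dd);
 *	}
 */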
#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{
	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);
}

struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (prandom_u32() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}
/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}
/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static int cache_request(struct cache_detail *detail,
			 struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -EAGAIN;
	return PAGE_SIZE - len;
}

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	inode_lock(inode); /* protect against multiple concurrent
			    * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;

		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		inode_unlock(inode);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	inode_unlock(inode);
	return err ? err : count;
}
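/* For reference, the user-space side of the channel is a simple
 * read/write loop (illustrative, error handling omitted; the reply
 * format is specific to each cache's ->cache_parse):
 *
 *	char buf[8192];
 *	int fd = open("/proc/net/rpc/demo/channel", O_RDWR);
 *
 *	for (;;) {
 *		ssize_t n = read(fd, buf, sizeof(buf));   ... one request ...
 *		... resolve the request, format a reply line ...
 *		write(fd, reply, replylen);               ... one downcall ...
 *	}
 */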
static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[32768]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	put_page(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	inode_lock(inode);
	ret = cache_downcall(mapping, buf, count, cd);
	inode_unlock(inode);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static __poll_t cache_poll(struct file *filp, poll_table *wait,
			   struct cache_detail *cd)
{
	__poll_t mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = EPOLLOUT | EPOLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= EPOLLIN | EPOLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}
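/* User space can size its read buffer with FIONREAD (illustrative):
 *
 *	int avail = 0;
 *
 *	if (ioctl(fd, FIONREAD, &avail) == 0 && avail > 0)
 *		... a request of 'avail' bytes is ready to be read ...
 */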
static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;

		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	if (filp->f_mode & FMODE_WRITE)
		atomic_inc(&cd->writers);
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;

			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);
	}
	if (filp->f_mode & FMODE_WRITE) {
		atomic_dec(&cd->writers);
		cd->last_close = seconds_since_boot();
	}
	module_put(cd->owner);
	return 0;
}

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq, *tmp;
	struct cache_request *cr;
	struct list_head dequeued;

	INIT_LIST_HEAD(&dequeued);
	spin_lock(&queue_lock);
	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
		if (!cq->reader) {
			cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
			if (cr->readers != 0)
				continue;
			list_move(&cr->q.list, &dequeued);
		}
	spin_unlock(&queue_lock);
	while (!list_empty(&dequeued)) {
		cr = list_entry(dequeued.next, struct cache_request, q.list);
		list_del(&cr->q.list);
		cache_put(cr->item, detail);
		kfree(cr->buf);
		kfree(cr);
	}
}
/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 */

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	int ret;

	if (len < 0)
		return;

	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
	if (ret >= len) {
		bp += len;
		len = -1;
	} else {
		bp += ret;
		len -= ret;
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0)
		return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			bp = hex_byte_pack(bp, *buf++);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);
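/* Examples of the resulting encoding (illustrative): given enough buffer
 * space, qword_add(&bp, &len, "a b") emits "a\040b " (the space is
 * octal-escaped, then the field separator is appended), while
 * qword_addhex(&bp, &len, "\1\2", 2) emits "\x0102 ".
 */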
static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->writers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
		return -EAGAIN;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	crq->q.reader = 0;
	crq->buf = buf;
	crq->len = 0;
	crq->readers = 0;
	spin_lock(&queue_lock);
	if (test_bit(CACHE_PENDING, &h->flags)) {
		crq->item = cache_get(h);
		list_add_tail(&crq->q.list, &detail->queue);
		trace_cache_entry_upcall(detail, h);
	} else
		/* Lost a race, no longer PENDING, so don't enqueue */
		ret = -EAGAIN;
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	if (ret == -EAGAIN) {
		kfree(buf);
		kfree(crq);
	}
	return ret;
}

int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	if (test_and_set_bit(CACHE_PENDING, &h->flags))
		return 0;
	return cache_pipe_upcall(detail, h);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);

int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
				     struct cache_head *h)
{
	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		trace_cache_entry_no_listener(detail, h);
		return -EINVAL;
	}
	return sunrpc_cache_pipe_upcall(detail, h);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout);

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ')
		bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize - 1) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');

				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ')
		bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);
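/* Example (illustrative): if 'buf' contains the text foo\040bar followed
 * by a space and "next", then
 *
 *	char *bp = buf;
 *	int n = qword_get(&bp, dest, sizeof(dest));
 *
 * leaves n == 7, dest == "foo bar", and bp pointing at "next".
 */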
/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	unsigned int hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = m->private;

	if (!n--)
		return SEQ_START_TOKEN;
	/* *pos encodes the bucket in its upper 32 bits and the
	 * index within that bucket in its lower 32 bits.
	 */
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 hlist_empty(&cd->hash_table[hash]));
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}

static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->cache_list.next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return hlist_entry_safe(rcu_dereference_raw(
					hlist_next_rcu(&ch->cache_list)),
					struct cache_head, cache_list);
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       hlist_empty(&cd->hash_table[hash])) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}

void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return __cache_seq_start(m, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_start_rcu);

void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
{
	return cache_seq_next(file, p, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_next_rcu);

void cache_seq_stop_rcu(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   kref_read(&cp->ref), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_puts(m, "# ");
	else {
		if (cache_is_expired(cd, cp))
			seq_puts(m, "# ");
		cache_put(cp, cd);
	}

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= cache_seq_start_rcu,
	.next	= cache_seq_next_rcu,
	.stop	= cache_seq_stop_rcu,
	.show	= c_show,
};
static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct seq_file *seq;
	int err;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;

	err = seq_open(file, &cache_content_op);
	if (err) {
		module_put(cd->owner);
		return err;
	}

	seq = file->private_data;
	seq->private = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
			   struct cache_detail *cd)
{
	int ret = seq_release(inode, file);

	module_put(cd->owner);
	return ret;
}

static int open_flush(struct inode *inode, struct file *file,
		      struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			 struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[22];
	size_t len;

	len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
		       convert_to_wallclock(cd->flush_time));
	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
}
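/* The flush file is normally driven from user space, e.g. by export
 * management tools.  Writing any decimal timestamp triggers a full
 * flush; the value itself is deliberately ignored, see the comment in
 * write_flush() below (illustrative):
 *
 *	int fd = open("/proc/net/rpc/demo/flush", O_WRONLY);
 *
 *	write(fd, "1\n", 2);
 */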
static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *ep;
	time64_t now;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;
	/* Note that while we check that 'buf' holds a valid number,
	 * we always ignore the value and just flush everything.
	 * Making use of the number leads to races.
	 */

	now = seconds_since_boot();
	/* Always flush everything, so behave like cache_purge()
	 * Do this by advancing flush_time to the current time,
	 * or by one second if it has already reached the current time.
	 * Newly added cache entries will always have ->last_refresh greater
	 * than ->flush_time, so they don't get flushed prematurely.
	 */

	if (cd->flush_time >= now)
		now = cd->flush_time + 1;

	cd->flush_time = now;
	cd->nextcheck = now;
	cache_flush();

	if (cd->flush)
		cd->flush();

	*ppos += count;
	return count;
}

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_release(inode, filp, cd);
}

static const struct proc_ops cache_channel_proc_ops = {
	.proc_lseek	= no_llseek,
	.proc_read	= cache_read_procfs,
	.proc_write	= cache_write_procfs,
	.proc_poll	= cache_poll_procfs,
	.proc_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
	.proc_open	= cache_open_procfs,
	.proc_release	= cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_release(inode, filp, cd);
}

static const struct proc_ops content_proc_ops = {
	.proc_open	= content_open_procfs,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return write_flush(filp, buf, count, ppos, cd);
}
static const struct proc_ops cache_flush_proc_ops = {
	.proc_open	= open_flush_procfs,
	.proc_read	= read_flush_procfs,
	.proc_write	= write_flush_procfs,
	.proc_release	= release_flush_procfs,
	.proc_lseek	= no_llseek,
};

static void remove_cache_proc_entries(struct cache_detail *cd)
{
	if (cd->procfs) {
		proc_remove(cd->procfs);
		cd->procfs = NULL;
	}
}

#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct proc_dir_entry *p;
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
	if (cd->procfs == NULL)
		goto out_nomem;

	p = proc_create_data("flush", S_IFREG | 0600,
			     cd->procfs, &cache_flush_proc_ops, cd);
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_request || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
				     &cache_channel_proc_ops, cd);
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
				     &content_proc_ops, cd);
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	return 0;
}
#endif

void __init cache_initialize(void)
{
	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}

int cache_register_net(struct cache_detail *cd, struct net *net)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd, net);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
	remove_cache_proc_entries(cd);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);

struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
{
	struct cache_detail *cd;
	int i;

	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
				 GFP_KERNEL);
	if (cd->hash_table == NULL) {
		kfree(cd);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < cd->hash_size; i++)
		INIT_HLIST_HEAD(&cd->hash_table[i]);
	cd->net = net;
	return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);

void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
	kfree(cd->hash_table);
	kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);
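/* Sketch of per-net instantiation (with a hypothetical 'demo_template'):
 * clone the template, register the clone (which creates the procfs
 * entries and starts the cleaner), and tear down in reverse order:
 *
 *	struct cache_detail *cd = cache_create_net(&demo_template, net);
 *	int err;
 *
 *	if (IS_ERR(cd))
 *		return PTR_ERR(cd);
 *	err = cache_register_net(cd, net);
 *	if (err) {
 *		cache_destroy_net(cd, net);
 *		return err;
 *	}
 *	...
 *	cache_unregister_net(cd, net);
 *	cache_destroy_net(cd, net);
 */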
static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
	.llseek		= no_llseek,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, umode_t umode,
				 struct cache_detail *cd)
{
	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);

	if (IS_ERR(dir))
		return PTR_ERR(dir);
	cd->pipefs = dir;
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	if (cd->pipefs) {
		rpc_remove_cache_dir(cd->pipefs);
		cd->pipefs = NULL;
	}
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
{
	spin_lock(&cd->hash_lock);
	if (!hlist_unhashed(&h->cache_list)) {
		sunrpc_begin_cache_remove_entry(h, cd);
		spin_unlock(&cd->hash_lock);
		sunrpc_end_cache_remove_entry(h, cd);
	} else
		spin_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);