/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

static struct hlist_head *cache_hash;
static struct list_head lru_head;
static struct kmem_cache *drc_slab;

/* max number of entries allowed in the cache */
static unsigned int max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int maskbits;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */

/* total number of entries */
static unsigned int num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int drc_mem_usage;

/* longest hash chain seen */
static unsigned int longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int longest_chain_cachesize;

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void cache_cleaner_func(struct work_struct *unused);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
                                            struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
                                           struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
        .scan_objects = nfsd_reply_cache_scan,
        .count_objects = nfsd_reply_cache_count,
        .seeks = 1,
};

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG.
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static DEFINE_SPINLOCK(cache_lock);
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the numbers above also give a rough upper bound, in kilobytes, on
 * the memory used.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
        unsigned int limit;
        unsigned long low_pages = totalram_pages - totalhigh_pages;

        limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
        return min_t(unsigned int, limit, 256*1024);
}

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to the next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
        return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
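
/*
 * Worked example of the two sizing helpers above (numbers assume 4K pages):
 * a machine with 1GB of low memory has 262144 low pages, int_sqrt(262144)
 * is 512, so the limit is (16 * 512) << 2 == 32768 entries, matching the
 * table above. nfsd_hashsize(32768) is then roundup_pow_of_two(32768 / 64)
 * == 512 buckets, i.e. maskbits == 9.
 */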

static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
        struct svc_cacherep *rp;

        rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
        if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
                INIT_LIST_HEAD(&rp->c_lru);
                INIT_HLIST_NODE(&rp->c_hash);
        }
        return rp;
}

static void
nfsd_reply_cache_unhash(struct svc_cacherep *rp)
{
        hlist_del_init(&rp->c_hash);
        list_del_init(&rp->c_lru);
}

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
        if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
                drc_mem_usage -= rp->c_replvec.iov_len;
                kfree(rp->c_replvec.iov_base);
        }
        if (!hlist_unhashed(&rp->c_hash))
                hlist_del(&rp->c_hash);
        list_del(&rp->c_lru);
        --num_drc_entries;
        drc_mem_usage -= sizeof(*rp);
        kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
        spin_lock(&cache_lock);
        nfsd_reply_cache_free_locked(rp);
        spin_unlock(&cache_lock);
}

int nfsd_reply_cache_init(void)
{
        unsigned int hashsize;

        INIT_LIST_HEAD(&lru_head);
        max_drc_entries = nfsd_cache_size_limit();
        num_drc_entries = 0;
        hashsize = nfsd_hashsize(max_drc_entries);
        maskbits = ilog2(hashsize);

        register_shrinker(&nfsd_reply_cache_shrinker);
        drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
                                     0, 0, NULL);
        if (!drc_slab)
                goto out_nomem;

        cache_hash = kcalloc(hashsize, sizeof(struct hlist_head), GFP_KERNEL);
        if (!cache_hash)
                goto out_nomem;

        return 0;
out_nomem:
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
        nfsd_reply_cache_shutdown();
        return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
        struct svc_cacherep *rp;

        unregister_shrinker(&nfsd_reply_cache_shrinker);
        cancel_delayed_work_sync(&cache_cleaner);

        while (!list_empty(&lru_head)) {
                rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
                nfsd_reply_cache_free_locked(rp);
        }

        kfree(cache_hash);
        cache_hash = NULL;

        if (drc_slab) {
                kmem_cache_destroy(drc_slab);
                drc_slab = NULL;
        }
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
        rp->c_timestamp = jiffies;
        list_move_tail(&rp->c_lru, &lru_head);
        schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
        hlist_del_init(&rp->c_hash);
        hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits));
}

static inline bool
nfsd_cache_entry_expired(struct svc_cacherep *rp)
{
        return rp->c_state != RC_INPROG &&
               time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}
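
/*
 * Note that an RC_INPROG entry is never treated as expired above: it is
 * still owned by the nfsd thread that inserted it, via rqstp->rq_cacherep,
 * until nfsd_cache_update() marks it RC_DONE (or frees it).
 */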

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(void)
{
        struct svc_cacherep *rp, *tmp;
        long freed = 0;

        list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
                if (!nfsd_cache_entry_expired(rp) &&
                    num_drc_entries <= max_drc_entries)
                        break;
                nfsd_reply_cache_free_locked(rp);
                freed++;
        }

        /*
         * Conditionally rearm the job. If we cleaned out the list, then
         * cancel any pending run (since there won't be any work to do).
         * Otherwise, we rearm the job or modify the existing one to run in
         * RC_EXPIRE since we just ran the pruner.
         */
        if (list_empty(&lru_head))
                cancel_delayed_work(&cache_cleaner);
        else
                mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
        return freed;
}

static void
cache_cleaner_func(struct work_struct *unused)
{
        spin_lock(&cache_lock);
        prune_cache_entries();
        spin_unlock(&cache_lock);
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long num;

        spin_lock(&cache_lock);
        num = num_drc_entries;
        spin_unlock(&cache_lock);

        return num;
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long freed;

        spin_lock(&cache_lock);
        freed = prune_cache_entries();
        spin_unlock(&cache_lock);
        return freed;
}

/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
        int idx;
        unsigned int base;
        __wsum csum;
        struct xdr_buf *buf = &rqstp->rq_arg;
        const unsigned char *p = buf->head[0].iov_base;
        size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
                                RC_CSUMLEN);
        size_t len = min(buf->head[0].iov_len, csum_len);

        /* rq_arg.head first */
        csum = csum_partial(p, len, 0);
        csum_len -= len;

        /* Continue into page array */
        idx = buf->page_base / PAGE_SIZE;
        base = buf->page_base & ~PAGE_MASK;
        while (csum_len) {
                p = page_address(buf->pages[idx]) + base;
                len = min_t(size_t, PAGE_SIZE - base, csum_len);
                csum = csum_partial(p, len, csum);
                csum_len -= len;
                base = 0;
                ++idx;
        }
        return csum;
}

static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
        /* Check RPC header info first */
        if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc ||
            rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers ||
            rqstp->rq_arg.len != rp->c_len ||
            !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
            rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
                return false;

        /* compare checksum of NFS data */
        if (csum != rp->c_csum) {
                ++payload_misses;
                return false;
        }

        return true;
}
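
/*
 * Note: only the first RC_CSUMLEN bytes of the argument are covered by the
 * checksum, so a mismatch here (counted in payload_misses) means the RPC
 * header fields all matched but the payload did not -- typically a client
 * reusing an XID for a different request.
 */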

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
{
        struct svc_cacherep *rp, *ret = NULL;
        struct hlist_head *rh;
        unsigned int entries = 0;

        rh = &cache_hash[hash_32(rqstp->rq_xid, maskbits)];
        hlist_for_each_entry(rp, rh, c_hash) {
                ++entries;
                if (nfsd_cache_match(rqstp, csum, rp)) {
                        ret = rp;
                        break;
                }
        }

        /* tally hash chain length stats */
        if (entries > longest_chain) {
                longest_chain = entries;
                longest_chain_cachesize = num_drc_entries;
        } else if (entries == longest_chain) {
                /* prefer to keep the smallest cachesize possible here */
                longest_chain_cachesize = min(longest_chain_cachesize,
                                              num_drc_entries);
        }

        return ret;
}
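
/*
 * Return values from nfsd_cache_lookup() below (see cache.h): RC_DOIT means
 * the caller should go ahead and process the call, RC_DROPIT means drop the
 * request without replying (the original call is still in progress, or this
 * is an excessive retransmission), and RC_REPLY means a cached reply has
 * already been copied into rq_res and should simply be sent.
 */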

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
        struct svc_cacherep *rp, *found;
        __be32 xid = rqstp->rq_xid;
        u32 proto = rqstp->rq_prot,
            vers = rqstp->rq_vers,
            proc = rqstp->rq_proc;
        __wsum csum;
        unsigned long age;
        int type = rqstp->rq_cachetype;
        int rtn = RC_DOIT;

        rqstp->rq_cacherep = NULL;
        if (type == RC_NOCACHE) {
                nfsdstats.rcnocache++;
                return rtn;
        }

        csum = nfsd_cache_csum(rqstp);

        /*
         * Since the common case is a cache miss followed by an insert,
         * preallocate an entry. First, try to reuse the entry at the front
         * of the LRU if it has expired (or the cache is over its limit),
         * pruning the rest of the LRU list while we're at it. Otherwise,
         * drop the lock and allocate a fresh entry.
         */
        spin_lock(&cache_lock);
        if (!list_empty(&lru_head)) {
                rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
                if (nfsd_cache_entry_expired(rp) ||
                    num_drc_entries >= max_drc_entries) {
                        nfsd_reply_cache_unhash(rp);
                        prune_cache_entries();
                        goto search_cache;
                }
        }

        /* No expired ones available, allocate a new one. */
        spin_unlock(&cache_lock);
        rp = nfsd_reply_cache_alloc();
        spin_lock(&cache_lock);
        if (likely(rp)) {
                ++num_drc_entries;
                drc_mem_usage += sizeof(*rp);
        }

search_cache:
        found = nfsd_cache_search(rqstp, csum);
        if (found) {
                if (likely(rp))
                        nfsd_reply_cache_free_locked(rp);
                rp = found;
                goto found_entry;
        }

        if (!rp) {
                dprintk("nfsd: unable to allocate DRC entry!\n");
                goto out;
        }

        /*
         * We're keeping the one we just allocated. Are we now over the
         * limit? If so, prune one off the tip of the LRU in trade for the
         * one we just allocated.
         */
        if (num_drc_entries >= max_drc_entries)
                nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
                                                struct svc_cacherep, c_lru));

        nfsdstats.rcmisses++;
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;
        rp->c_xid = xid;
        rp->c_proc = proc;
        rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
        rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
        rp->c_prot = proto;
        rp->c_vers = vers;
        rp->c_len = rqstp->rq_arg.len;
        rp->c_csum = csum;

        hash_refile(rp);
        lru_put_end(rp);

        /* release any buffer */
        if (rp->c_type == RC_REPLBUFF) {
                drc_mem_usage -= rp->c_replvec.iov_len;
                kfree(rp->c_replvec.iov_base);
                rp->c_replvec.iov_base = NULL;
        }
        rp->c_type = RC_NOCACHE;
out:
        spin_unlock(&cache_lock);
        return rtn;

found_entry:
        nfsdstats.rchits++;
        /* We found a matching entry which is either in progress or done. */
        age = jiffies - rp->c_timestamp;
        lru_put_end(rp);

        rtn = RC_DROPIT;
        /* Request being processed or excessive rexmits */
        if (rp->c_state == RC_INPROG || age < RC_DELAY)
                goto out;

        /* From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache? */
        rtn = RC_DOIT;
        if (!rqstp->rq_secure && rp->c_secure)
                goto out;

        /* Compose RPC reply header */
        switch (rp->c_type) {
        case RC_NOCACHE:
                break;
        case RC_REPLSTAT:
                svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
                rtn = RC_REPLY;
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
                        goto out;       /* should not happen */
                rtn = RC_REPLY;
                break;
        default:
                printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
                nfsd_reply_cache_free_locked(rp);
        }

        goto out;
}
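
/*
 * A rough sketch of how the two entry points in this file are used by
 * nfsd_dispatch() (simplified, not the literal code there):
 *
 *      switch (nfsd_cache_lookup(rqstp)) {
 *      case RC_DROPIT:                 // duplicate in progress or rexmit
 *              return 0;               // drop the request silently
 *      case RC_REPLY:                  // cached reply already in rq_res
 *              return 1;               // just send it
 *      case RC_DOIT:
 *              break;                  // run the procedure as usual
 *      }
 *      ... run the NFS procedure and encode the reply into rq_res ...
 *      nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp);
 *
 * where statp points at the first word of the cacheable reply data (the NFS
 * status), or is NULL if encoding failed, as described below.
 */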

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
        struct svc_cacherep *rp = rqstp->rq_cacherep;
        struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
        int len;
        size_t bufsize = 0;

        if (!rp)
                return;

        len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
        len >>= 2;

        /* Don't cache excessive amounts of data and XDR failures */
        if (!statp || len > (256 >> 2)) {
                nfsd_reply_cache_free(rp);
                return;
        }

        switch (cachetype) {
        case RC_REPLSTAT:
                if (len != 1)
                        printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
                rp->c_replstat = *statp;
                break;
        case RC_REPLBUFF:
                cachv = &rp->c_replvec;
                bufsize = len << 2;
                cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
                if (!cachv->iov_base) {
                        nfsd_reply_cache_free(rp);
                        return;
                }
                cachv->iov_len = bufsize;
                memcpy(cachv->iov_base, statp, bufsize);
                break;
        case RC_NOCACHE:
                nfsd_reply_cache_free(rp);
                return;
        }
        spin_lock(&cache_lock);
        drc_mem_usage += bufsize;
        lru_put_end(rp);
        rp->c_secure = rqstp->rq_secure;
        rp->c_type = cachetype;
        rp->c_state = RC_DONE;
        spin_unlock(&cache_lock);
        return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
        struct kvec *vec = &rqstp->rq_res.head[0];

        if (vec->iov_len + data->iov_len > PAGE_SIZE) {
                printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
                       data->iov_len);
                return 0;
        }
        memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
        vec->iov_len += data->iov_len;
        return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
        spin_lock(&cache_lock);
        seq_printf(m, "max entries: %u\n", max_drc_entries);
        seq_printf(m, "num entries: %u\n", num_drc_entries);
        seq_printf(m, "hash buckets: %u\n", 1 << maskbits);
        seq_printf(m, "mem usage: %u\n", drc_mem_usage);
        seq_printf(m, "cache hits: %u\n", nfsdstats.rchits);
        seq_printf(m, "cache misses: %u\n", nfsdstats.rcmisses);
        seq_printf(m, "not cached: %u\n", nfsdstats.rcnocache);
        seq_printf(m, "payload misses: %u\n", payload_misses);
        seq_printf(m, "longest chain len: %u\n", longest_chain);
        seq_printf(m, "cachesize at longest: %u\n", longest_chain_cachesize);
        spin_unlock(&cache_lock);
        return 0;
}

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, nfsd_reply_cache_stats_show, NULL);
}
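
/*
 * The seq_file above backs the "reply_cache_stats" file in the nfsd control
 * filesystem (conventionally mounted at /proc/fs/nfsd); the file table that
 * wires nfsd_reply_cache_stats_open() into that filesystem lives in
 * fs/nfsd/nfsctl.c.
 */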