/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

static struct hlist_head *cache_hash;
static struct list_head lru_head;
static struct kmem_cache *drc_slab;

/* max number of entries allowed in the cache */
static unsigned int max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int maskbits;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */

/* total number of entries */
static unsigned int num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int drc_mem_usage;

/* longest hash chain seen */
static unsigned int longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int longest_chain_cachesize;

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void cache_cleaner_func(struct work_struct *unused);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
	.scan_objects = nfsd_reply_cache_scan,
	.count_objects = nfsd_reply_cache_count,
	.seeks	= 1,
};

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static DEFINE_SPINLOCK(cache_lock);
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
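/*
 * Worked example (illustrative, assuming 4k pages, i.e. PAGE_SHIFT == 12):
 * on a machine with 1GB of low memory, low_pages = 1GB / 4kB = 262144,
 * int_sqrt(262144) = 512, so limit = (16 * 512) << 2 = 32768 entries,
 * matching the table above. At ~1k per entry, that is roughly 32MB of DRC.
 */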
/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
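/*
 * For example (illustrative): with max_drc_entries = 32768 and
 * TARGET_BUCKET_SIZE = 64, nfsd_hashsize() returns
 * roundup_pow_of_two(32768 / 64) = 512 buckets, and nfsd_reply_cache_init()
 * then sets maskbits = ilog2(512) = 9.
 */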
static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep *rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
		INIT_HLIST_NODE(&rp->c_hash);
	}
	return rp;
}

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	if (!hlist_unhashed(&rp->c_hash))
		hlist_del(&rp->c_hash);
	list_del(&rp->c_lru);
	--num_drc_entries;
	drc_mem_usage -= sizeof(*rp);
	kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
	spin_lock(&cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&cache_lock);
}

int nfsd_reply_cache_init(void)
{
	unsigned int hashsize;

	INIT_LIST_HEAD(&lru_head);
	max_drc_entries = nfsd_cache_size_limit();
	num_drc_entries = 0;
	hashsize = nfsd_hashsize(max_drc_entries);
	maskbits = ilog2(hashsize);

	register_shrinker(&nfsd_reply_cache_shrinker);
	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	cache_hash = kcalloc(hashsize, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep *rp;

	unregister_shrinker(&nfsd_reply_cache_shrinker);
	cancel_delayed_work_sync(&cache_cleaner);

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		nfsd_reply_cache_free_locked(rp);
	}

	kfree(cache_hash);
	cache_hash = NULL;

	if (drc_slab) {
		kmem_cache_destroy(drc_slab);
		drc_slab = NULL;
	}
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &lru_head);
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits));
}

static inline bool
nfsd_cache_entry_expired(struct svc_cacherep *rp)
{
	return rp->c_state != RC_INPROG &&
	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(void)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
		if (!nfsd_cache_entry_expired(rp) &&
		    num_drc_entries <= max_drc_entries)
			break;
		nfsd_reply_cache_free_locked(rp);
		freed++;
	}

	/*
	 * Conditionally rearm the job. If we cleaned out the list, then
	 * cancel any pending run (since there won't be any work to do).
	 * Otherwise, we rearm the job or modify the existing one to run in
	 * RC_EXPIRE since we just ran the pruner.
	 */
	if (list_empty(&lru_head))
		cancel_delayed_work(&cache_cleaner);
	else
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
	return freed;
}

static void
cache_cleaner_func(struct work_struct *unused)
{
	spin_lock(&cache_lock);
	prune_cache_entries();
	spin_unlock(&cache_lock);
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long num;

	spin_lock(&cache_lock);
	num = num_drc_entries;
	spin_unlock(&cache_lock);

	return num;
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long freed;

	spin_lock(&cache_lock);
	freed = prune_cache_entries();
	spin_unlock(&cache_lock);
	return freed;
}

/*
 * Walk an xdr_buf and get a checksum of at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}

static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
	/* Check RPC header info first */
	if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc ||
	    rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers ||
	    rqstp->rq_arg.len != rp->c_len ||
	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
		return false;

	/* compare checksum of NFS data */
	if (csum != rp->c_csum) {
		++payload_misses;
		return false;
	}

	return true;
}
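/*
 * Note: payload_misses counts the case where the RPC header matched but the
 * checksummed payload did not. That typically means a client reused an XID
 * for a different request; without the payload checksum, such a request
 * would be wrongly answered out of the cache.
 */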
/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
{
	struct svc_cacherep *rp, *ret = NULL;
	struct hlist_head *rh;
	unsigned int entries = 0;

	rh = &cache_hash[hash_32(rqstp->rq_xid, maskbits)];
	hlist_for_each_entry(rp, rh, c_hash) {
		++entries;
		if (nfsd_cache_match(rqstp, csum, rp)) {
			ret = rp;
			break;
		}
	}

	/* tally hash chain length stats */
	if (entries > longest_chain) {
		longest_chain = entries;
		longest_chain_cachesize = num_drc_entries;
	} else if (entries == longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		longest_chain_cachesize = min(longest_chain_cachesize,
						num_drc_entries);
	}

	return ret;
}
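/*
 * nfsd_cache_lookup() below returns one of:
 *   RC_DOIT   - no usable cached entry; process the request normally
 *               (a fresh RC_INPROG entry is installed when possible)
 *   RC_REPLY  - a completed entry matched and the cached reply has been
 *               copied into rqstp->rq_res; just send it
 *   RC_DROPIT - the request matches one still in progress, or is being
 *               retransmitted too quickly; drop it
 */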
/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep *rp, *found;
	__be32 xid = rqstp->rq_xid;
	u32 proto = rqstp->rq_prot,
	    vers = rqstp->rq_vers,
	    proc = rqstp->rq_proc;
	__wsum csum;
	unsigned long age;
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return rtn;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry. First, try to reuse the first entry on the LRU
	 * if it works, then go ahead and prune the LRU list.
	 */
	spin_lock(&cache_lock);
	if (!list_empty(&lru_head)) {
		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
		if (nfsd_cache_entry_expired(rp) ||
		    num_drc_entries >= max_drc_entries) {
			lru_put_end(rp);
			prune_cache_entries();
			goto search_cache;
		}
	}

	/* No expired ones available, allocate a new one. */
	spin_unlock(&cache_lock);
	rp = nfsd_reply_cache_alloc();
	spin_lock(&cache_lock);
	if (likely(rp)) {
		++num_drc_entries;
		drc_mem_usage += sizeof(*rp);
	}

search_cache:
	found = nfsd_cache_search(rqstp, csum);
	if (found) {
		if (likely(rp))
			nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		goto out;
	}

	/*
	 * We're keeping the one we just allocated. Are we now over the
	 * limit? Prune one off the tip of the LRU in trade for the one we
	 * just allocated if so.
	 */
	if (num_drc_entries >= max_drc_entries)
		nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
						struct svc_cacherep, c_lru));

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	hash_refile(rp);
	lru_put_end(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}
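/*
 * Entry life cycle: nfsd_reply_cache_alloc() creates an entry as RC_UNUSED;
 * nfsd_cache_lookup() above marks it RC_INPROG while the request is being
 * processed; nfsd_cache_update() below marks it RC_DONE once the reply has
 * been cached (or frees it if the reply turns out to be uncacheable).
 */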
/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
	int len;
	size_t bufsize = 0;

	if (!rp)
		return;

	len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(rp);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(rp);
		return;
	}
	spin_lock(&cache_lock);
	drc_mem_usage += bufsize;
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec *vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	spin_lock(&cache_lock);
	seq_printf(m, "max entries: %u\n", max_drc_entries);
	seq_printf(m, "num entries: %u\n", num_drc_entries);
	seq_printf(m, "hash buckets: %u\n", 1 << maskbits);
	seq_printf(m, "mem usage: %u\n", drc_mem_usage);
	seq_printf(m, "cache hits: %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses: %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached: %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses: %u\n", payload_misses);
	seq_printf(m, "longest chain len: %u\n", longest_chain);
	seq_printf(m, "cachesize at longest: %u\n", longest_chain_cachesize);
	spin_unlock(&cache_lock);
	return 0;
}

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_reply_cache_stats_show, NULL);
}
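/*
 * Example output from the stats file backed by nfsd_reply_cache_stats_open()
 * (all values are illustrative only, not measured):
 *
 *   max entries: 32768
 *   num entries: 104
 *   hash buckets: 512
 *   mem usage: 22832
 *   cache hits: 16
 *   cache misses: 112
 *   not cached: 41
 *   payload misses: 0
 *   longest chain len: 2
 *   cachesize at longest: 96
 */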