// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
	struct list_head lru_head;
	spinlock_t cache_lock;
};

static struct nfsd_drc_bucket	*drc_hashtbl;
static struct kmem_cache	*drc_slab;

/* max number of entries allowed in the cache */
static unsigned int		max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int		maskbits;
static unsigned int		drc_hashsize;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */

/* total number of entries */
static atomic_t			num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int		payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int		drc_mem_usage;

/* longest hash chain seen */
static unsigned int		longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int		longest_chain_cachesize;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
	.scan_objects = nfsd_reply_cache_scan,
	.count_objects = nfsd_reply_cache_count,
	.seeks	= 1,
};

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
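
/*
 * Worked example of the limit math (a sketch, assuming 4K pages, i.e.
 * PAGE_SHIFT == 12): with 1GB of low memory, low_pages = 262144 and
 * int_sqrt(262144) = 512, so limit = (16 * 512) << (12 - 10) = 32768,
 * matching the 1GB row in the table above and well under the 256k cap.
 */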

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}

static u32
nfsd_cache_hash(__be32 xid)
{
	return hash_32(be32_to_cpu(xid), maskbits);
}

static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
	}
	return rp;
}

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	list_del(&rp->c_lru);
	atomic_dec(&num_drc_entries);
	drc_mem_usage -= sizeof(*rp);
	kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	spin_lock(&b->cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&b->cache_lock);
}

int nfsd_reply_cache_init(void)
{
	unsigned int hashsize;
	unsigned int i;
	int status = 0;

	max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&num_drc_entries, 0);
	hashsize = nfsd_hashsize(max_drc_entries);
	maskbits = ilog2(hashsize);

	status = register_shrinker(&nfsd_reply_cache_shrinker);
	if (status)
		return status;

	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	/* Fall back to a virtually-contiguous table if the physically
	 * contiguous allocation fails; kvfree() handles either case. */
	drc_hashtbl = kcalloc(hashsize, sizeof(*drc_hashtbl), GFP_KERNEL);
	if (!drc_hashtbl) {
		drc_hashtbl = vzalloc(hashsize * sizeof(*drc_hashtbl));
		if (!drc_hashtbl)
			goto out_nomem;
	}

	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&drc_hashtbl[i].lru_head);
		spin_lock_init(&drc_hashtbl[i].cache_lock);
	}
	drc_hashsize = hashsize;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;
	unsigned int i;

	unregister_shrinker(&nfsd_reply_cache_shrinker);

	for (i = 0; i < drc_hashsize; i++) {
		struct list_head *head = &drc_hashtbl[i].lru_head;
		while (!list_empty(head)) {
			rp = list_first_entry(head, struct svc_cacherep, c_lru);
			nfsd_reply_cache_free_locked(rp);
		}
	}

	kvfree(drc_hashtbl);
	drc_hashtbl = NULL;
	drc_hashsize = 0;

	kmem_cache_destroy(drc_slab);
	drc_slab = NULL;
}

/*
 * Move cache entry to end of LRU list.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
}
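
/*
 * Each bucket's lru_head list stays sorted by c_timestamp, oldest first,
 * because lru_put_end() re-stamps an entry every time it is inserted or
 * touched. prune_bucket() below relies on that invariant: once it meets
 * an entry that is not in progress, not yet expired, and the cache is
 * under its size cap, every later entry must be newer still, so it can
 * stop scanning.
 */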

static long
prune_bucket(struct nfsd_drc_bucket *b)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;
		if (atomic_read(&num_drc_entries) <= max_drc_entries &&
		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
			break;
		nfsd_reply_cache_free_locked(rp);
		freed++;
	}
	return freed;
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(void)
{
	unsigned int i;
	long freed = 0;

	for (i = 0; i < drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;
		spin_lock(&b->cache_lock);
		freed += prune_bucket(b);
		spin_unlock(&b->cache_lock);
	}
	return freed;
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return atomic_read(&num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return prune_cache_entries();
}

/*
 * Walk an xdr_buf and get a checksum of at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}
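
/*
 * Checksum walk, worked through with illustrative numbers (RC_CSUMLEN
 * comes from cache.h; 256 bytes at the time of writing): for a request
 * with a 120-byte head iovec, 4000 bytes of page data and page_base 0,
 * csum_len = min(120 + 4000, 256) = 256. The first csum_partial() call
 * covers the 120 head bytes, leaving csum_len = 136, and the loop then
 * folds in bytes 0..135 of the first page before terminating.
 */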

static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
	/* Check RPC XID first */
	if (rqstp->rq_xid != rp->c_xid)
		return false;
	/* compare checksum of NFS data */
	if (csum != rp->c_csum) {
		++payload_misses;
		return false;
	}

	/* Other discriminators */
	if (rqstp->rq_proc != rp->c_proc ||
	    rqstp->rq_prot != rp->c_prot ||
	    rqstp->rq_vers != rp->c_vers ||
	    rqstp->rq_arg.len != rp->c_len ||
	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
		return false;

	return true;
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
		__wsum csum)
{
	struct svc_cacherep	*rp, *ret = NULL;
	struct list_head 	*rh = &b->lru_head;
	unsigned int		entries = 0;

	list_for_each_entry(rp, rh, c_lru) {
		++entries;
		if (nfsd_cache_match(rqstp, csum, rp)) {
			ret = rp;
			break;
		}
	}

	/* tally hash chain length stats */
	if (entries > longest_chain) {
		longest_chain = entries;
		longest_chain_cachesize = atomic_read(&num_drc_entries);
	} else if (entries == longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		longest_chain_cachesize = min_t(unsigned int,
				longest_chain_cachesize,
				atomic_read(&num_drc_entries));
	}

	return ret;
}
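
/*
 * Caller-side sketch (an illustration of how dispatch code is expected
 * to pair the two entry points below, not a copy of the real
 * nfsd_dispatch()):
 *
 *	switch (nfsd_cache_lookup(rqstp)) {
 *	case RC_DROPIT:		// duplicate in progress or rapid rexmit:
 *		return 0;	//   drop the request without replying
 *	case RC_REPLY:		// cached reply already copied to rq_res:
 *		return 1;	//   just send it
 *	case RC_DOIT:		// no usable cache entry:
 *		break;		//   execute the procedure normally
 *	}
 *	...run the procedure, encode the reply into rq_res...
 *	nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp);
 */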

/*
 * Try to find an entry matching the current call in the cache. Since the
 * common case is a cache miss followed by an insert, a new entry is
 * preallocated before the bucket lock is taken; if the search then finds
 * a match inserted by another thread, the preallocated entry is freed
 * and the found one is used instead.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	__wsum			csum;
	u32 hash = nfsd_cache_hash(xid);
	struct nfsd_drc_bucket *b = &drc_hashtbl[hash];
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return rtn;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	rp = nfsd_reply_cache_alloc();
	spin_lock(&b->cache_lock);
	if (likely(rp)) {
		atomic_inc(&num_drc_entries);
		drc_mem_usage += sizeof(*rp);
	}

	/* go ahead and prune the cache */
	prune_bucket(b);

	found = nfsd_cache_search(b, rqstp, csum);
	if (found) {
		if (likely(rp))
			nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		goto out;
	}

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	lru_put_end(b, rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
 out:
	spin_unlock(&b->cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(b, rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	u32		hash;
	struct nfsd_drc_bucket *b;
	int		len;
	size_t		bufsize = 0;

	if (!rp)
		return;

	hash = nfsd_cache_hash(rp->c_xid);
	b = &drc_hashtbl[hash];

	len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp);
		return;
	}
	spin_lock(&b->cache_lock);
	drc_mem_usage += bufsize;
	lru_put_end(b, rp);
	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
}
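
/*
 * Worked example for the length math above: statp points at the status
 * word inside the reply's head iovec, so resv->iov_len minus the offset
 * of statp within the iovec is the byte count from the status word to
 * the end of the encoded reply. len >>= 2 converts that to 32-bit XDR
 * words, and the (256 >> 2) test therefore caps cacheable replies at
 * 64 words, i.e. 256 bytes.
 */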

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max entries: %u\n", max_drc_entries);
	seq_printf(m, "num entries: %u\n",
			atomic_read(&num_drc_entries));
	seq_printf(m, "hash buckets: %u\n", 1 << maskbits);
	seq_printf(m, "mem usage: %u\n", drc_mem_usage);
	seq_printf(m, "cache hits: %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses: %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached: %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses: %u\n", payload_misses);
	seq_printf(m, "longest chain len: %u\n", longest_chain);
	seq_printf(m, "cachesize at longest: %u\n", longest_chain_cachesize);
	return 0;
}

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_reply_cache_stats_show, NULL);
}
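
/*
 * Sample of the resulting output, with purely illustrative values (the
 * stats file is typically exposed as "reply_cache_stats" in the nfsd
 * filesystem, usually mounted at /proc/fs/nfsd):
 *
 *	max entries: 32768
 *	num entries: 12
 *	hash buckets: 512
 *	mem usage: 2528
 *	cache hits: 0
 *	cache misses: 14
 *	not cached: 7
 *	payload misses: 0
 *	longest chain len: 1
 *	cachesize at longest: 1
 */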