1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Request reply cache. This is currently a global cache, but this may 4 * change in the future and be a per-client cache. 5 * 6 * This code is heavily inspired by the 44BSD implementation, although 7 * it does things a bit differently. 8 * 9 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> 10 */ 11 12 #include <linux/sunrpc/svc_xprt.h> 13 #include <linux/slab.h> 14 #include <linux/vmalloc.h> 15 #include <linux/sunrpc/addr.h> 16 #include <linux/highmem.h> 17 #include <linux/log2.h> 18 #include <linux/hash.h> 19 #include <net/checksum.h> 20 21 #include "nfsd.h" 22 #include "cache.h" 23 #include "trace.h" 24 25 /* 26 * We use this value to determine the number of hash buckets from the max 27 * cache size, the idea being that when the cache is at its maximum number 28 * of entries, then this should be the average number of entries per bucket. 29 */ 30 #define TARGET_BUCKET_SIZE 64 31 32 struct nfsd_drc_bucket { 33 struct rb_root rb_head; 34 struct list_head lru_head; 35 spinlock_t cache_lock; 36 }; 37 38 static struct kmem_cache *drc_slab; 39 40 static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec); 41 static unsigned long nfsd_reply_cache_count(struct shrinker *shrink, 42 struct shrink_control *sc); 43 static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink, 44 struct shrink_control *sc); 45 46 /* 47 * Put a cap on the size of the DRC based on the amount of available 48 * low memory in the machine. 49 * 50 * 64MB: 8192 51 * 128MB: 11585 52 * 256MB: 16384 53 * 512MB: 23170 54 * 1GB: 32768 55 * 2GB: 46340 56 * 4GB: 65536 57 * 8GB: 92681 58 * 16GB: 131072 59 * 60 * ...with a hard cap of 256k entries. In the worst case, each entry will be 61 * ~1k, so the above numbers should give a rough max of the amount of memory 62 * used in k. 63 * 64 * XXX: these limits are per-container, so memory used will increase 65 * linearly with number of containers. Maybe that's OK. 66 */ 67 static unsigned int 68 nfsd_cache_size_limit(void) 69 { 70 unsigned int limit; 71 unsigned long low_pages = totalram_pages() - totalhigh_pages(); 72 73 limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10); 74 return min_t(unsigned int, limit, 256*1024); 75 } 76 77 /* 78 * Compute the number of hash buckets we need. Divide the max cachesize by 79 * the "target" max bucket size, and round up to next power of two. 80 */ 81 static unsigned int 82 nfsd_hashsize(unsigned int limit) 83 { 84 return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE); 85 } 86 87 static struct svc_cacherep * 88 nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum, 89 struct nfsd_net *nn) 90 { 91 struct svc_cacherep *rp; 92 93 rp = kmem_cache_alloc(drc_slab, GFP_KERNEL); 94 if (rp) { 95 rp->c_state = RC_UNUSED; 96 rp->c_type = RC_NOCACHE; 97 RB_CLEAR_NODE(&rp->c_node); 98 INIT_LIST_HEAD(&rp->c_lru); 99 100 memset(&rp->c_key, 0, sizeof(rp->c_key)); 101 rp->c_key.k_xid = rqstp->rq_xid; 102 rp->c_key.k_proc = rqstp->rq_proc; 103 rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp)); 104 rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp))); 105 rp->c_key.k_prot = rqstp->rq_prot; 106 rp->c_key.k_vers = rqstp->rq_vers; 107 rp->c_key.k_len = rqstp->rq_arg.len; 108 rp->c_key.k_csum = csum; 109 } 110 return rp; 111 } 112 113 static void 114 nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp, 115 struct nfsd_net *nn) 116 { 117 if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) { 118 nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len); 119 kfree(rp->c_replvec.iov_base); 120 } 121 if (rp->c_state != RC_UNUSED) { 122 rb_erase(&rp->c_node, &b->rb_head); 123 list_del(&rp->c_lru); 124 atomic_dec(&nn->num_drc_entries); 125 nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp)); 126 } 127 kmem_cache_free(drc_slab, rp); 128 } 129 130 static void 131 nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp, 132 struct nfsd_net *nn) 133 { 134 spin_lock(&b->cache_lock); 135 nfsd_reply_cache_free_locked(b, rp, nn); 136 spin_unlock(&b->cache_lock); 137 } 138 139 int nfsd_drc_slab_create(void) 140 { 141 drc_slab = kmem_cache_create("nfsd_drc", 142 sizeof(struct svc_cacherep), 0, 0, NULL); 143 return drc_slab ? 0: -ENOMEM; 144 } 145 146 void nfsd_drc_slab_free(void) 147 { 148 kmem_cache_destroy(drc_slab); 149 } 150 151 /** 152 * nfsd_net_reply_cache_init - per net namespace reply cache set-up 153 * @nn: nfsd_net being initialized 154 * 155 * Returns zero on succes; otherwise a negative errno is returned. 156 */ 157 int nfsd_net_reply_cache_init(struct nfsd_net *nn) 158 { 159 return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM); 160 } 161 162 /** 163 * nfsd_net_reply_cache_destroy - per net namespace reply cache tear-down 164 * @nn: nfsd_net being freed 165 * 166 */ 167 void nfsd_net_reply_cache_destroy(struct nfsd_net *nn) 168 { 169 nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM); 170 } 171 172 int nfsd_reply_cache_init(struct nfsd_net *nn) 173 { 174 unsigned int hashsize; 175 unsigned int i; 176 int status = 0; 177 178 nn->max_drc_entries = nfsd_cache_size_limit(); 179 atomic_set(&nn->num_drc_entries, 0); 180 hashsize = nfsd_hashsize(nn->max_drc_entries); 181 nn->maskbits = ilog2(hashsize); 182 183 nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan; 184 nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count; 185 nn->nfsd_reply_cache_shrinker.seeks = 1; 186 status = register_shrinker(&nn->nfsd_reply_cache_shrinker, 187 "nfsd-reply:%s", nn->nfsd_name); 188 if (status) 189 return status; 190 191 nn->drc_hashtbl = kvzalloc(array_size(hashsize, 192 sizeof(*nn->drc_hashtbl)), GFP_KERNEL); 193 if (!nn->drc_hashtbl) 194 goto out_shrinker; 195 196 for (i = 0; i < hashsize; i++) { 197 INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head); 198 spin_lock_init(&nn->drc_hashtbl[i].cache_lock); 199 } 200 nn->drc_hashsize = hashsize; 201 202 return 0; 203 out_shrinker: 204 unregister_shrinker(&nn->nfsd_reply_cache_shrinker); 205 printk(KERN_ERR "nfsd: failed to allocate reply cache\n"); 206 return -ENOMEM; 207 } 208 209 void nfsd_reply_cache_shutdown(struct nfsd_net *nn) 210 { 211 struct svc_cacherep *rp; 212 unsigned int i; 213 214 unregister_shrinker(&nn->nfsd_reply_cache_shrinker); 215 216 for (i = 0; i < nn->drc_hashsize; i++) { 217 struct list_head *head = &nn->drc_hashtbl[i].lru_head; 218 while (!list_empty(head)) { 219 rp = list_first_entry(head, struct svc_cacherep, c_lru); 220 nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i], 221 rp, nn); 222 } 223 } 224 225 kvfree(nn->drc_hashtbl); 226 nn->drc_hashtbl = NULL; 227 nn->drc_hashsize = 0; 228 229 } 230 231 /* 232 * Move cache entry to end of LRU list, and queue the cleaner to run if it's 233 * not already scheduled. 234 */ 235 static void 236 lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp) 237 { 238 rp->c_timestamp = jiffies; 239 list_move_tail(&rp->c_lru, &b->lru_head); 240 } 241 242 static noinline struct nfsd_drc_bucket * 243 nfsd_cache_bucket_find(__be32 xid, struct nfsd_net *nn) 244 { 245 unsigned int hash = hash_32((__force u32)xid, nn->maskbits); 246 247 return &nn->drc_hashtbl[hash]; 248 } 249 250 static long prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn, 251 unsigned int max) 252 { 253 struct svc_cacherep *rp, *tmp; 254 long freed = 0; 255 256 list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) { 257 /* 258 * Don't free entries attached to calls that are still 259 * in-progress, but do keep scanning the list. 260 */ 261 if (rp->c_state == RC_INPROG) 262 continue; 263 if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries && 264 time_before(jiffies, rp->c_timestamp + RC_EXPIRE)) 265 break; 266 nfsd_reply_cache_free_locked(b, rp, nn); 267 if (max && freed++ > max) 268 break; 269 } 270 return freed; 271 } 272 273 static long nfsd_prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn) 274 { 275 return prune_bucket(b, nn, 3); 276 } 277 278 /* 279 * Walk the LRU list and prune off entries that are older than RC_EXPIRE. 280 * Also prune the oldest ones when the total exceeds the max number of entries. 281 */ 282 static long 283 prune_cache_entries(struct nfsd_net *nn) 284 { 285 unsigned int i; 286 long freed = 0; 287 288 for (i = 0; i < nn->drc_hashsize; i++) { 289 struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i]; 290 291 if (list_empty(&b->lru_head)) 292 continue; 293 spin_lock(&b->cache_lock); 294 freed += prune_bucket(b, nn, 0); 295 spin_unlock(&b->cache_lock); 296 } 297 return freed; 298 } 299 300 static unsigned long 301 nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc) 302 { 303 struct nfsd_net *nn = container_of(shrink, 304 struct nfsd_net, nfsd_reply_cache_shrinker); 305 306 return atomic_read(&nn->num_drc_entries); 307 } 308 309 static unsigned long 310 nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc) 311 { 312 struct nfsd_net *nn = container_of(shrink, 313 struct nfsd_net, nfsd_reply_cache_shrinker); 314 315 return prune_cache_entries(nn); 316 } 317 /* 318 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes 319 */ 320 static __wsum 321 nfsd_cache_csum(struct svc_rqst *rqstp) 322 { 323 int idx; 324 unsigned int base; 325 __wsum csum; 326 struct xdr_buf *buf = &rqstp->rq_arg; 327 const unsigned char *p = buf->head[0].iov_base; 328 size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len, 329 RC_CSUMLEN); 330 size_t len = min(buf->head[0].iov_len, csum_len); 331 332 /* rq_arg.head first */ 333 csum = csum_partial(p, len, 0); 334 csum_len -= len; 335 336 /* Continue into page array */ 337 idx = buf->page_base / PAGE_SIZE; 338 base = buf->page_base & ~PAGE_MASK; 339 while (csum_len) { 340 p = page_address(buf->pages[idx]) + base; 341 len = min_t(size_t, PAGE_SIZE - base, csum_len); 342 csum = csum_partial(p, len, csum); 343 csum_len -= len; 344 base = 0; 345 ++idx; 346 } 347 return csum; 348 } 349 350 static int 351 nfsd_cache_key_cmp(const struct svc_cacherep *key, 352 const struct svc_cacherep *rp, struct nfsd_net *nn) 353 { 354 if (key->c_key.k_xid == rp->c_key.k_xid && 355 key->c_key.k_csum != rp->c_key.k_csum) { 356 nfsd_stats_payload_misses_inc(nn); 357 trace_nfsd_drc_mismatch(nn, key, rp); 358 } 359 360 return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key)); 361 } 362 363 /* 364 * Search the request hash for an entry that matches the given rqstp. 365 * Must be called with cache_lock held. Returns the found entry or 366 * inserts an empty key on failure. 367 */ 368 static struct svc_cacherep * 369 nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key, 370 struct nfsd_net *nn) 371 { 372 struct svc_cacherep *rp, *ret = key; 373 struct rb_node **p = &b->rb_head.rb_node, 374 *parent = NULL; 375 unsigned int entries = 0; 376 int cmp; 377 378 while (*p != NULL) { 379 ++entries; 380 parent = *p; 381 rp = rb_entry(parent, struct svc_cacherep, c_node); 382 383 cmp = nfsd_cache_key_cmp(key, rp, nn); 384 if (cmp < 0) 385 p = &parent->rb_left; 386 else if (cmp > 0) 387 p = &parent->rb_right; 388 else { 389 ret = rp; 390 goto out; 391 } 392 } 393 rb_link_node(&key->c_node, parent, p); 394 rb_insert_color(&key->c_node, &b->rb_head); 395 out: 396 /* tally hash chain length stats */ 397 if (entries > nn->longest_chain) { 398 nn->longest_chain = entries; 399 nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries); 400 } else if (entries == nn->longest_chain) { 401 /* prefer to keep the smallest cachesize possible here */ 402 nn->longest_chain_cachesize = min_t(unsigned int, 403 nn->longest_chain_cachesize, 404 atomic_read(&nn->num_drc_entries)); 405 } 406 407 lru_put_end(b, ret); 408 return ret; 409 } 410 411 /** 412 * nfsd_cache_lookup - Find an entry in the duplicate reply cache 413 * @rqstp: Incoming Call to find 414 * 415 * Try to find an entry matching the current call in the cache. When none 416 * is found, we try to grab the oldest expired entry off the LRU list. If 417 * a suitable one isn't there, then drop the cache_lock and allocate a 418 * new one, then search again in case one got inserted while this thread 419 * didn't hold the lock. 420 * 421 * Return values: 422 * %RC_DOIT: Process the request normally 423 * %RC_REPLY: Reply from cache 424 * %RC_DROPIT: Do not process the request further 425 */ 426 int nfsd_cache_lookup(struct svc_rqst *rqstp) 427 { 428 struct nfsd_net *nn; 429 struct svc_cacherep *rp, *found; 430 __wsum csum; 431 struct nfsd_drc_bucket *b; 432 int type = rqstp->rq_cachetype; 433 int rtn = RC_DOIT; 434 435 rqstp->rq_cacherep = NULL; 436 if (type == RC_NOCACHE) { 437 nfsd_stats_rc_nocache_inc(); 438 goto out; 439 } 440 441 csum = nfsd_cache_csum(rqstp); 442 443 /* 444 * Since the common case is a cache miss followed by an insert, 445 * preallocate an entry. 446 */ 447 nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 448 rp = nfsd_reply_cache_alloc(rqstp, csum, nn); 449 if (!rp) 450 goto out; 451 452 b = nfsd_cache_bucket_find(rqstp->rq_xid, nn); 453 spin_lock(&b->cache_lock); 454 found = nfsd_cache_insert(b, rp, nn); 455 if (found != rp) 456 goto found_entry; 457 458 nfsd_stats_rc_misses_inc(); 459 rqstp->rq_cacherep = rp; 460 rp->c_state = RC_INPROG; 461 462 atomic_inc(&nn->num_drc_entries); 463 nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp)); 464 465 nfsd_prune_bucket(b, nn); 466 467 out_unlock: 468 spin_unlock(&b->cache_lock); 469 out: 470 return rtn; 471 472 found_entry: 473 /* We found a matching entry which is either in progress or done. */ 474 nfsd_reply_cache_free_locked(NULL, rp, nn); 475 nfsd_stats_rc_hits_inc(); 476 rtn = RC_DROPIT; 477 rp = found; 478 479 /* Request being processed */ 480 if (rp->c_state == RC_INPROG) 481 goto out_trace; 482 483 /* From the hall of fame of impractical attacks: 484 * Is this a user who tries to snoop on the cache? */ 485 rtn = RC_DOIT; 486 if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure) 487 goto out_trace; 488 489 /* Compose RPC reply header */ 490 switch (rp->c_type) { 491 case RC_NOCACHE: 492 break; 493 case RC_REPLSTAT: 494 xdr_stream_encode_be32(&rqstp->rq_res_stream, rp->c_replstat); 495 rtn = RC_REPLY; 496 break; 497 case RC_REPLBUFF: 498 if (!nfsd_cache_append(rqstp, &rp->c_replvec)) 499 goto out_unlock; /* should not happen */ 500 rtn = RC_REPLY; 501 break; 502 default: 503 WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type); 504 } 505 506 out_trace: 507 trace_nfsd_drc_found(nn, rqstp, rtn); 508 goto out_unlock; 509 } 510 511 /** 512 * nfsd_cache_update - Update an entry in the duplicate reply cache. 513 * @rqstp: svc_rqst with a finished Reply 514 * @cachetype: which cache to update 515 * @statp: pointer to Reply's NFS status code, or NULL 516 * 517 * This is called from nfsd_dispatch when the procedure has been 518 * executed and the complete reply is in rqstp->rq_res. 519 * 520 * We're copying around data here rather than swapping buffers because 521 * the toplevel loop requires max-sized buffers, which would be a waste 522 * of memory for a cache with a max reply size of 100 bytes (diropokres). 523 * 524 * If we should start to use different types of cache entries tailored 525 * specifically for attrstat and fh's, we may save even more space. 526 * 527 * Also note that a cachetype of RC_NOCACHE can legally be passed when 528 * nfsd failed to encode a reply that otherwise would have been cached. 529 * In this case, nfsd_cache_update is called with statp == NULL. 530 */ 531 void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp) 532 { 533 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 534 struct svc_cacherep *rp = rqstp->rq_cacherep; 535 struct kvec *resv = &rqstp->rq_res.head[0], *cachv; 536 struct nfsd_drc_bucket *b; 537 int len; 538 size_t bufsize = 0; 539 540 if (!rp) 541 return; 542 543 b = nfsd_cache_bucket_find(rp->c_key.k_xid, nn); 544 545 len = resv->iov_len - ((char*)statp - (char*)resv->iov_base); 546 len >>= 2; 547 548 /* Don't cache excessive amounts of data and XDR failures */ 549 if (!statp || len > (256 >> 2)) { 550 nfsd_reply_cache_free(b, rp, nn); 551 return; 552 } 553 554 switch (cachetype) { 555 case RC_REPLSTAT: 556 if (len != 1) 557 printk("nfsd: RC_REPLSTAT/reply len %d!\n",len); 558 rp->c_replstat = *statp; 559 break; 560 case RC_REPLBUFF: 561 cachv = &rp->c_replvec; 562 bufsize = len << 2; 563 cachv->iov_base = kmalloc(bufsize, GFP_KERNEL); 564 if (!cachv->iov_base) { 565 nfsd_reply_cache_free(b, rp, nn); 566 return; 567 } 568 cachv->iov_len = bufsize; 569 memcpy(cachv->iov_base, statp, bufsize); 570 break; 571 case RC_NOCACHE: 572 nfsd_reply_cache_free(b, rp, nn); 573 return; 574 } 575 spin_lock(&b->cache_lock); 576 nfsd_stats_drc_mem_usage_add(nn, bufsize); 577 lru_put_end(b, rp); 578 rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags); 579 rp->c_type = cachetype; 580 rp->c_state = RC_DONE; 581 spin_unlock(&b->cache_lock); 582 return; 583 } 584 585 /* 586 * Copy cached reply to current reply buffer. Should always fit. 587 * FIXME as reply is in a page, we should just attach the page, and 588 * keep a refcount.... 589 */ 590 static int 591 nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data) 592 { 593 struct kvec *vec = &rqstp->rq_res.head[0]; 594 595 if (vec->iov_len + data->iov_len > PAGE_SIZE) { 596 printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n", 597 data->iov_len); 598 return 0; 599 } 600 memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len); 601 vec->iov_len += data->iov_len; 602 return 1; 603 } 604 605 /* 606 * Note that fields may be added, removed or reordered in the future. Programs 607 * scraping this file for info should test the labels to ensure they're 608 * getting the correct field. 609 */ 610 int nfsd_reply_cache_stats_show(struct seq_file *m, void *v) 611 { 612 struct nfsd_net *nn = net_generic(file_inode(m->file)->i_sb->s_fs_info, 613 nfsd_net_id); 614 615 seq_printf(m, "max entries: %u\n", nn->max_drc_entries); 616 seq_printf(m, "num entries: %u\n", 617 atomic_read(&nn->num_drc_entries)); 618 seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits); 619 seq_printf(m, "mem usage: %lld\n", 620 percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE])); 621 seq_printf(m, "cache hits: %lld\n", 622 percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS])); 623 seq_printf(m, "cache misses: %lld\n", 624 percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES])); 625 seq_printf(m, "not cached: %lld\n", 626 percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE])); 627 seq_printf(m, "payload misses: %lld\n", 628 percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES])); 629 seq_printf(m, "longest chain len: %u\n", nn->longest_chain); 630 seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize); 631 return 0; 632 } 633