// SPDX-License-Identifier: GPL-2.0-or-later
/* Daemon interface
 *
 * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/mount.h>
#include <linux/statfs.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/fs_struct.h>
#include "internal.h"

static int cachefiles_daemon_open(struct inode *, struct file *);
static int cachefiles_daemon_release(struct inode *, struct file *);
static ssize_t cachefiles_daemon_read(struct file *, char __user *, size_t,
				      loff_t *);
static ssize_t cachefiles_daemon_write(struct file *, const char __user *,
				       size_t, loff_t *);
static __poll_t cachefiles_daemon_poll(struct file *,
				       struct poll_table_struct *);
static int cachefiles_daemon_frun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fcull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fstop(struct cachefiles_cache *, char *);
static int cachefiles_daemon_brun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bcull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bstop(struct cachefiles_cache *, char *);
static int cachefiles_daemon_cull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_debug(struct cachefiles_cache *, char *);
static int cachefiles_daemon_dir(struct cachefiles_cache *, char *);
static int cachefiles_daemon_inuse(struct cachefiles_cache *, char *);
static int cachefiles_daemon_secctx(struct cachefiles_cache *, char *);
static int cachefiles_daemon_tag(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bind(struct cachefiles_cache *, char *);
static void cachefiles_daemon_unbind(struct cachefiles_cache *);

static unsigned long cachefiles_open;

const struct file_operations cachefiles_daemon_fops = {
	.owner		= THIS_MODULE,
	.open		= cachefiles_daemon_open,
	.release	= cachefiles_daemon_release,
	.read		= cachefiles_daemon_read,
	.write		= cachefiles_daemon_write,
	.poll		= cachefiles_daemon_poll,
	.llseek		= noop_llseek,
};

struct cachefiles_daemon_cmd {
	char name[8];
	int (*handler)(struct cachefiles_cache *cache, char *args);
};

static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
	{ "bind",	cachefiles_daemon_bind		},
	{ "brun",	cachefiles_daemon_brun		},
	{ "bcull",	cachefiles_daemon_bcull		},
	{ "bstop",	cachefiles_daemon_bstop		},
	{ "cull",	cachefiles_daemon_cull		},
	{ "debug",	cachefiles_daemon_debug		},
	{ "dir",	cachefiles_daemon_dir		},
	{ "frun",	cachefiles_daemon_frun		},
	{ "fcull",	cachefiles_daemon_fcull		},
	{ "fstop",	cachefiles_daemon_fstop		},
	{ "inuse",	cachefiles_daemon_inuse		},
	{ "secctx",	cachefiles_daemon_secctx	},
	{ "tag",	cachefiles_daemon_tag		},
#ifdef CONFIG_CACHEFILES_ONDEMAND
	{ "copen",	cachefiles_ondemand_copen	},
	{ "restore",	cachefiles_ondemand_restore	},
#endif
	{ "",		NULL				}
};
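
/*
 * Illustrative note (not part of the original source): cachefilesd drives
 * this table by writing one keyword line at a time to the control file,
 * typically /dev/cachefiles.  A plausible setup sequence, assuming the
 * conventional cache location used by cachefilesd, might be:
 *
 *	dir /var/cache/fscache
 *	tag mycache
 *	brun 10%
 *	bcull 7%
 *	bstop 3%
 *	bind
 *
 * Each line is dispatched to the matching handler below; "bind" must come
 * last, once the directory and limits have been configured.
 */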

/*
 * Prepare a cache for caching.
 */
static int cachefiles_daemon_open(struct inode *inode, struct file *file)
{
	struct cachefiles_cache *cache;

	_enter("");

	/* only the superuser may do this */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* the cachefiles device may only be open once at a time */
	if (xchg(&cachefiles_open, 1) == 1)
		return -EBUSY;

	/* allocate a cache record */
	cache = kzalloc(sizeof(struct cachefiles_cache), GFP_KERNEL);
	if (!cache) {
		cachefiles_open = 0;
		return -ENOMEM;
	}

	mutex_init(&cache->daemon_mutex);
	init_waitqueue_head(&cache->daemon_pollwq);
	INIT_LIST_HEAD(&cache->volumes);
	INIT_LIST_HEAD(&cache->object_list);
	spin_lock_init(&cache->object_list_lock);
	refcount_set(&cache->unbind_pincount, 1);
	xa_init_flags(&cache->reqs, XA_FLAGS_ALLOC);
	xa_init_flags(&cache->ondemand_ids, XA_FLAGS_ALLOC1);

	/* set default caching limits
	 * - limit at 1% free space and/or free files
	 * - cull below 5% free space and/or free files
	 * - cease culling above 7% free space and/or free files
	 */
	cache->frun_percent = 7;
	cache->fcull_percent = 5;
	cache->fstop_percent = 1;
	cache->brun_percent = 7;
	cache->bcull_percent = 5;
	cache->bstop_percent = 1;

	file->private_data = cache;
	cache->cachefilesd = file;
	return 0;
}

void cachefiles_flush_reqs(struct cachefiles_cache *cache)
{
	struct xarray *xa = &cache->reqs;
	struct cachefiles_req *req;
	unsigned long index;

	/*
	 * Make sure the following two operations won't be reordered.
	 *   1) set CACHEFILES_DEAD bit
	 *   2) flush requests in the xarray
	 * Otherwise the request may be enqueued after xarray has been
	 * flushed, leaving the orphan request never being completed.
	 *
	 * CPU 1			CPU 2
	 * =====			=====
	 * flush requests in the xarray
	 *				test CACHEFILES_DEAD bit
	 *				enqueue the request
	 * set CACHEFILES_DEAD bit
	 */
	smp_mb();

	xa_lock(xa);
	xa_for_each(xa, index, req) {
		req->error = -EIO;
		complete(&req->done);
		__xa_erase(xa, index);
	}
	xa_unlock(xa);

	xa_destroy(&cache->reqs);
	xa_destroy(&cache->ondemand_ids);
}

void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache)
{
	if (refcount_dec_and_test(&cache->unbind_pincount)) {
		cachefiles_daemon_unbind(cache);
		cachefiles_open = 0;
		kfree(cache);
	}
}

void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache)
{
	refcount_inc(&cache->unbind_pincount);
}
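
/*
 * Illustrative note (not part of the original source): the unbind pincount is
 * taken once at open time and again, in on-demand mode, for work that must
 * finish before the cache can be torn down.  Only when the last reference is
 * put does cachefiles_daemon_unbind() run and the cache record get freed, so
 * closing /dev/cachefiles does not necessarily free the cache immediately.
 */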

/*
 * Release a cache.
 */
static int cachefiles_daemon_release(struct inode *inode, struct file *file)
{
	struct cachefiles_cache *cache = file->private_data;

	_enter("");

	ASSERT(cache);

	set_bit(CACHEFILES_DEAD, &cache->flags);

	if (cachefiles_in_ondemand_mode(cache))
		cachefiles_flush_reqs(cache);

	/* clean up the control file interface */
	cache->cachefilesd = NULL;
	file->private_data = NULL;

	cachefiles_put_unbind_pincount(cache);

	_leave("");
	return 0;
}

static ssize_t cachefiles_do_daemon_read(struct cachefiles_cache *cache,
					 char __user *_buffer, size_t buflen)
{
	unsigned long long b_released;
	unsigned f_released;
	char buffer[256];
	int n;

	/* check how much space the cache has */
	cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);

	/* summarise */
	f_released = atomic_xchg(&cache->f_released, 0);
	b_released = atomic_long_xchg(&cache->b_released, 0);
	clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags);

	n = snprintf(buffer, sizeof(buffer),
		     "cull=%c"
		     " frun=%llx"
		     " fcull=%llx"
		     " fstop=%llx"
		     " brun=%llx"
		     " bcull=%llx"
		     " bstop=%llx"
		     " freleased=%x"
		     " breleased=%llx",
		     test_bit(CACHEFILES_CULLING, &cache->flags) ? '1' : '0',
		     (unsigned long long) cache->frun,
		     (unsigned long long) cache->fcull,
		     (unsigned long long) cache->fstop,
		     (unsigned long long) cache->brun,
		     (unsigned long long) cache->bcull,
		     (unsigned long long) cache->bstop,
		     f_released,
		     b_released);

	if (n > buflen)
		return -EMSGSIZE;

	if (copy_to_user(_buffer, buffer, n) != 0)
		return -EFAULT;

	return n;
}

/*
 * Read the cache state.
 */
static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
				      size_t buflen, loff_t *pos)
{
	struct cachefiles_cache *cache = file->private_data;

	//_enter(",,%zu,", buflen);

	if (!test_bit(CACHEFILES_READY, &cache->flags))
		return 0;

	if (cachefiles_in_ondemand_mode(cache))
		return cachefiles_ondemand_daemon_read(cache, _buffer, buflen);
	else
		return cachefiles_do_daemon_read(cache, _buffer, buflen);
}
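
/*
 * Illustrative note (not part of the original source): a read in the normal
 * (non-ondemand) mode returns a single status line in the format built by
 * cachefiles_do_daemon_read() above, with the limits reported in hex.  The
 * values below are made up purely for illustration:
 *
 *	cull=0 frun=3e8 fcull=2bc fstop=c8 brun=7d0 bcull=578 bstop=190 freleased=0 breleased=0
 *
 * The actual numbers depend on the size of the backing filesystem.
 */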

/*
 * Take a command from cachefilesd, parse it and act on it.
 */
static ssize_t cachefiles_daemon_write(struct file *file,
				       const char __user *_data,
				       size_t datalen,
				       loff_t *pos)
{
	const struct cachefiles_daemon_cmd *cmd;
	struct cachefiles_cache *cache = file->private_data;
	ssize_t ret;
	char *data, *args, *cp;

	//_enter(",,%zu,", datalen);

	ASSERT(cache);

	if (test_bit(CACHEFILES_DEAD, &cache->flags))
		return -EIO;

	if (datalen > PAGE_SIZE - 1)
		return -EOPNOTSUPP;

	/* drag the command string into the kernel so we can parse it */
	data = memdup_user_nul(_data, datalen);
	if (IS_ERR(data))
		return PTR_ERR(data);

	ret = -EINVAL;
	if (memchr(data, '\0', datalen))
		goto error;

	/* strip any newline */
	cp = memchr(data, '\n', datalen);
	if (cp) {
		if (cp == data)
			goto error;

		*cp = '\0';
	}

	/* parse the command */
	ret = -EOPNOTSUPP;

	for (args = data; *args; args++)
		if (isspace(*args))
			break;
	if (*args) {
		if (args == data)
			goto error;
		*args = '\0';
		args = skip_spaces(++args);
	}

	/* run the appropriate command handler */
	for (cmd = cachefiles_daemon_cmds; cmd->name[0]; cmd++)
		if (strcmp(cmd->name, data) == 0)
			goto found_command;

error:
	kfree(data);
	//_leave(" = %zd", ret);
	return ret;

found_command:
	mutex_lock(&cache->daemon_mutex);

	ret = -EIO;
	if (!test_bit(CACHEFILES_DEAD, &cache->flags))
		ret = cmd->handler(cache, args);

	mutex_unlock(&cache->daemon_mutex);

	if (ret == 0)
		ret = datalen;
	goto error;
}

/*
 * Poll for culling state
 * - use EPOLLOUT to indicate culling state
 */
static __poll_t cachefiles_daemon_poll(struct file *file,
				       struct poll_table_struct *poll)
{
	struct cachefiles_cache *cache = file->private_data;
	__poll_t mask;

	poll_wait(file, &cache->daemon_pollwq, poll);
	mask = 0;

	if (cachefiles_in_ondemand_mode(cache)) {
		if (!xa_empty(&cache->reqs))
			mask |= EPOLLIN;
	} else {
		if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
			mask |= EPOLLIN;
	}

	if (test_bit(CACHEFILES_CULLING, &cache->flags))
		mask |= EPOLLOUT;

	return mask;
}

/*
 * Give a range error for cache space constraints
 * - can be tail-called
 */
static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
					 char *args)
{
	pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n");

	return -EINVAL;
}

/*
 * Set the percentage of files at which to stop culling
 * - command: "frun <N>%"
 */
static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args)
{
	unsigned long frun;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	frun = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (frun <= cache->fcull_percent || frun >= 100)
		return cachefiles_daemon_range_error(cache, args);

	cache->frun_percent = frun;
	return 0;
}

/*
 * Set the percentage of files at which to start culling
 * - command: "fcull <N>%"
 */
static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args)
{
	unsigned long fcull;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	fcull = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (fcull <= cache->fstop_percent || fcull >= cache->frun_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->fcull_percent = fcull;
	return 0;
}
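
/*
 * Illustrative note (not part of the original source): frun/fcull/fstop (and
 * the b* equivalents below) must satisfy 0 <= stop < cull < run < 100, which
 * the checks above and in cachefiles_daemon_bind() enforce.  With the
 * defaults of run=7%, cull=5% and stop=1%: culling starts once free space
 * drops below 5%, stops again when it climbs back above 7%, and cache
 * allocation is refused outright below 1%.
 */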

/*
 * Set the percentage of files at which to stop allocating
 * - command: "fstop <N>%"
 */
static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
{
	unsigned long fstop;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	fstop = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (fstop >= cache->fcull_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->fstop_percent = fstop;
	return 0;
}

/*
 * Set the percentage of blocks at which to stop culling
 * - command: "brun <N>%"
 */
static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args)
{
	unsigned long brun;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	brun = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (brun <= cache->bcull_percent || brun >= 100)
		return cachefiles_daemon_range_error(cache, args);

	cache->brun_percent = brun;
	return 0;
}

/*
 * Set the percentage of blocks at which to start culling
 * - command: "bcull <N>%"
 */
static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args)
{
	unsigned long bcull;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	bcull = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (bcull <= cache->bstop_percent || bcull >= cache->brun_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->bcull_percent = bcull;
	return 0;
}

/*
 * Set the percentage of blocks at which to stop allocating
 * - command: "bstop <N>%"
 */
static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
{
	unsigned long bstop;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	bstop = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (bstop >= cache->bcull_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->bstop_percent = bstop;
	return 0;
}

/*
 * Set the cache directory
 * - command: "dir <name>"
 */
static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
{
	char *dir;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty directory specified\n");
		return -EINVAL;
	}

	if (cache->rootdirname) {
		pr_err("Second cache directory specified\n");
		return -EEXIST;
	}

	dir = kstrdup(args, GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	cache->rootdirname = dir;
	return 0;
}

/*
 * Set the cache security context
 * - command: "secctx <ctx>"
 */
static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
{
	char *secctx;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty security context specified\n");
		return -EINVAL;
	}

	if (cache->secctx) {
		pr_err("Second security context specified\n");
		return -EINVAL;
	}

	secctx = kstrdup(args, GFP_KERNEL);
	if (!secctx)
		return -ENOMEM;

	cache->secctx = secctx;
	return 0;
}
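
/*
 * Illustrative note (not part of the original source): the security context
 * set here is the LSM label the kernel assumes when acting on the cache.  A
 * cachefilesd configuration might, for example, issue something like:
 *
 *	secctx system_u:system_r:cachefiles_kernel_t:s0
 *
 * before binding, so that cache file accesses are made with that context
 * rather than the daemon's own.
 */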

/*
 * Set the cache tag
 * - command: "tag <name>"
 */
static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
{
	char *tag;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty tag specified\n");
		return -EINVAL;
	}

	if (cache->tag)
		return -EEXIST;

	tag = kstrdup(args, GFP_KERNEL);
	if (!tag)
		return -ENOMEM;

	cache->tag = tag;
	return 0;
}

/*
 * Request a node in the cache be culled from the current working directory
 * - command: "cull <name>"
 */
static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
{
	struct path path;
	const struct cred *saved_cred;
	int ret;

	_enter(",%s", args);

	if (strchr(args, '/'))
		goto inval;

	if (!test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("cull applied to unready cache\n");
		return -EIO;
	}

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		pr_err("cull applied to dead cache\n");
		return -EIO;
	}

	get_fs_pwd(current->fs, &path);

	if (!d_can_lookup(path.dentry))
		goto notdir;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = cachefiles_cull(cache, path.dentry, args);
	cachefiles_end_secure(cache, saved_cred);

	path_put(&path);
	_leave(" = %d", ret);
	return ret;

notdir:
	path_put(&path);
	pr_err("cull command requires dirfd to be a directory\n");
	return -ENOTDIR;

inval:
	pr_err("cull command requires dirfd and filename\n");
	return -EINVAL;
}

/*
 * Set debugging mode
 * - command: "debug <mask>"
 */
static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args)
{
	unsigned long mask;

	_enter(",%s", args);

	mask = simple_strtoul(args, &args, 0);
	if (args[0] != '\0')
		goto inval;

	cachefiles_debug = mask;
	_leave(" = 0");
	return 0;

inval:
	pr_err("debug command requires mask\n");
	return -EINVAL;
}

/*
 * Find out whether an object in the current working directory is in use or not
 * - command: "inuse <name>"
 */
static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
{
	struct path path;
	const struct cred *saved_cred;
	int ret;

	//_enter(",%s", args);

	if (strchr(args, '/'))
		goto inval;

	if (!test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("inuse applied to unready cache\n");
		return -EIO;
	}

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		pr_err("inuse applied to dead cache\n");
		return -EIO;
	}

	get_fs_pwd(current->fs, &path);

	if (!d_can_lookup(path.dentry))
		goto notdir;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = cachefiles_check_in_use(cache, path.dentry, args);
	cachefiles_end_secure(cache, saved_cred);

	path_put(&path);
	//_leave(" = %d", ret);
	return ret;

notdir:
	path_put(&path);
	pr_err("inuse command requires dirfd to be a directory\n");
	return -ENOTDIR;

inval:
	pr_err("inuse command requires dirfd and filename\n");
	return -EINVAL;
}
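
/*
 * Illustrative note (not part of the original source): with
 * CONFIG_CACHEFILES_ONDEMAND enabled, writing "bind ondemand" instead of a
 * bare "bind" switches the cache into on-demand mode, in which reads on the
 * control file return requests for user space to service (see
 * cachefiles_ondemand_daemon_read()) rather than the status line described
 * earlier.
 */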

/*
 * Bind a directory as a cache
 */
static int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
{
	_enter("{%u,%u,%u,%u,%u,%u},%s",
	       cache->frun_percent,
	       cache->fcull_percent,
	       cache->fstop_percent,
	       cache->brun_percent,
	       cache->bcull_percent,
	       cache->bstop_percent,
	       args);

	if (cache->fstop_percent >= cache->fcull_percent ||
	    cache->fcull_percent >= cache->frun_percent ||
	    cache->frun_percent  >= 100)
		return -ERANGE;

	if (cache->bstop_percent >= cache->bcull_percent ||
	    cache->bcull_percent >= cache->brun_percent ||
	    cache->brun_percent  >= 100)
		return -ERANGE;

	if (!cache->rootdirname) {
		pr_err("No cache directory specified\n");
		return -EINVAL;
	}

	/* Don't permit already bound caches to be re-bound */
	if (test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("Cache already bound\n");
		return -EBUSY;
	}

	if (IS_ENABLED(CONFIG_CACHEFILES_ONDEMAND)) {
		if (!strcmp(args, "ondemand")) {
			set_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags);
		} else if (*args) {
			pr_err("Invalid argument to the 'bind' command\n");
			return -EINVAL;
		}
	} else if (*args) {
		pr_err("'bind' command doesn't take an argument\n");
		return -EINVAL;
	}

	/* Make sure we have copies of the tag string */
	if (!cache->tag) {
		/*
		 * The tag string is released by the fops->release()
		 * function, so we don't release it on error here
		 */
		cache->tag = kstrdup("CacheFiles", GFP_KERNEL);
		if (!cache->tag)
			return -ENOMEM;
	}

	return cachefiles_add_cache(cache);
}

/*
 * Unbind a cache.
 */
static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
{
	_enter("");

	if (test_bit(CACHEFILES_READY, &cache->flags))
		cachefiles_withdraw_cache(cache);

	cachefiles_put_directory(cache->graveyard);
	cachefiles_put_directory(cache->store);
	mntput(cache->mnt);
	put_cred(cache->cache_cred);

	kfree(cache->rootdirname);
	kfree(cache->secctx);
	kfree(cache->tag);

	_leave("");
}