/* CacheFiles path walking and related routines
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/xattr.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/slab.h>
#include "internal.h"

#define CACHEFILES_KEYBUF_SIZE 512

/*
 * dump debugging info about an object
 */
static noinline
void __cachefiles_printk_object(struct cachefiles_object *object,
				const char *prefix)
{
	struct fscache_cookie *cookie;
	const u8 *k;
	unsigned loop;

	pr_err("%sobject: OBJ%x\n", prefix, object->fscache.debug_id);
	pr_err("%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
	       prefix, object->fscache.state->name,
	       object->fscache.flags, work_busy(&object->fscache.work),
	       object->fscache.events, object->fscache.event_mask);
	pr_err("%sops=%u inp=%u exc=%u\n",
	       prefix, object->fscache.n_ops, object->fscache.n_in_progress,
	       object->fscache.n_exclusive);
	pr_err("%sparent=%p\n",
	       prefix, object->fscache.parent);

	spin_lock(&object->fscache.lock);
	cookie = object->fscache.cookie;
	if (cookie) {
		pr_err("%scookie=%p [pr=%p nd=%p fl=%lx]\n",
		       prefix,
		       object->fscache.cookie,
		       object->fscache.cookie->parent,
		       object->fscache.cookie->netfs_data,
		       object->fscache.cookie->flags);
		pr_err("%skey=[%u] '", prefix, cookie->key_len);
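		/* short keys are stored inline in the cookie; longer keys
		 * live in a separately allocated buffer */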
		k = (cookie->key_len <= sizeof(cookie->inline_key)) ?
			cookie->inline_key : cookie->key;
		for (loop = 0; loop < cookie->key_len; loop++)
			pr_cont("%02x", k[loop]);
		pr_cont("'\n");
	} else {
		pr_err("%scookie=NULL\n", prefix);
	}
	spin_unlock(&object->fscache.lock);
}

/*
 * dump debugging info about a pair of objects
 */
static noinline void cachefiles_printk_object(struct cachefiles_object *object,
					      struct cachefiles_object *xobject)
{
	if (object)
		__cachefiles_printk_object(object, "");
	if (xobject)
		__cachefiles_printk_object(xobject, "x");
}

/*
 * mark the owner of a dentry, if there is one, to indicate that that dentry
 * has been preemptively deleted
 * - the caller must hold the i_mutex on the dentry's parent as required to
 *   call vfs_unlink(), vfs_rmdir() or vfs_rename()
 */
static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
					  struct dentry *dentry,
					  enum fscache_why_object_killed why)
{
	struct cachefiles_object *object;
	struct rb_node *p;

	_enter(",'%pd'", dentry);

	write_lock(&cache->active_lock);

	p = cache->active_nodes.rb_node;
	while (p) {
		object = rb_entry(p, struct cachefiles_object, active_node);
		if (object->dentry > dentry)
			p = p->rb_left;
		else if (object->dentry < dentry)
			p = p->rb_right;
		else
			goto found_dentry;
	}

	write_unlock(&cache->active_lock);
	trace_cachefiles_mark_buried(NULL, dentry, why);
	_leave(" [no owner]");
	return;

	/* found the dentry for the object */
found_dentry:
	kdebug("preemptive burial: OBJ%x [%s] %p",
	       object->fscache.debug_id,
	       object->fscache.state->name,
	       dentry);

	trace_cachefiles_mark_buried(object, dentry, why);

	if (fscache_object_is_live(&object->fscache)) {
		pr_err("\n");
		pr_err("Error: Can't preemptively bury live object\n");
		cachefiles_printk_object(object, NULL);
	} else {
		if (why != FSCACHE_OBJECT_IS_STALE)
			fscache_object_mark_killed(&object->fscache, why);
	}

	write_unlock(&cache->active_lock);
	_leave(" [owner marked]");
}

/*
 * record the fact that an object is now active
 */
static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
					 struct cachefiles_object *object)
{
	struct cachefiles_object *xobject;
	struct rb_node **_p, *_parent = NULL;
	struct dentry *dentry;

	_enter(",%p", object);

try_again:
	write_lock(&cache->active_lock);

	dentry = object->dentry;
	trace_cachefiles_mark_active(object, dentry);

	if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
		pr_err("Error: Object already active\n");
		cachefiles_printk_object(object, NULL);
		BUG();
	}

	_p = &cache->active_nodes.rb_node;
	while (*_p) {
		_parent = *_p;
		xobject = rb_entry(_parent,
				   struct cachefiles_object, active_node);

		ASSERT(xobject != object);

		if (xobject->dentry > dentry)
			_p = &(*_p)->rb_left;
		else if (xobject->dentry < dentry)
			_p = &(*_p)->rb_right;
		else
			goto wait_for_old_object;
	}

	rb_link_node(&object->active_node, _parent, _p);
	rb_insert_color(&object->active_node, &cache->active_nodes);

	write_unlock(&cache->active_lock);
	_leave(" = 0");
	return 0;

	/* an old object from a previous incarnation is hogging the slot - we
	 * need to wait for it to be destroyed */
wait_for_old_object:
	trace_cachefiles_wait_active(object, dentry, xobject);

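	/* if the old object is still live then the same backing file is in
	 * use by two objects at once, which should be impossible */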
	if (fscache_object_is_live(&xobject->fscache)) {
		pr_err("\n");
		pr_err("Error: Unexpected object collision\n");
		cachefiles_printk_object(object, xobject);
		BUG();
	}
	atomic_inc(&xobject->usage);
	write_unlock(&cache->active_lock);

	if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
		wait_queue_head_t *wq;

		signed long timeout = 60 * HZ;
		wait_queue_entry_t wait;
		bool requeue;

		/* if the object we're waiting for is queued for processing,
		 * then just put ourselves on the queue behind it */
		if (work_pending(&xobject->fscache.work)) {
			_debug("queue OBJ%x behind OBJ%x immediately",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		/* otherwise we sleep until either the object we're waiting for
		 * is done, or the fscache_object is congested */
		wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
		init_wait(&wait);
		requeue = false;
		do {
			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
			if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
				break;

			requeue = fscache_object_sleep_till_congested(&timeout);
		} while (timeout > 0 && !requeue);
		finish_wait(wq, &wait);

		if (requeue &&
		    test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
			_debug("queue OBJ%x behind OBJ%x after wait",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		if (timeout <= 0) {
			pr_err("\n");
			pr_err("Error: Overlong wait for old active object to go away\n");
			cachefiles_printk_object(object, xobject);
			goto requeue;
		}
	}

	ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));

	cache->cache.ops->put_object(&xobject->fscache,
				     cachefiles_obj_put_wait_retry);
	goto try_again;

requeue:
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	cache->cache.ops->put_object(&xobject->fscache,
				     cachefiles_obj_put_wait_timeo);
	_leave(" = -ETIMEDOUT");
	return -ETIMEDOUT;
}

/*
 * Mark an object as being inactive.
 */
void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
				     struct cachefiles_object *object,
				     blkcnt_t i_blocks)
{
	struct dentry *dentry = object->dentry;
	struct inode *inode = d_backing_inode(dentry);

	trace_cachefiles_mark_inactive(object, dentry, inode);

	write_lock(&cache->active_lock);
	rb_erase(&object->active_node, &cache->active_nodes);
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	write_unlock(&cache->active_lock);

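	/* wake up anyone waiting in cachefiles_mark_object_active() for this
	 * dentry's slot in the active tree to become free */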
	wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);

	/* This object can now be culled, so we need to let the daemon know
	 * that there is something it can remove if it needs to.
	 */
	atomic_long_add(i_blocks, &cache->b_released);
	if (atomic_inc_return(&cache->f_released))
		cachefiles_state_changed(cache);
}

/*
 * delete an object representation from the cache
 * - file backed objects are unlinked
 * - directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 * - unlocks the directory mutex
 */
static int cachefiles_bury_object(struct cachefiles_cache *cache,
				  struct cachefiles_object *object,
				  struct dentry *dir,
				  struct dentry *rep,
				  bool preemptive,
				  enum fscache_why_object_killed why)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];
	int ret;

	_enter(",'%pd','%pd'", dir, rep);

	_debug("remove %p from %p", rep, dir);

	/* non-directories can just be unlinked */
	if (!d_is_dir(rep)) {
		_debug("unlink stale object");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_unlink(&path, rep);
		if (ret < 0) {
			cachefiles_io_error(cache, "Unlink security error");
		} else {
			trace_cachefiles_unlink(object, rep, why);
			ret = vfs_unlink(d_inode(dir), rep, NULL);

			if (preemptive)
				cachefiles_mark_object_buried(cache, rep, why);
		}

		inode_unlock(d_inode(dir));

		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");

		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	inode_unlock(d_inode(dir));

try_again:
	/* first step is to make up a grave dentry in the graveyard */
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) get_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* do the multiway lock magic */
	trap = lock_rename(cache->graveyard, dir);

	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!d_can_lookup(cache->graveyard)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld",
				    PTR_ERR(grave));
		return -EIO;
	}

	if (d_is_positive(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

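	/* Rename the victim into the graveyard, whence cachefilesd will
	 * delete it.  lock_rename() above locked both parent directories, as
	 * vfs_rename() requires.
	 */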
	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		trace_cachefiles_rename(object, rep, grave, why);
		ret = vfs_rename(d_inode(dir), rep,
				 d_inode(cache->graveyard), grave, NULL, 0);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);

		if (preemptive)
			cachefiles_mark_object_buried(cache, rep, why);
	}

	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}

/*
 * delete an object representation from the cache
 */
int cachefiles_delete_object(struct cachefiles_cache *cache,
			     struct cachefiles_object *object)
{
	struct dentry *dir;
	int ret;

	_enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry);

	ASSERT(object->dentry);
	ASSERT(d_backing_inode(object->dentry));
	ASSERT(object->dentry->d_parent);

	dir = dget_parent(object->dentry);

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	if (test_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->fscache.flags)) {
		/* object allocation for the same key preemptively deleted this
		 * object's file so that it could create its own file */
		_debug("object preemptively buried");
		inode_unlock(d_inode(dir));
		ret = 0;
	} else {
		/* we need to check that our parent is _still_ our parent - it
		 * may have been renamed */
		if (dir == object->dentry->d_parent) {
			ret = cachefiles_bury_object(cache, object, dir,
						     object->dentry, false,
						     FSCACHE_OBJECT_WAS_RETIRED);
		} else {
			/* it got moved, presumably by cachefilesd culling it,
			 * so it's no longer in the key path and we can ignore
			 * it */
			inode_unlock(d_inode(dir));
			ret = 0;
		}
	}

	dput(dir);
	_leave(" = %d", ret);
	return ret;
}

/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 */
int cachefiles_walk_to_object(struct cachefiles_object *parent,
			      struct cachefiles_object *object,
			      const char *key,
			      struct cachefiles_xattr *auxdata)
{
	struct cachefiles_cache *cache;
	struct dentry *dir, *next = NULL;
	struct inode *inode;
	struct path path;
	unsigned long start;
	const char *name;
	int ret, nlen;

	_enter("OBJ%x{%p},OBJ%x,%s,",
	       parent->fscache.debug_id, parent->dentry,
	       object->fscache.debug_id, key);

	cache = container_of(parent->fscache.cache,
			     struct cachefiles_cache, cache);
	path.mnt = cache->mnt;

	ASSERT(parent->dentry);
	ASSERT(d_backing_inode(parent->dentry));

	if (!d_is_dir(parent->dentry)) {
		// TODO: convert file to dir
		_leave("looking up in non-directory");
		return -ENOBUFS;
	}

	dir = dget(parent->dentry);

advance:
	/* attempt to transit the first directory component */
	name = key;
	nlen = strlen(key);

	/* key ends in a double NUL */
	key = key + nlen + 1;
	if (!*key)
		key = NULL;

lookup_again:
	/* search the current directory for the element name */
	_debug("lookup '%s'", name);

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

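	/* the parent directory must stay locked across the lookup and any
	 * create/mkdir that may follow */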
	start = jiffies;
	next = lookup_one_len(name, dir, nlen);
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(next)) {
		trace_cachefiles_lookup(object, next, NULL);
		goto lookup_error;
	}

	inode = d_backing_inode(next);
	trace_cachefiles_lookup(object, next, inode);
	_debug("next -> %p %s", next, inode ? "positive" : "negative");

	if (!key)
		object->new = !inode;

	/* if this element of the path doesn't exist, then the lookup phase
	 * failed, and we can release any readers in the certain knowledge that
	 * there's nothing for them to actually read */
	if (d_is_negative(next))
		fscache_object_lookup_negative(&object->fscache);

	/* we need to create the object if it's negative */
	if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) {
		/* index objects and intervening tree levels must be subdirs */
		if (d_is_negative(next)) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto no_space_error;

			path.dentry = dir;
			ret = security_path_mkdir(&path, next, 0);
			if (ret < 0)
				goto create_error;
			start = jiffies;
			ret = vfs_mkdir(d_inode(dir), next, 0);
			cachefiles_hist(cachefiles_mkdir_histogram, start);
			if (!key)
				trace_cachefiles_mkdir(object, next, ret);
			if (ret < 0)
				goto create_error;

			if (unlikely(d_unhashed(next))) {
				dput(next);
				inode_unlock(d_inode(dir));
				goto lookup_again;
			}
			ASSERT(d_backing_inode(next));

			_debug("mkdir -> %p{%p{ino=%lu}}",
			       next, d_backing_inode(next),
			       d_backing_inode(next)->i_ino);

		} else if (!d_can_lookup(next)) {
			pr_err("inode %lu is not a directory\n",
			       d_backing_inode(next)->i_ino);
			ret = -ENOBUFS;
			goto error;
		}

	} else {
		/* non-index objects start out life as files */
		if (d_is_negative(next)) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto no_space_error;

			path.dentry = dir;
			ret = security_path_mknod(&path, next, S_IFREG, 0);
			if (ret < 0)
				goto create_error;
			start = jiffies;
			ret = vfs_create(d_inode(dir), next, S_IFREG, true);
			cachefiles_hist(cachefiles_create_histogram, start);
			trace_cachefiles_create(object, next, ret);
			if (ret < 0)
				goto create_error;

			ASSERT(d_backing_inode(next));

			_debug("create -> %p{%p{ino=%lu}}",
			       next, d_backing_inode(next),
			       d_backing_inode(next)->i_ino);

		} else if (!d_can_lookup(next) && !d_is_reg(next)) {
			pr_err("inode %lu is not a file or directory\n",
			       d_backing_inode(next)->i_ino);
			ret = -ENOBUFS;
			goto error;
		}
	}

	/* process the next component */
	if (key) {
		_debug("advance");
		inode_unlock(d_inode(dir));
		dput(dir);
		dir = next;
		next = NULL;
		goto advance;
	}

	/* we've found the object we were looking for */
	object->dentry = next;

	/* if we've found that the terminal object exists, then we need to
	 * check its attributes and delete it if it's out of date */
	if (!object->new) {
		_debug("validate '%pd'", next);

		ret = cachefiles_check_object_xattr(object, auxdata);
		if (ret == -ESTALE) {
			/* delete the object (the deleter drops the directory
			 * mutex) */
			object->dentry = NULL;

			ret = cachefiles_bury_object(cache, object, dir, next,
						     true,
						     FSCACHE_OBJECT_IS_STALE);
			dput(next);
			next = NULL;

			if (ret < 0)
				goto delete_error;

			_debug("redo lookup");
			fscache_object_retrying_stale(&object->fscache);
			goto lookup_again;
		}
	}

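	/* The backing dentry for the terminal object now exists and, if it
	 * pre-existed, has passed the coherency check.  Put the object into
	 * the active tree so that a second lookup of the same key waits for
	 * us instead of racing with us.
	 */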
	/* note that we're now using this object */
	ret = cachefiles_mark_object_active(cache, object);

	inode_unlock(d_inode(dir));
	dput(dir);
	dir = NULL;

	if (ret == -ETIMEDOUT)
		goto mark_active_timed_out;

	_debug("=== OBTAINED_OBJECT ===");

	if (object->new) {
		/* attach data to a newly constructed terminal object */
		ret = cachefiles_set_object_xattr(object, auxdata);
		if (ret < 0)
			goto check_error;
	} else {
		/* always update the atime on an object we've just looked up
		 * (this is used to keep track of culling, and atimes are only
		 * updated by read, write and readdir but not lookup or
		 * open) */
		path.dentry = next;
		touch_atime(&path);
	}

	/* open a file interface onto a data file */
	if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
		if (d_is_reg(object->dentry)) {
			const struct address_space_operations *aops;

			ret = -EPERM;
			aops = d_backing_inode(object->dentry)->i_mapping->a_ops;
			if (!aops->bmap)
				goto check_error;
			if (object->dentry->d_sb->s_blocksize > PAGE_SIZE)
				goto check_error;

			object->backer = object->dentry;
		} else {
			BUG(); // TODO: open file in data-class subdir
		}
	}

	object->new = 0;
	fscache_obtained_object(&object->fscache);

	_leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino);
	return 0;

no_space_error:
	fscache_object_mark_killed(&object->fscache, FSCACHE_OBJECT_NO_SPACE);
create_error:
	_debug("create error %d", ret);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Create/mkdir failed");
	goto error;

mark_active_timed_out:
	_debug("mark active timed out");
	goto release_dentry;

check_error:
	_debug("check error %d", ret);
	cachefiles_mark_object_inactive(
		cache, object, d_backing_inode(object->dentry)->i_blocks);
release_dentry:
	dput(object->dentry);
	object->dentry = NULL;
	goto error_out;

delete_error:
	_debug("delete error %d", ret);
	goto error_out2;

lookup_error:
	_debug("lookup error %ld", PTR_ERR(next));
	ret = PTR_ERR(next);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Lookup failed");
	next = NULL;
error:
	inode_unlock(d_inode(dir));
	dput(next);
error_out2:
	dput(dir);
error_out:
	_leave(" = error %d", -ret);
	return ret;
}

/*
 * get a subdirectory
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname)
{
	struct dentry *subdir;
	unsigned long start;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */
	inode_lock(d_inode(dir));

retry:
	start = jiffies;
	subdir = lookup_one_len(dirname, dir, strlen(dirname));
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(subdir)) {
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %p %s",
	       subdir, d_backing_inode(subdir) ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (d_is_negative(subdir)) {
		ret = cachefiles_has_space(cache, 1, 0);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_mkdir(&path, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;
		ret = vfs_mkdir(d_inode(dir), subdir, 0700);
		if (ret < 0)
			goto mkdir_error;

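		/* a filesystem such as NFS may instantiate a dentry of its
		 * own on mkdir and leave the one we passed in unhashed, in
		 * which case we must redo the lookup to get the live dentry */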
"positive" : "negative"); 784 785 /* we need to create the subdir if it doesn't exist yet */ 786 if (d_is_negative(subdir)) { 787 ret = cachefiles_has_space(cache, 1, 0); 788 if (ret < 0) 789 goto mkdir_error; 790 791 _debug("attempt mkdir"); 792 793 path.mnt = cache->mnt; 794 path.dentry = dir; 795 ret = security_path_mkdir(&path, subdir, 0700); 796 if (ret < 0) 797 goto mkdir_error; 798 ret = vfs_mkdir(d_inode(dir), subdir, 0700); 799 if (ret < 0) 800 goto mkdir_error; 801 802 if (unlikely(d_unhashed(subdir))) { 803 dput(subdir); 804 goto retry; 805 } 806 ASSERT(d_backing_inode(subdir)); 807 808 _debug("mkdir -> %p{%p{ino=%lu}}", 809 subdir, 810 d_backing_inode(subdir), 811 d_backing_inode(subdir)->i_ino); 812 } 813 814 inode_unlock(d_inode(dir)); 815 816 /* we need to make sure the subdir is a directory */ 817 ASSERT(d_backing_inode(subdir)); 818 819 if (!d_can_lookup(subdir)) { 820 pr_err("%s is not a directory\n", dirname); 821 ret = -EIO; 822 goto check_error; 823 } 824 825 ret = -EPERM; 826 if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) || 827 !d_backing_inode(subdir)->i_op->lookup || 828 !d_backing_inode(subdir)->i_op->mkdir || 829 !d_backing_inode(subdir)->i_op->create || 830 !d_backing_inode(subdir)->i_op->rename || 831 !d_backing_inode(subdir)->i_op->rmdir || 832 !d_backing_inode(subdir)->i_op->unlink) 833 goto check_error; 834 835 _leave(" = [%lu]", d_backing_inode(subdir)->i_ino); 836 return subdir; 837 838 check_error: 839 dput(subdir); 840 _leave(" = %d [check]", ret); 841 return ERR_PTR(ret); 842 843 mkdir_error: 844 inode_unlock(d_inode(dir)); 845 dput(subdir); 846 pr_err("mkdir %s failed with error %d\n", dirname, ret); 847 return ERR_PTR(ret); 848 849 lookup_error: 850 inode_unlock(d_inode(dir)); 851 ret = PTR_ERR(subdir); 852 pr_err("Lookup %s failed with error %d\n", dirname, ret); 853 return ERR_PTR(ret); 854 855 nomem_d_alloc: 856 inode_unlock(d_inode(dir)); 857 _leave(" = -ENOMEM"); 858 return ERR_PTR(-ENOMEM); 859 } 860 861 /* 862 * find out if an object is in use or not 863 * - if finds object and it's not in use: 864 * - returns a pointer to the object and a reference on it 865 * - returns with the directory locked 866 */ 867 static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache, 868 struct dentry *dir, 869 char *filename) 870 { 871 struct cachefiles_object *object; 872 struct rb_node *_n; 873 struct dentry *victim; 874 unsigned long start; 875 int ret; 876 877 //_enter(",%pd/,%s", 878 // dir, filename); 879 880 /* look up the victim */ 881 inode_lock_nested(d_inode(dir), I_MUTEX_PARENT); 882 883 start = jiffies; 884 victim = lookup_one_len(filename, dir, strlen(filename)); 885 cachefiles_hist(cachefiles_lookup_histogram, start); 886 if (IS_ERR(victim)) 887 goto lookup_error; 888 889 //_debug("victim -> %p %s", 890 // victim, d_backing_inode(victim) ? 
"positive" : "negative"); 891 892 /* if the object is no longer there then we probably retired the object 893 * at the netfs's request whilst the cull was in progress 894 */ 895 if (d_is_negative(victim)) { 896 inode_unlock(d_inode(dir)); 897 dput(victim); 898 _leave(" = -ENOENT [absent]"); 899 return ERR_PTR(-ENOENT); 900 } 901 902 /* check to see if we're using this object */ 903 read_lock(&cache->active_lock); 904 905 _n = cache->active_nodes.rb_node; 906 907 while (_n) { 908 object = rb_entry(_n, struct cachefiles_object, active_node); 909 910 if (object->dentry > victim) 911 _n = _n->rb_left; 912 else if (object->dentry < victim) 913 _n = _n->rb_right; 914 else 915 goto object_in_use; 916 } 917 918 read_unlock(&cache->active_lock); 919 920 //_leave(" = %p", victim); 921 return victim; 922 923 object_in_use: 924 read_unlock(&cache->active_lock); 925 inode_unlock(d_inode(dir)); 926 dput(victim); 927 //_leave(" = -EBUSY [in use]"); 928 return ERR_PTR(-EBUSY); 929 930 lookup_error: 931 inode_unlock(d_inode(dir)); 932 ret = PTR_ERR(victim); 933 if (ret == -ENOENT) { 934 /* file or dir now absent - probably retired by netfs */ 935 _leave(" = -ESTALE [absent]"); 936 return ERR_PTR(-ESTALE); 937 } 938 939 if (ret == -EIO) { 940 cachefiles_io_error(cache, "Lookup failed"); 941 } else if (ret != -ENOMEM) { 942 pr_err("Internal error: %d\n", ret); 943 ret = -EIO; 944 } 945 946 _leave(" = %d", ret); 947 return ERR_PTR(ret); 948 } 949 950 /* 951 * cull an object if it's not in use 952 * - called only by cache manager daemon 953 */ 954 int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir, 955 char *filename) 956 { 957 struct dentry *victim; 958 int ret; 959 960 _enter(",%pd/,%s", dir, filename); 961 962 victim = cachefiles_check_active(cache, dir, filename); 963 if (IS_ERR(victim)) 964 return PTR_ERR(victim); 965 966 _debug("victim -> %p %s", 967 victim, d_backing_inode(victim) ? "positive" : "negative"); 968 969 /* okay... the victim is not being used so we can cull it 970 * - start by marking it as stale 971 */ 972 _debug("victim is cullable"); 973 974 ret = cachefiles_remove_object_xattr(cache, victim); 975 if (ret < 0) 976 goto error_unlock; 977 978 /* actually remove the victim (drops the dir mutex) */ 979 _debug("bury"); 980 981 ret = cachefiles_bury_object(cache, NULL, dir, victim, false, 982 FSCACHE_OBJECT_WAS_CULLED); 983 if (ret < 0) 984 goto error; 985 986 dput(victim); 987 _leave(" = 0"); 988 return 0; 989 990 error_unlock: 991 inode_unlock(d_inode(dir)); 992 error: 993 dput(victim); 994 if (ret == -ENOENT) { 995 /* file or dir now absent - probably retired by netfs */ 996 _leave(" = -ESTALE [absent]"); 997 return -ESTALE; 998 } 999 1000 if (ret != -ENOMEM) { 1001 pr_err("Internal error: %d\n", ret); 1002 ret = -EIO; 1003 } 1004 1005 _leave(" = %d", ret); 1006 return ret; 1007 } 1008 1009 /* 1010 * find out if an object is in use or not 1011 * - called only by cache manager daemon 1012 * - returns -EBUSY or 0 to indicate whether an object is in use or not 1013 */ 1014 int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir, 1015 char *filename) 1016 { 1017 struct dentry *victim; 1018 1019 //_enter(",%pd/,%s", 1020 // dir, filename); 1021 1022 victim = cachefiles_check_active(cache, dir, filename); 1023 if (IS_ERR(victim)) 1024 return PTR_ERR(victim); 1025 1026 inode_unlock(d_inode(dir)); 1027 dput(victim); 1028 //_leave(" = 0"); 1029 return 0; 1030 } 1031