/* CacheFiles path walking and related routines
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/xattr.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/slab.h>
#include "internal.h"

#define CACHEFILES_KEYBUF_SIZE 512

/*
 * dump debugging info about an object
 */
static noinline
void __cachefiles_printk_object(struct cachefiles_object *object,
				const char *prefix,
				u8 *keybuf)
{
	struct fscache_cookie *cookie;
	unsigned keylen, loop;

	pr_err("%sobject: OBJ%x\n", prefix, object->fscache.debug_id);
	pr_err("%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
	       prefix, object->fscache.state->name,
	       object->fscache.flags, work_busy(&object->fscache.work),
	       object->fscache.events, object->fscache.event_mask);
	pr_err("%sops=%u inp=%u exc=%u\n",
	       prefix, object->fscache.n_ops, object->fscache.n_in_progress,
	       object->fscache.n_exclusive);
	pr_err("%sparent=%p\n",
	       prefix, object->fscache.parent);

	spin_lock(&object->fscache.lock);
	cookie = object->fscache.cookie;
	if (cookie) {
		pr_err("%scookie=%p [pr=%p nd=%p fl=%lx]\n",
		       prefix,
		       object->fscache.cookie,
		       object->fscache.cookie->parent,
		       object->fscache.cookie->netfs_data,
		       object->fscache.cookie->flags);
		if (keybuf && cookie->def)
			keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
						      CACHEFILES_KEYBUF_SIZE);
		else
			keylen = 0;
	} else {
		pr_err("%scookie=NULL\n", prefix);
		keylen = 0;
	}
	spin_unlock(&object->fscache.lock);

	if (keylen) {
		pr_err("%skey=[%u] '", prefix, keylen);
		for (loop = 0; loop < keylen; loop++)
			pr_cont("%02x", keybuf[loop]);
		pr_cont("'\n");
	}
}

/*
 * dump debugging info about a pair of objects
 */
static noinline void cachefiles_printk_object(struct cachefiles_object *object,
					      struct cachefiles_object *xobject)
{
	u8 *keybuf;

	keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);
	if (object)
		__cachefiles_printk_object(object, "", keybuf);
	if (xobject)
		__cachefiles_printk_object(xobject, "x", keybuf);
	kfree(keybuf);
}

/*
 * mark the owner of a dentry, if there is one, to indicate that that dentry
 * has been preemptively deleted
 * - the caller must hold the i_mutex on the dentry's parent as required to
 *   call vfs_unlink(), vfs_rmdir() or vfs_rename()
 */
static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
					  struct dentry *dentry,
					  enum fscache_why_object_killed why)
{
	struct cachefiles_object *object;
	struct rb_node *p;

	_enter(",'%pd'", dentry);

	write_lock(&cache->active_lock);

	p = cache->active_nodes.rb_node;
	while (p) {
		object = rb_entry(p, struct cachefiles_object, active_node);
		if (object->dentry > dentry)
			p = p->rb_left;
		else if (object->dentry < dentry)
			p = p->rb_right;
		else
			goto found_dentry;
	}

	write_unlock(&cache->active_lock);
	_leave(" [no owner]");
	return;

	/* found the object that owns the dentry */
found_dentry:
	kdebug("preemptive burial: OBJ%x [%s] %p",
	       object->fscache.debug_id,
	       object->fscache.state->name,
	       dentry);

	if (fscache_object_is_live(&object->fscache)) {
		pr_err("\n");
		pr_err("Error: Can't preemptively bury live object\n");
		cachefiles_printk_object(object, NULL);
	} else {
		if (why != FSCACHE_OBJECT_IS_STALE)
			fscache_object_mark_killed(&object->fscache, why);
	}

	write_unlock(&cache->active_lock);
	_leave(" [owner marked]");
}

/*
 * record the fact that an object is now active
 */
static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
					 struct cachefiles_object *object)
{
	struct cachefiles_object *xobject;
	struct rb_node **_p, *_parent = NULL;
	struct dentry *dentry;

	_enter(",%p", object);

try_again:
	write_lock(&cache->active_lock);

	if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
		pr_err("Error: Object already active\n");
		cachefiles_printk_object(object, NULL);
		BUG();
	}

	dentry = object->dentry;
	_p = &cache->active_nodes.rb_node;
	while (*_p) {
		_parent = *_p;
		xobject = rb_entry(_parent,
				   struct cachefiles_object, active_node);

		ASSERT(xobject != object);

		if (xobject->dentry > dentry)
			_p = &(*_p)->rb_left;
		else if (xobject->dentry < dentry)
			_p = &(*_p)->rb_right;
		else
			goto wait_for_old_object;
	}

	rb_link_node(&object->active_node, _parent, _p);
	rb_insert_color(&object->active_node, &cache->active_nodes);

	write_unlock(&cache->active_lock);
	_leave(" = 0");
	return 0;

	/* an old object from a previous incarnation is hogging the slot - we
	 * need to wait for it to be destroyed */
wait_for_old_object:
	if (fscache_object_is_live(&xobject->fscache)) {
		pr_err("\n");
		pr_err("Error: Unexpected object collision\n");
		cachefiles_printk_object(object, xobject);
		BUG();
	}
	atomic_inc(&xobject->usage);
	write_unlock(&cache->active_lock);

	if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
		wait_queue_head_t *wq;

		signed long timeout = 60 * HZ;
		wait_queue_t wait;
		bool requeue;

		/* if the object we're waiting for is queued for processing,
		 * then just put ourselves on the queue behind it */
		if (work_pending(&xobject->fscache.work)) {
			_debug("queue OBJ%x behind OBJ%x immediately",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		/* otherwise we sleep until either the object we're waiting for
		 * is done, or the fscache_object is congested */
		wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
		init_wait(&wait);
		requeue = false;
		do {
			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
			if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
				break;

			requeue = fscache_object_sleep_till_congested(&timeout);
		} while (timeout > 0 && !requeue);
		finish_wait(wq, &wait);

		if (requeue &&
		    test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
			_debug("queue OBJ%x behind OBJ%x after wait",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		if (timeout <= 0) {
			pr_err("\n");
			pr_err("Error: Overlong wait for old active object to go away\n");
			cachefiles_printk_object(object, xobject);
			goto requeue;
		}
	}

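	/* the old object should no longer be marked active at this point, so
	 * drop the reference we took on it and retry the insertion into the
	 * active tree */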
	ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));

	cache->cache.ops->put_object(&xobject->fscache);
	goto try_again;

requeue:
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	cache->cache.ops->put_object(&xobject->fscache);
	_leave(" = -ETIMEDOUT");
	return -ETIMEDOUT;
}

/*
 * Mark an object as being inactive.
 */
void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
				     struct cachefiles_object *object)
{
	write_lock(&cache->active_lock);
	rb_erase(&object->active_node, &cache->active_nodes);
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	write_unlock(&cache->active_lock);

	wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);

	/* This object can now be culled, so we need to let the daemon know
	 * that there is something it can remove if it needs to.
	 */
	atomic_long_add(d_backing_inode(object->dentry)->i_blocks,
			&cache->b_released);
	if (atomic_inc_return(&cache->f_released))
		cachefiles_state_changed(cache);
}

/*
 * delete an object representation from the cache
 * - file backed objects are unlinked
 * - directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 * - unlocks the directory mutex
 */
static int cachefiles_bury_object(struct cachefiles_cache *cache,
				  struct dentry *dir,
				  struct dentry *rep,
				  bool preemptive,
				  enum fscache_why_object_killed why)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];
	int ret;

	_enter(",'%pd','%pd'", dir, rep);

	_debug("remove %p from %p", rep, dir);

	/* non-directories can just be unlinked */
	if (!d_is_dir(rep)) {
		_debug("unlink stale object");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_unlink(&path, rep);
		if (ret < 0) {
			cachefiles_io_error(cache, "Unlink security error");
		} else {
			ret = vfs_unlink(d_inode(dir), rep, NULL);

			if (preemptive)
				cachefiles_mark_object_buried(cache, rep, why);
		}

		inode_unlock(d_inode(dir));

		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");

		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	inode_unlock(d_inode(dir));

try_again:
	/* first step is to make up a grave dentry in the graveyard */
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) get_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* do the multiway lock magic */
	trap = lock_rename(cache->graveyard, dir);

	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!d_can_lookup(cache->graveyard)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld",
				    PTR_ERR(grave));
		return -EIO;
	}

	if (d_is_positive(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		ret = vfs_rename(d_inode(dir), rep,
				 d_inode(cache->graveyard), grave, NULL, 0);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);

		if (preemptive)
			cachefiles_mark_object_buried(cache, rep, why);
	}

	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}

/*
 * delete an object representation from the cache
 */
int cachefiles_delete_object(struct cachefiles_cache *cache,
			     struct cachefiles_object *object)
{
	struct dentry *dir;
	int ret;

	_enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry);

	ASSERT(object->dentry);
	ASSERT(d_backing_inode(object->dentry));
	ASSERT(object->dentry->d_parent);

	dir = dget_parent(object->dentry);

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	if (test_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->fscache.flags)) {
		/* object allocation for the same key preemptively deleted this
		 * object's file so that it could create its own file */
		_debug("object preemptively buried");
		inode_unlock(d_inode(dir));
		ret = 0;
	} else {
		/* we need to check that our parent is _still_ our parent - it
		 * may have been renamed */
		if (dir == object->dentry->d_parent) {
			ret = cachefiles_bury_object(cache, dir,
						     object->dentry, false,
						     FSCACHE_OBJECT_WAS_RETIRED);
		} else {
			/* it got moved, presumably by cachefilesd culling it,
			 * so it's no longer in the key path and we can ignore
			 * it */
			inode_unlock(d_inode(dir));
			ret = 0;
		}
	}

	dput(dir);
	_leave(" = %d", ret);
	return ret;
}

/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 */
int cachefiles_walk_to_object(struct cachefiles_object *parent,
			      struct cachefiles_object *object,
			      const char *key,
			      struct cachefiles_xattr *auxdata)
{
	struct cachefiles_cache *cache;
	struct dentry *dir, *next = NULL;
	struct path path;
	unsigned long start;
	const char *name;
	int ret, nlen;

	_enter("OBJ%x{%p},OBJ%x,%s,",
	       parent->fscache.debug_id, parent->dentry,
	       object->fscache.debug_id, key);

	cache = container_of(parent->fscache.cache,
			     struct cachefiles_cache, cache);
	path.mnt = cache->mnt;

	ASSERT(parent->dentry);
	ASSERT(d_backing_inode(parent->dentry));

	if (!(d_is_dir(parent->dentry))) {
		// TODO: convert file to dir
		_leave("looking up in non-directory");
		return -ENOBUFS;
	}

	dir = dget(parent->dentry);

advance:
	/* attempt to transit the first directory component */
	name = key;
	nlen = strlen(key);

	/* key ends in a double NUL */
	key = key + nlen + 1;
	if (!*key)
		key = NULL;

lookup_again:
	/* search the current directory for the element name */
	_debug("lookup '%s'", name);

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	start = jiffies;
	next = lookup_one_len(name, dir, nlen);
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(next))
		goto lookup_error;

	_debug("next -> %p %s", next, d_backing_inode(next) ? "positive" : "negative");

	if (!key)
		object->new = !d_backing_inode(next);

	/* if this element of the path doesn't exist, then the lookup phase
	 * failed, and we can release any readers in the certain knowledge that
	 * there's nothing for them to actually read */
	if (d_is_negative(next))
		fscache_object_lookup_negative(&object->fscache);

	/* we need to create the object if it's negative */
	if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) {
		/* index objects and intervening tree levels must be subdirs */
		if (d_is_negative(next)) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto no_space_error;

			path.dentry = dir;
			ret = security_path_mkdir(&path, next, 0);
			if (ret < 0)
				goto create_error;
			start = jiffies;
			ret = vfs_mkdir(d_inode(dir), next, 0);
			cachefiles_hist(cachefiles_mkdir_histogram, start);
			if (ret < 0)
				goto create_error;

			ASSERT(d_backing_inode(next));

			_debug("mkdir -> %p{%p{ino=%lu}}",
			       next, d_backing_inode(next), d_backing_inode(next)->i_ino);

		} else if (!d_can_lookup(next)) {
			pr_err("inode %lu is not a directory\n",
			       d_backing_inode(next)->i_ino);
			ret = -ENOBUFS;
			goto error;
		}

	} else {
		/* non-index objects start out life as files */
		if (d_is_negative(next)) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto no_space_error;

			path.dentry = dir;
			ret = security_path_mknod(&path, next, S_IFREG, 0);
			if (ret < 0)
				goto create_error;
			start = jiffies;
			ret = vfs_create(d_inode(dir), next, S_IFREG, true);
			cachefiles_hist(cachefiles_create_histogram, start);
			if (ret < 0)
				goto create_error;

			ASSERT(d_backing_inode(next));

			_debug("create -> %p{%p{ino=%lu}}",
			       next, d_backing_inode(next), d_backing_inode(next)->i_ino);

		} else if (!d_can_lookup(next) &&
			   !d_is_reg(next)
			   ) {
			pr_err("inode %lu is not a file or directory\n",
			       d_backing_inode(next)->i_ino);
			ret = -ENOBUFS;
			goto error;
		}
	}

	/* process the next component */
	if (key) {
		_debug("advance");
		inode_unlock(d_inode(dir));
		dput(dir);
		dir = next;
		next = NULL;
		goto advance;
	}

	/* we've found the object we were looking for */
	object->dentry = next;

	/* if we've found that the terminal object exists, then we need to
	 * check its attributes and delete it if it's out of date */
	if (!object->new) {
		_debug("validate '%pd'", next);

		ret = cachefiles_check_object_xattr(object, auxdata);
		if (ret == -ESTALE) {
			/* delete the object (the deleter drops the directory
			 * mutex) */
			object->dentry = NULL;

			ret = cachefiles_bury_object(cache, dir, next, true,
						     FSCACHE_OBJECT_IS_STALE);
			dput(next);
			next = NULL;

			if (ret < 0)
				goto delete_error;

			_debug("redo lookup");
			fscache_object_retrying_stale(&object->fscache);
			goto lookup_again;
		}
	}

	/* note that we're now using this object */
	ret = cachefiles_mark_object_active(cache, object);

	inode_unlock(d_inode(dir));
	dput(dir);
	dir = NULL;

	if (ret == -ETIMEDOUT)
		goto mark_active_timed_out;

	_debug("=== OBTAINED_OBJECT ===");

	if (object->new) {
		/* attach data to a newly constructed terminal object */
		ret = cachefiles_set_object_xattr(object, auxdata);
		if (ret < 0)
			goto check_error;
	} else {
		/* always update the atime on an object we've just looked up
		 * (this is used to keep track of culling, and atimes are only
		 * updated by read, write and readdir but not lookup or
		 * open) */
		path.dentry = next;
		touch_atime(&path);
	}

	/* open a file interface onto a data file */
	if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
		if (d_is_reg(object->dentry)) {
			const struct address_space_operations *aops;

			ret = -EPERM;
			aops = d_backing_inode(object->dentry)->i_mapping->a_ops;
			if (!aops->bmap)
				goto check_error;
			if (object->dentry->d_sb->s_blocksize > PAGE_SIZE)
				goto check_error;

			object->backer = object->dentry;
		} else {
			BUG(); // TODO: open file in data-class subdir
		}
	}

	object->new = 0;
	fscache_obtained_object(&object->fscache);

	_leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino);
	return 0;

no_space_error:
	fscache_object_mark_killed(&object->fscache, FSCACHE_OBJECT_NO_SPACE);
create_error:
	_debug("create error %d", ret);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Create/mkdir failed");
	goto error;

mark_active_timed_out:
	_debug("mark active timed out");
	goto release_dentry;

check_error:
	_debug("check error %d", ret);
	cachefiles_mark_object_inactive(cache, object);
release_dentry:
	dput(object->dentry);
	object->dentry = NULL;
	goto error_out;

delete_error:
	_debug("delete error %d", ret);
	goto error_out2;

lookup_error:
	_debug("lookup error %ld", PTR_ERR(next));
	ret = PTR_ERR(next);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Lookup failed");
	next = NULL;
error:
	inode_unlock(d_inode(dir));
	dput(next);
error_out2:
	dput(dir);
error_out:
	_leave(" = error %d", -ret);
	return ret;
}

/*
 * get a subdirectory
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname)
{
	struct dentry *subdir;
	unsigned long start;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */
	inode_lock(d_inode(dir));

	start = jiffies;
	subdir = lookup_one_len(dirname, dir, strlen(dirname));
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(subdir)) {
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %p %s",
	       subdir, d_backing_inode(subdir) ? "positive" : "negative");
"positive" : "negative"); 763 764 /* we need to create the subdir if it doesn't exist yet */ 765 if (d_is_negative(subdir)) { 766 ret = cachefiles_has_space(cache, 1, 0); 767 if (ret < 0) 768 goto mkdir_error; 769 770 _debug("attempt mkdir"); 771 772 path.mnt = cache->mnt; 773 path.dentry = dir; 774 ret = security_path_mkdir(&path, subdir, 0700); 775 if (ret < 0) 776 goto mkdir_error; 777 ret = vfs_mkdir(d_inode(dir), subdir, 0700); 778 if (ret < 0) 779 goto mkdir_error; 780 781 ASSERT(d_backing_inode(subdir)); 782 783 _debug("mkdir -> %p{%p{ino=%lu}}", 784 subdir, 785 d_backing_inode(subdir), 786 d_backing_inode(subdir)->i_ino); 787 } 788 789 inode_unlock(d_inode(dir)); 790 791 /* we need to make sure the subdir is a directory */ 792 ASSERT(d_backing_inode(subdir)); 793 794 if (!d_can_lookup(subdir)) { 795 pr_err("%s is not a directory\n", dirname); 796 ret = -EIO; 797 goto check_error; 798 } 799 800 ret = -EPERM; 801 if (!d_backing_inode(subdir)->i_op->setxattr || 802 !d_backing_inode(subdir)->i_op->getxattr || 803 !d_backing_inode(subdir)->i_op->lookup || 804 !d_backing_inode(subdir)->i_op->mkdir || 805 !d_backing_inode(subdir)->i_op->create || 806 (!d_backing_inode(subdir)->i_op->rename && 807 !d_backing_inode(subdir)->i_op->rename2) || 808 !d_backing_inode(subdir)->i_op->rmdir || 809 !d_backing_inode(subdir)->i_op->unlink) 810 goto check_error; 811 812 _leave(" = [%lu]", d_backing_inode(subdir)->i_ino); 813 return subdir; 814 815 check_error: 816 dput(subdir); 817 _leave(" = %d [check]", ret); 818 return ERR_PTR(ret); 819 820 mkdir_error: 821 inode_unlock(d_inode(dir)); 822 dput(subdir); 823 pr_err("mkdir %s failed with error %d\n", dirname, ret); 824 return ERR_PTR(ret); 825 826 lookup_error: 827 inode_unlock(d_inode(dir)); 828 ret = PTR_ERR(subdir); 829 pr_err("Lookup %s failed with error %d\n", dirname, ret); 830 return ERR_PTR(ret); 831 832 nomem_d_alloc: 833 inode_unlock(d_inode(dir)); 834 _leave(" = -ENOMEM"); 835 return ERR_PTR(-ENOMEM); 836 } 837 838 /* 839 * find out if an object is in use or not 840 * - if finds object and it's not in use: 841 * - returns a pointer to the object and a reference on it 842 * - returns with the directory locked 843 */ 844 static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache, 845 struct dentry *dir, 846 char *filename) 847 { 848 struct cachefiles_object *object; 849 struct rb_node *_n; 850 struct dentry *victim; 851 unsigned long start; 852 int ret; 853 854 //_enter(",%pd/,%s", 855 // dir, filename); 856 857 /* look up the victim */ 858 inode_lock_nested(d_inode(dir), I_MUTEX_PARENT); 859 860 start = jiffies; 861 victim = lookup_one_len(filename, dir, strlen(filename)); 862 cachefiles_hist(cachefiles_lookup_histogram, start); 863 if (IS_ERR(victim)) 864 goto lookup_error; 865 866 //_debug("victim -> %p %s", 867 // victim, d_backing_inode(victim) ? 
"positive" : "negative"); 868 869 /* if the object is no longer there then we probably retired the object 870 * at the netfs's request whilst the cull was in progress 871 */ 872 if (d_is_negative(victim)) { 873 inode_unlock(d_inode(dir)); 874 dput(victim); 875 _leave(" = -ENOENT [absent]"); 876 return ERR_PTR(-ENOENT); 877 } 878 879 /* check to see if we're using this object */ 880 read_lock(&cache->active_lock); 881 882 _n = cache->active_nodes.rb_node; 883 884 while (_n) { 885 object = rb_entry(_n, struct cachefiles_object, active_node); 886 887 if (object->dentry > victim) 888 _n = _n->rb_left; 889 else if (object->dentry < victim) 890 _n = _n->rb_right; 891 else 892 goto object_in_use; 893 } 894 895 read_unlock(&cache->active_lock); 896 897 //_leave(" = %p", victim); 898 return victim; 899 900 object_in_use: 901 read_unlock(&cache->active_lock); 902 inode_unlock(d_inode(dir)); 903 dput(victim); 904 //_leave(" = -EBUSY [in use]"); 905 return ERR_PTR(-EBUSY); 906 907 lookup_error: 908 inode_unlock(d_inode(dir)); 909 ret = PTR_ERR(victim); 910 if (ret == -ENOENT) { 911 /* file or dir now absent - probably retired by netfs */ 912 _leave(" = -ESTALE [absent]"); 913 return ERR_PTR(-ESTALE); 914 } 915 916 if (ret == -EIO) { 917 cachefiles_io_error(cache, "Lookup failed"); 918 } else if (ret != -ENOMEM) { 919 pr_err("Internal error: %d\n", ret); 920 ret = -EIO; 921 } 922 923 _leave(" = %d", ret); 924 return ERR_PTR(ret); 925 } 926 927 /* 928 * cull an object if it's not in use 929 * - called only by cache manager daemon 930 */ 931 int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir, 932 char *filename) 933 { 934 struct dentry *victim; 935 int ret; 936 937 _enter(",%pd/,%s", dir, filename); 938 939 victim = cachefiles_check_active(cache, dir, filename); 940 if (IS_ERR(victim)) 941 return PTR_ERR(victim); 942 943 _debug("victim -> %p %s", 944 victim, d_backing_inode(victim) ? "positive" : "negative"); 945 946 /* okay... the victim is not being used so we can cull it 947 * - start by marking it as stale 948 */ 949 _debug("victim is cullable"); 950 951 ret = cachefiles_remove_object_xattr(cache, victim); 952 if (ret < 0) 953 goto error_unlock; 954 955 /* actually remove the victim (drops the dir mutex) */ 956 _debug("bury"); 957 958 ret = cachefiles_bury_object(cache, dir, victim, false, 959 FSCACHE_OBJECT_WAS_CULLED); 960 if (ret < 0) 961 goto error; 962 963 dput(victim); 964 _leave(" = 0"); 965 return 0; 966 967 error_unlock: 968 inode_unlock(d_inode(dir)); 969 error: 970 dput(victim); 971 if (ret == -ENOENT) { 972 /* file or dir now absent - probably retired by netfs */ 973 _leave(" = -ESTALE [absent]"); 974 return -ESTALE; 975 } 976 977 if (ret != -ENOMEM) { 978 pr_err("Internal error: %d\n", ret); 979 ret = -EIO; 980 } 981 982 _leave(" = %d", ret); 983 return ret; 984 } 985 986 /* 987 * find out if an object is in use or not 988 * - called only by cache manager daemon 989 * - returns -EBUSY or 0 to indicate whether an object is in use or not 990 */ 991 int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir, 992 char *filename) 993 { 994 struct dentry *victim; 995 996 //_enter(",%pd/,%s", 997 // dir, filename); 998 999 victim = cachefiles_check_active(cache, dir, filename); 1000 if (IS_ERR(victim)) 1001 return PTR_ERR(victim); 1002 1003 inode_unlock(d_inode(dir)); 1004 dput(victim); 1005 //_leave(" = 0"); 1006 return 0; 1007 } 1008