/*
 *
 * Copyright (C) 2011 Novell Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/xattr.h>
#include <linux/rbtree.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/ratelimit.h>
#include "overlayfs.h"

struct ovl_cache_entry {
	unsigned int len;
	unsigned int type;
	u64 real_ino;
	u64 ino;
	struct list_head l_node;
	struct rb_node node;
	struct ovl_cache_entry *next_maybe_whiteout;
	bool is_upper;
	bool is_whiteout;
	char name[];
};

struct ovl_dir_cache {
	long refcount;
	u64 version;
	struct list_head entries;
	struct rb_root root;
};

struct ovl_readdir_data {
	struct dir_context ctx;
	struct dentry *dentry;
	bool is_lowest;
	struct rb_root *root;
	struct list_head *list;
	struct list_head middle;
	struct ovl_cache_entry *first_maybe_whiteout;
	int count;
	int err;
	bool is_upper;
	bool d_type_supported;
};

struct ovl_dir_file {
	bool is_real;
	bool is_upper;
	struct ovl_dir_cache *cache;
	struct list_head *cursor;
	struct file *realfile;
	struct file *upperfile;
};

static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
{
	return rb_entry(n, struct ovl_cache_entry, node);
}

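/*
 * Find the rb-tree slot for an entry named @name, or the entry itself if it
 * is already in the tree.  On a miss, *link and *parent are left set up for
 * rb_link_node().
 */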
static bool ovl_cache_entry_find_link(const char *name, int len,
				      struct rb_node ***link,
				      struct rb_node **parent)
{
	bool found = false;
	struct rb_node **newp = *link;

	while (!found && *newp) {
		int cmp;
		struct ovl_cache_entry *tmp;

		*parent = *newp;
		tmp = ovl_cache_entry_from_node(*newp);
		cmp = strncmp(name, tmp->name, len);
		if (cmp > 0)
			newp = &tmp->node.rb_right;
		else if (cmp < 0 || len < tmp->len)
			newp = &tmp->node.rb_left;
		else
			found = true;
	}
	*link = newp;

	return found;
}

static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
						    const char *name, int len)
{
	struct rb_node *node = root->rb_node;
	int cmp;

	while (node) {
		struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);

		cmp = strncmp(name, p->name, len);
		if (cmp > 0)
			node = p->node.rb_right;
		else if (cmp < 0 || len < p->len)
			node = p->node.rb_left;
		else
			return p;
	}

	return NULL;
}

static bool ovl_calc_d_ino(struct ovl_readdir_data *rdd,
			   struct ovl_cache_entry *p)
{
	/* Don't care if not doing ovl_iter() */
	if (!rdd->dentry)
		return false;

	/* Always recalc d_ino when remapping lower inode numbers */
	if (ovl_xino_bits(rdd->dentry->d_sb))
		return true;

	/* Always recalc d_ino for parent */
	if (strcmp(p->name, "..") == 0)
		return true;

	/* If this is lower, then native d_ino will do */
	if (!rdd->is_upper)
		return false;

	/*
	 * Recalc d_ino for '.' and for all entries if dir is impure (contains
	 * copied up entries)
	 */
	if ((p->name[0] == '.' && p->len == 1) ||
	    ovl_test_flag(OVL_IMPURE, d_inode(rdd->dentry)))
		return true;

	return false;
}

static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
						   const char *name, int len,
						   u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;
	size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);

	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		return NULL;

	memcpy(p->name, name, len);
	p->name[len] = '\0';
	p->len = len;
	p->type = d_type;
	p->real_ino = ino;
	p->ino = ino;
	/* Defer setting d_ino for upper entry to ovl_iterate() */
	if (ovl_calc_d_ino(rdd, p))
		p->ino = 0;
	p->is_upper = rdd->is_upper;
	p->is_whiteout = false;

	if (d_type == DT_CHR) {
		p->next_maybe_whiteout = rdd->first_maybe_whiteout;
		rdd->first_maybe_whiteout = p;
	}
	return p;
}

static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
				  const char *name, int len, u64 ino,
				  unsigned int d_type)
{
	struct rb_node **newp = &rdd->root->rb_node;
	struct rb_node *parent = NULL;
	struct ovl_cache_entry *p;

	if (ovl_cache_entry_find_link(name, len, &newp, &parent))
		return 0;

	p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return -ENOMEM;
	}

	list_add_tail(&p->l_node, rdd->list);
	rb_link_node(&p->node, parent, newp);
	rb_insert_color(&p->node, rdd->root);

	return 0;
}

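/*
 * Actor used while reading the lowest layer: an entry already seen in an
 * upper layer is moved onto the temporary "middle" list, a new entry is
 * added there, so that entries visible in the lowest layer end up grouped
 * ahead of upper-only entries in the merged list.
 */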
static int ovl_fill_lowest(struct ovl_readdir_data *rdd,
			   const char *name, int namelen,
			   loff_t offset, u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;

	p = ovl_cache_entry_find(rdd->root, name, namelen);
	if (p) {
		list_move_tail(&p->l_node, &rdd->middle);
	} else {
		p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
		if (p == NULL)
			rdd->err = -ENOMEM;
		else
			list_add_tail(&p->l_node, &rdd->middle);
	}

	return rdd->err;
}

void ovl_cache_free(struct list_head *list)
{
	struct ovl_cache_entry *p;
	struct ovl_cache_entry *n;

	list_for_each_entry_safe(p, n, list, l_node)
		kfree(p);

	INIT_LIST_HEAD(list);
}

void ovl_dir_cache_free(struct inode *inode)
{
	struct ovl_dir_cache *cache = ovl_dir_cache(inode);

	if (cache) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
{
	struct ovl_dir_cache *cache = od->cache;

	WARN_ON(cache->refcount <= 0);
	cache->refcount--;
	if (!cache->refcount) {
		if (ovl_dir_cache(d_inode(dentry)) == cache)
			ovl_set_dir_cache(d_inode(dentry), NULL);

		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

static int ovl_fill_merge(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	if (!rdd->is_lowest)
		return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
	else
		return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type);
}

static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
{
	int err;
	struct ovl_cache_entry *p;
	struct dentry *dentry;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(rdd->dentry->d_sb);

	err = down_write_killable(&dir->d_inode->i_rwsem);
	if (!err) {
		while (rdd->first_maybe_whiteout) {
			p = rdd->first_maybe_whiteout;
			rdd->first_maybe_whiteout = p->next_maybe_whiteout;
			dentry = lookup_one_len(p->name, dir, p->len);
			if (!IS_ERR(dentry)) {
				p->is_whiteout = ovl_is_whiteout(dentry);
				dput(dentry);
			}
		}
		inode_unlock(dir->d_inode);
	}
	revert_creds(old_cred);

	return err;
}

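/*
 * Read all entries of one real directory into the rdd list/tree and, when
 * rdd->dentry is set, resolve which of the collected DT_CHR entries are in
 * fact whiteouts.
 */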
static inline int ovl_dir_read(struct path *realpath,
			       struct ovl_readdir_data *rdd)
{
	struct file *realfile;
	int err;

	realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
	if (IS_ERR(realfile))
		return PTR_ERR(realfile);

	rdd->first_maybe_whiteout = NULL;
	rdd->ctx.pos = 0;
	do {
		rdd->count = 0;
		rdd->err = 0;
		err = iterate_dir(realfile, &rdd->ctx);
		if (err >= 0)
			err = rdd->err;
	} while (!err && rdd->count);

	if (!err && rdd->first_maybe_whiteout && rdd->dentry)
		err = ovl_check_whiteouts(realpath->dentry, rdd);

	fput(realfile);

	return err;
}

/*
 * Can we iterate real dir directly?
 *
 * Non-merge dir may contain whiteouts from a time it was a merge upper, before
 * lower dir was removed under it and possibly before it was rotated from upper
 * to lower layer.
 */
static bool ovl_dir_is_real(struct dentry *dir)
{
	return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
}

static void ovl_dir_reset(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct ovl_dir_cache *cache = od->cache;
	struct dentry *dentry = file->f_path.dentry;
	bool is_real;

	if (cache && ovl_dentry_version_get(dentry) != cache->version) {
		ovl_cache_put(od, dentry);
		od->cache = NULL;
		od->cursor = NULL;
	}
	is_real = ovl_dir_is_real(dentry);
	if (od->is_real != is_real) {
		/* is_real can only become false when dir is copied up */
		if (WARN_ON(is_real))
			return;
		od->is_real = false;
	}
}

static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = dentry,
		.list = list,
		.root = root,
		.is_lowest = false,
	};
	int idx, next;

	for (idx = 0; idx != -1; idx = next) {
		next = ovl_path_next(idx, dentry, &realpath);
		rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry;

		if (next != -1) {
			err = ovl_dir_read(&realpath, &rdd);
			if (err)
				break;
		} else {
			/*
			 * Insert lowest layer entries before upper ones, this
			 * allows offsets to be reasonably constant
			 */
			list_add(&rdd.middle, rdd.list);
			rdd.is_lowest = true;
			err = ovl_dir_read(&realpath, &rdd);
			list_del(&rdd.middle);
		}
	}
	return err;
}

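/*
 * Position the cursor at offset @pos by walking the cached entry list; an
 * offset in a merged dir is simply the index into that list.
 */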
static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
{
	struct list_head *p;
	loff_t off = 0;

	list_for_each(p, &od->cache->entries) {
		if (off >= pos)
			break;
		off++;
	}
	/* Cursor is safe since the cache is stable */
	od->cursor = p;
}

static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
{
	int res;
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(d_inode(dentry));
	if (cache && ovl_dentry_version_get(dentry) == cache->version) {
		WARN_ON(!cache->refcount);
		cache->refcount++;
		return cache;
	}
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	cache->refcount = 1;
	INIT_LIST_HEAD(&cache->entries);
	cache->root = RB_ROOT;

	res = ovl_dir_read_merged(dentry, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(d_inode(dentry), cache);

	return cache;
}

/* Map inode number to lower fs unique range */
static u64 ovl_remap_lower_ino(u64 ino, int xinobits, int fsid,
			       const char *name, int namelen)
{
	if (ino >> (64 - xinobits)) {
		pr_warn_ratelimited("overlayfs: d_ino too big (%.*s, ino=%llu, xinobits=%d)\n",
				    namelen, name, ino, xinobits);
		return ino;
	}

	return ino | ((u64)fsid) << (64 - xinobits);
}

/*
 * Set d_ino for upper entries. Non-upper entries should always report
 * the uppermost real inode ino and should not call this function.
 *
 * When not all layers are on the same fs, report real ino also for upper.
 *
 * When all layers are on the same fs, and upper has a reference to
 * copy up origin, call vfs_getattr() on the overlay entry to make
 * sure that d_ino will be consistent with st_ino from stat(2).
 */
static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
{
	struct dentry *dir = path->dentry;
	struct dentry *this = NULL;
	enum ovl_path_type type;
	u64 ino = p->real_ino;
	int xinobits = ovl_xino_bits(dir->d_sb);
	int err = 0;

	if (!ovl_same_sb(dir->d_sb) && !xinobits)
		goto out;

	if (p->name[0] == '.') {
		if (p->len == 1) {
			this = dget(dir);
			goto get;
		}
		if (p->len == 2 && p->name[1] == '.') {
			/* we shall not be moved */
			this = dget(dir->d_parent);
			goto get;
		}
	}
	this = lookup_one_len(p->name, dir, p->len);
	if (IS_ERR_OR_NULL(this) || !this->d_inode) {
		if (IS_ERR(this)) {
			err = PTR_ERR(this);
			this = NULL;
			goto fail;
		}
		goto out;
	}

get:
	type = ovl_path_type(this);
	if (OVL_TYPE_ORIGIN(type)) {
		struct kstat stat;
		struct path statpath = *path;

		statpath.dentry = this;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			goto fail;

		WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
		ino = stat.ino;
	} else if (xinobits && !OVL_TYPE_UPPER(type)) {
		ino = ovl_remap_lower_ino(ino, xinobits,
					  ovl_layer_lower(this)->fsid,
					  p->name, p->len);
	}

out:
	p->ino = ino;
	dput(this);
	return err;

fail:
	pr_warn_ratelimited("overlayfs: failed to look up (%s) for ino (%i)\n",
			    p->name, err);
	goto out;
}

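/*
 * Actor that plainly collects every entry of the real dir; used for building
 * the impure dir cache of entries whose d_ino needs correction.
 */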
static int ovl_fill_plain(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_cache_entry *p;
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return -ENOMEM;
	}
	list_add_tail(&p->l_node, rdd->list);

	return 0;
}

static int ovl_dir_read_impure(struct path *path, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_cache_entry *p, *n;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_plain,
		.list = list,
		.root = root,
	};

	INIT_LIST_HEAD(list);
	*root = RB_ROOT;
	ovl_path_upper(path->dentry, &realpath);

	err = ovl_dir_read(&realpath, &rdd);
	if (err)
		return err;

	list_for_each_entry_safe(p, n, list, l_node) {
		if (strcmp(p->name, ".") != 0 &&
		    strcmp(p->name, "..") != 0) {
			err = ovl_cache_update_ino(path, p);
			if (err)
				return err;
		}
		if (p->ino == p->real_ino) {
			list_del(&p->l_node);
			kfree(p);
		} else {
			struct rb_node **newp = &root->rb_node;
			struct rb_node *parent = NULL;

			if (WARN_ON(ovl_cache_entry_find_link(p->name, p->len,
							      &newp, &parent)))
				return -EIO;

			rb_link_node(&p->node, parent, newp);
			rb_insert_color(&p->node, root);
		}
	}
	return 0;
}

static struct ovl_dir_cache *ovl_cache_get_impure(struct path *path)
{
	int res;
	struct dentry *dentry = path->dentry;
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(d_inode(dentry));
	if (cache && ovl_dentry_version_get(dentry) == cache->version)
		return cache;

	/* Impure cache is not refcounted, free it here */
	ovl_dir_cache_free(d_inode(dentry));
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	res = ovl_dir_read_impure(path, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}
	if (list_empty(&cache->entries)) {
		/*
		 * A good opportunity to get rid of an unneeded "impure" flag.
		 * Removing the "impure" xattr is best effort.
		 */
		if (!ovl_want_write(dentry)) {
			ovl_do_removexattr(ovl_dentry_upper(dentry),
					   OVL_XATTR_IMPURE);
			ovl_drop_write(dentry);
		}
		ovl_clear_flag(OVL_IMPURE, d_inode(dentry));
		kfree(cache);
		return NULL;
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(d_inode(dentry), cache);

	return cache;
}

struct ovl_readdir_translate {
	struct dir_context *orig_ctx;
	struct ovl_dir_cache *cache;
	struct dir_context ctx;
	u64 parent_ino;
	int fsid;
	int xinobits;
};

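/*
 * Actor wrapped around the caller's dir_context when iterating a real dir:
 * substitutes the overlay d_ino for "..", for entries found in the impure
 * cache and for lower entries that need xino remapping, then forwards the
 * entry to the original actor.
 */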
static int ovl_fill_real(struct dir_context *ctx, const char *name,
			 int namelen, loff_t offset, u64 ino,
			 unsigned int d_type)
{
	struct ovl_readdir_translate *rdt =
		container_of(ctx, struct ovl_readdir_translate, ctx);
	struct dir_context *orig_ctx = rdt->orig_ctx;

	if (rdt->parent_ino && strcmp(name, "..") == 0) {
		ino = rdt->parent_ino;
	} else if (rdt->cache) {
		struct ovl_cache_entry *p;

		p = ovl_cache_entry_find(&rdt->cache->root, name, namelen);
		if (p)
			ino = p->ino;
	} else if (rdt->xinobits) {
		ino = ovl_remap_lower_ino(ino, rdt->xinobits, rdt->fsid,
					  name, namelen);
	}

	return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
}

static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
{
	int err;
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dir = file->f_path.dentry;
	struct ovl_layer *lower_layer = ovl_layer_lower(dir);
	struct ovl_readdir_translate rdt = {
		.ctx.actor = ovl_fill_real,
		.orig_ctx = ctx,
		.xinobits = ovl_xino_bits(dir->d_sb),
	};

	if (rdt.xinobits && lower_layer)
		rdt.fsid = lower_layer->fsid;

	if (OVL_TYPE_MERGE(ovl_path_type(dir->d_parent))) {
		struct kstat stat;
		struct path statpath = file->f_path;

		statpath.dentry = dir->d_parent;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			return err;

		WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
		rdt.parent_ino = stat.ino;
	}

	if (ovl_test_flag(OVL_IMPURE, d_inode(dir))) {
		rdt.cache = ovl_cache_get_impure(&file->f_path);
		if (IS_ERR(rdt.cache))
			return PTR_ERR(rdt.cache);
	}

	err = iterate_dir(od->realfile, &rdt.ctx);
	ctx->pos = rdt.ctx.pos;

	return err;
}

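/* Iterate the real dir directly when possible, else the merged dir cache */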
static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct ovl_cache_entry *p;
	int err;

	if (!ctx->pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		/*
		 * If parent is merge, then need to adjust d_ino for '..', if
		 * dir is impure then need to adjust d_ino for copied up
		 * entries.
		 */
		if (ovl_xino_bits(dentry->d_sb) ||
		    (ovl_same_sb(dentry->d_sb) &&
		     (ovl_test_flag(OVL_IMPURE, d_inode(dentry)) ||
		      OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
			return ovl_iterate_real(file, ctx);
		}
		return iterate_dir(od->realfile, ctx);
	}

	if (!od->cache) {
		struct ovl_dir_cache *cache;

		cache = ovl_cache_get(dentry);
		if (IS_ERR(cache))
			return PTR_ERR(cache);

		od->cache = cache;
		ovl_seek_cursor(od, ctx->pos);
	}

	while (od->cursor != &od->cache->entries) {
		p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
		if (!p->is_whiteout) {
			if (!p->ino) {
				err = ovl_cache_update_ino(&file->f_path, p);
				if (err)
					return err;
			}
			if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
				break;
		}
		od->cursor = p->l_node.next;
		ctx->pos++;
	}
	return 0;
}

static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t res;
	struct ovl_dir_file *od = file->private_data;

	inode_lock(file_inode(file));
	if (!file->f_pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		res = vfs_llseek(od->realfile, offset, origin);
		file->f_pos = od->realfile->f_pos;
	} else {
		res = -EINVAL;

		switch (origin) {
		case SEEK_CUR:
			offset += file->f_pos;
			break;
		case SEEK_SET:
			break;
		default:
			goto out_unlock;
		}
		if (offset < 0)
			goto out_unlock;

		if (offset != file->f_pos) {
			file->f_pos = offset;
			if (od->cache)
				ovl_seek_cursor(od, offset);
		}
		res = offset;
	}
out_unlock:
	inode_unlock(file_inode(file));

	return res;
}

static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct file *realfile = od->realfile;

	/* Nothing to sync for lower */
	if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
		return 0;

	/*
	 * Need to check if we started out being a lower dir, but got copied up
	 */
	if (!od->is_upper) {
		struct inode *inode = file_inode(file);

		realfile = READ_ONCE(od->upperfile);
		if (!realfile) {
			struct path upperpath;

			ovl_path_upper(dentry, &upperpath);
			realfile = ovl_path_open(&upperpath, O_RDONLY);

			inode_lock(inode);
			if (!od->upperfile) {
				if (IS_ERR(realfile)) {
					inode_unlock(inode);
					return PTR_ERR(realfile);
				}
				smp_store_release(&od->upperfile, realfile);
			} else {
				/* somebody has beaten us to it */
				if (!IS_ERR(realfile))
					fput(realfile);
				realfile = od->upperfile;
			}
			inode_unlock(inode);
		}
	}

	return vfs_fsync_range(realfile, start, end, datasync);
}

static int ovl_dir_release(struct inode *inode, struct file *file)
{
	struct ovl_dir_file *od = file->private_data;

	if (od->cache) {
		inode_lock(inode);
		ovl_cache_put(od, file->f_path.dentry);
		inode_unlock(inode);
	}
	fput(od->realfile);
	if (od->upperfile)
		fput(od->upperfile);
	kfree(od);

	return 0;
}

static int ovl_dir_open(struct inode *inode, struct file *file)
{
	struct path realpath;
	struct file *realfile;
	struct ovl_dir_file *od;
	enum ovl_path_type type;

	od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	type = ovl_path_real(file->f_path.dentry, &realpath);
	realfile = ovl_path_open(&realpath, file->f_flags);
	if (IS_ERR(realfile)) {
		kfree(od);
		return PTR_ERR(realfile);
	}
	od->realfile = realfile;
	od->is_real = ovl_dir_is_real(file->f_path.dentry);
	od->is_upper = OVL_TYPE_UPPER(type);
	file->private_data = od;

	return 0;
}

const struct file_operations ovl_dir_operations = {
	.read		= generic_read_dir,
	.open		= ovl_dir_open,
	.iterate	= ovl_iterate,
	.llseek		= ovl_dir_llseek,
	.fsync		= ovl_dir_fsync,
	.release	= ovl_dir_release,
};

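/*
 * Check that the merged dir contains nothing but whiteouts and "."/"..".
 * On success @list is left holding the upper layer whiteouts, which the
 * caller removes with ovl_cleanup_whiteouts().
 */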
int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
{
	int err;
	struct ovl_cache_entry *p, *n;
	struct rb_root root = RB_ROOT;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(dentry->d_sb);
	err = ovl_dir_read_merged(dentry, list, &root);
	revert_creds(old_cred);
	if (err)
		return err;

	err = 0;

	list_for_each_entry_safe(p, n, list, l_node) {
		/*
		 * Select whiteouts in upperdir, they should
		 * be cleared when deleting this directory.
		 */
		if (p->is_whiteout) {
			if (p->is_upper)
				continue;
			goto del_entry;
		}

		if (p->name[0] == '.') {
			if (p->len == 1)
				goto del_entry;
			if (p->len == 2 && p->name[1] == '.')
				goto del_entry;
		}
		err = -ENOTEMPTY;
		break;

del_entry:
		list_del(&p->l_node);
		kfree(p);
	}

	return err;
}

void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
{
	struct ovl_cache_entry *p;

	inode_lock_nested(upper->d_inode, I_MUTEX_CHILD);
	list_for_each_entry(p, list, l_node) {
		struct dentry *dentry;

		if (WARN_ON(!p->is_whiteout || !p->is_upper))
			continue;

		dentry = lookup_one_len(p->name, upper, p->len);
		if (IS_ERR(dentry)) {
			pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
			       upper->d_name.name, p->len, p->name,
			       (int) PTR_ERR(dentry));
			continue;
		}
		if (dentry->d_inode)
			ovl_cleanup(upper->d_inode, dentry);
		dput(dentry);
	}
	inode_unlock(upper->d_inode);
}

static int ovl_check_d_type(struct dir_context *ctx, const char *name,
			    int namelen, loff_t offset, u64 ino,
			    unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	/* Even if d_type is not supported, DT_DIR is returned for . and .. */
	if (!strncmp(name, ".", namelen) || !strncmp(name, "..", namelen))
		return 0;

	if (d_type != DT_UNKNOWN)
		rdd->d_type_supported = true;

	return 0;
}

/*
 * Returns 1 if d_type is supported, 0 if not supported/unknown, and a
 * negative value if an error is encountered.
 */
int ovl_check_d_type_supported(struct path *realpath)
{
	int err;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_check_d_type,
		.d_type_supported = false,
	};

	err = ovl_dir_read(realpath, &rdd);
	if (err)
		return err;

	return rdd.d_type_supported;
}

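/*
 * Recursively delete the contents of a stale workdir/index directory.
 * Recursion depth is bounded by the level check in ovl_workdir_cleanup().
 */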
static void ovl_workdir_cleanup_recurse(struct path *path, int level)
{
	int err;
	struct inode *dir = path->dentry->d_inode;
	LIST_HEAD(list);
	struct rb_root root = RB_ROOT;
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = NULL,
		.list = &list,
		.root = &root,
		.is_lowest = false,
	};

	err = ovl_dir_read(path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		struct dentry *dentry;

		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		dentry = lookup_one_len(p->name, path->dentry, p->len);
		if (IS_ERR(dentry))
			continue;
		if (dentry->d_inode)
			ovl_workdir_cleanup(dir, path->mnt, dentry, level);
		dput(dentry);
	}
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
}

void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
			 struct dentry *dentry, int level)
{
	int err;

	if (!d_is_dir(dentry) || level > 1) {
		ovl_cleanup(dir, dentry);
		return;
	}

	err = ovl_do_rmdir(dir, dentry);
	if (err) {
		struct path path = { .mnt = mnt, .dentry = dentry };

		inode_unlock(dir);
		ovl_workdir_cleanup_recurse(&path, level + 1);
		inode_lock_nested(dir, I_MUTEX_PARENT);
		ovl_cleanup(dir, dentry);
	}
}

int ovl_indexdir_cleanup(struct ovl_fs *ofs)
{
	int err;
	struct dentry *indexdir = ofs->indexdir;
	struct dentry *index = NULL;
	struct inode *dir = indexdir->d_inode;
	struct path path = { .mnt = ofs->upper_mnt, .dentry = indexdir };
	LIST_HEAD(list);
	struct rb_root root = RB_ROOT;
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = NULL,
		.list = &list,
		.root = &root,
		.is_lowest = false,
	};

	err = ovl_dir_read(&path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		index = lookup_one_len(p->name, indexdir, p->len);
		if (IS_ERR(index)) {
			err = PTR_ERR(index);
			index = NULL;
			break;
		}
		err = ovl_verify_index(ofs, index);
		if (!err) {
			goto next;
		} else if (err == -ESTALE) {
			/* Cleanup stale index entries */
			err = ovl_cleanup(dir, index);
		} else if (err != -ENOENT) {
			/*
			 * Abort mount to avoid corrupting the index if
			 * an incompatible index entry was found or on out
			 * of memory.
			 */
			break;
		} else if (ofs->config.nfs_export) {
			/*
			 * Whiteout orphan index to block future open by
			 * handle after overlay nlink dropped to zero.
			 */
			err = ovl_cleanup_and_whiteout(indexdir, dir, index);
		} else {
			/* Cleanup orphan index entries */
			err = ovl_cleanup(dir, index);
		}

		if (err)
			break;

next:
		dput(index);
		index = NULL;
	}
	dput(index);
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
	if (err)
		pr_err("overlayfs: failed index dir cleanup (%i)\n", err);
	return err;
}