/*
 *
 * Copyright (C) 2011 Novell Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/xattr.h>
#include <linux/rbtree.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/ratelimit.h>
#include "overlayfs.h"

struct ovl_cache_entry {
	unsigned int len;
	unsigned int type;
	u64 real_ino;
	u64 ino;
	struct list_head l_node;
	struct rb_node node;
	struct ovl_cache_entry *next_maybe_whiteout;
	bool is_upper;
	bool is_whiteout;
	char name[];
};

struct ovl_dir_cache {
	long refcount;
	u64 version;
	struct list_head entries;
	struct rb_root root;
};

struct ovl_readdir_data {
	struct dir_context ctx;
	struct dentry *dentry;
	bool is_lowest;
	struct rb_root *root;
	struct list_head *list;
	struct list_head middle;
	struct ovl_cache_entry *first_maybe_whiteout;
	int count;
	int err;
	bool is_upper;
	bool d_type_supported;
};

struct ovl_dir_file {
	bool is_real;
	bool is_upper;
	struct ovl_dir_cache *cache;
	struct list_head *cursor;
	struct file *realfile;
	struct file *upperfile;
};

static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
{
	return rb_entry(n, struct ovl_cache_entry, node);
}

static bool ovl_cache_entry_find_link(const char *name, int len,
				      struct rb_node ***link,
				      struct rb_node **parent)
{
	bool found = false;
	struct rb_node **newp = *link;

	while (!found && *newp) {
		int cmp;
		struct ovl_cache_entry *tmp;

		*parent = *newp;
		tmp = ovl_cache_entry_from_node(*newp);
		cmp = strncmp(name, tmp->name, len);
		if (cmp > 0)
			newp = &tmp->node.rb_right;
		else if (cmp < 0 || len < tmp->len)
			newp = &tmp->node.rb_left;
		else
			found = true;
	}
	*link = newp;

	return found;
}

static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
						    const char *name, int len)
{
	struct rb_node *node = root->rb_node;
	int cmp;

	while (node) {
		struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);

		cmp = strncmp(name, p->name, len);
		if (cmp > 0)
			node = p->node.rb_right;
		else if (cmp < 0 || len < p->len)
			node = p->node.rb_left;
		else
			return p;
	}

	return NULL;
}

static bool ovl_calc_d_ino(struct ovl_readdir_data *rdd,
			   struct ovl_cache_entry *p)
{
	/* Don't care if not doing ovl_iter() */
	if (!rdd->dentry)
		return false;

	/* Always recalc d_ino when remapping lower inode numbers */
	if (ovl_xino_bits(rdd->dentry->d_sb))
		return true;

	/* Always recalc d_ino for parent */
	if (strcmp(p->name, "..") == 0)
		return true;

	/* If this is lower, then native d_ino will do */
	if (!rdd->is_upper)
		return false;

	/*
	 * Recalc d_ino for '.' and for all entries if dir is impure (contains
	 * copied up entries)
	 */
	if ((p->name[0] == '.' && p->len == 1) ||
	    ovl_test_flag(OVL_IMPURE, d_inode(rdd->dentry)))
		return true;

	return false;
}
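/*
 * Allocate a cache entry with the name stored inline (flexible array member).
 * A d_ino that still needs recalculation is cleared to 0 here and filled in
 * later by ovl_cache_update_ino().  DT_CHR entries may be whiteouts, so they
 * are chained on rdd->first_maybe_whiteout for ovl_check_whiteouts().
 */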
static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
						   const char *name, int len,
						   u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;
	size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);

	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		return NULL;

	memcpy(p->name, name, len);
	p->name[len] = '\0';
	p->len = len;
	p->type = d_type;
	p->real_ino = ino;
	p->ino = ino;
	/* Defer setting d_ino for upper entry to ovl_iterate() */
	if (ovl_calc_d_ino(rdd, p))
		p->ino = 0;
	p->is_upper = rdd->is_upper;
	p->is_whiteout = false;

	if (d_type == DT_CHR) {
		p->next_maybe_whiteout = rdd->first_maybe_whiteout;
		rdd->first_maybe_whiteout = p;
	}
	return p;
}

static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
				  const char *name, int len, u64 ino,
				  unsigned int d_type)
{
	struct rb_node **newp = &rdd->root->rb_node;
	struct rb_node *parent = NULL;
	struct ovl_cache_entry *p;

	if (ovl_cache_entry_find_link(name, len, &newp, &parent))
		return 0;

	p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return -ENOMEM;
	}

	list_add_tail(&p->l_node, rdd->list);
	rb_link_node(&p->node, parent, newp);
	rb_insert_color(&p->node, rdd->root);

	return 0;
}

static int ovl_fill_lowest(struct ovl_readdir_data *rdd,
			   const char *name, int namelen,
			   loff_t offset, u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;

	p = ovl_cache_entry_find(rdd->root, name, namelen);
	if (p) {
		list_move_tail(&p->l_node, &rdd->middle);
	} else {
		p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
		if (p == NULL)
			rdd->err = -ENOMEM;
		else
			list_add_tail(&p->l_node, &rdd->middle);
	}

	return rdd->err;
}

void ovl_cache_free(struct list_head *list)
{
	struct ovl_cache_entry *p;
	struct ovl_cache_entry *n;

	list_for_each_entry_safe(p, n, list, l_node)
		kfree(p);

	INIT_LIST_HEAD(list);
}

void ovl_dir_cache_free(struct inode *inode)
{
	struct ovl_dir_cache *cache = ovl_dir_cache(inode);

	if (cache) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
{
	struct ovl_dir_cache *cache = od->cache;

	WARN_ON(cache->refcount <= 0);
	cache->refcount--;
	if (!cache->refcount) {
		if (ovl_dir_cache(d_inode(dentry)) == cache)
			ovl_set_dir_cache(d_inode(dentry), NULL);

		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

static int ovl_fill_merge(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	if (!rdd->is_lowest)
		return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
	else
		return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type);
}
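/*
 * Whiteouts are represented as character devices on the real layer, so only
 * DT_CHR entries can be whiteouts.  Look each candidate up with the mounter's
 * credentials and the real directory locked, and record the result in
 * p->is_whiteout.
 */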
static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
{
	int err;
	struct ovl_cache_entry *p;
	struct dentry *dentry;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(rdd->dentry->d_sb);

	err = down_write_killable(&dir->d_inode->i_rwsem);
	if (!err) {
		while (rdd->first_maybe_whiteout) {
			p = rdd->first_maybe_whiteout;
			rdd->first_maybe_whiteout = p->next_maybe_whiteout;
			dentry = lookup_one_len(p->name, dir, p->len);
			if (!IS_ERR(dentry)) {
				p->is_whiteout = ovl_is_whiteout(dentry);
				dput(dentry);
			}
		}
		inode_unlock(dir->d_inode);
	}
	revert_creds(old_cred);

	return err;
}

static inline int ovl_dir_read(struct path *realpath,
			       struct ovl_readdir_data *rdd)
{
	struct file *realfile;
	int err;

	realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
	if (IS_ERR(realfile))
		return PTR_ERR(realfile);

	rdd->first_maybe_whiteout = NULL;
	rdd->ctx.pos = 0;
	do {
		rdd->count = 0;
		rdd->err = 0;
		err = iterate_dir(realfile, &rdd->ctx);
		if (err >= 0)
			err = rdd->err;
	} while (!err && rdd->count);

	if (!err && rdd->first_maybe_whiteout && rdd->dentry)
		err = ovl_check_whiteouts(realpath->dentry, rdd);

	fput(realfile);

	return err;
}

/*
 * Can we iterate real dir directly?
 *
 * Non-merge dir may contain whiteouts from a time it was a merge upper, before
 * lower dir was removed under it and possibly before it was rotated from upper
 * to lower layer.
 */
static bool ovl_dir_is_real(struct dentry *dir)
{
	return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
}

static void ovl_dir_reset(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct ovl_dir_cache *cache = od->cache;
	struct dentry *dentry = file->f_path.dentry;
	bool is_real;

	if (cache && ovl_dentry_version_get(dentry) != cache->version) {
		ovl_cache_put(od, dentry);
		od->cache = NULL;
		od->cursor = NULL;
	}
	is_real = ovl_dir_is_real(dentry);
	if (od->is_real != is_real) {
		/* is_real can only become false when dir is copied up */
		if (WARN_ON(is_real))
			return;
		od->is_real = false;
	}
}

static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = dentry,
		.list = list,
		.root = root,
		.is_lowest = false,
	};
	int idx, next;

	for (idx = 0; idx != -1; idx = next) {
		next = ovl_path_next(idx, dentry, &realpath);
		rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry;

		if (next != -1) {
			err = ovl_dir_read(&realpath, &rdd);
			if (err)
				break;
		} else {
			/*
			 * Insert lowest layer entries before upper ones, this
			 * allows offsets to be reasonably constant
			 */
			list_add(&rdd.middle, rdd.list);
			rdd.is_lowest = true;
			err = ovl_dir_read(&realpath, &rdd);
			list_del(&rdd.middle);
		}
	}
	return err;
}

static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
{
	struct list_head *p;
	loff_t off = 0;

	list_for_each(p, &od->cache->entries) {
		if (off >= pos)
			break;
		off++;
	}
	/* Cursor is safe since the cache is stable */
	od->cursor = p;
}
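/*
 * Get the refcounted merged directory cache stored in the overlay inode.  The
 * cache is only reused while its version matches the overlay dentry version;
 * otherwise a new one is built with ovl_dir_read_merged().
 */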
static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
{
	int res;
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(d_inode(dentry));
	if (cache && ovl_dentry_version_get(dentry) == cache->version) {
		WARN_ON(!cache->refcount);
		cache->refcount++;
		return cache;
	}
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	cache->refcount = 1;
	INIT_LIST_HEAD(&cache->entries);
	cache->root = RB_ROOT;

	res = ovl_dir_read_merged(dentry, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(d_inode(dentry), cache);

	return cache;
}
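/*
 * Illustration of the xino layout (an example, not a definitive spec): the
 * fsid is packed into the high xinobits of d_ino, e.g. with xinobits == 2 and
 * fsid == 1, lower ino 5 is reported as (1ULL << 62) | 5.  A lower ino that
 * already uses the high xinobits bits cannot be remapped and is returned
 * unchanged after a ratelimited warning.
 */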
/* Map inode number to lower fs unique range */
static u64 ovl_remap_lower_ino(u64 ino, int xinobits, int fsid,
			       const char *name, int namelen)
{
	if (ino >> (64 - xinobits)) {
		pr_warn_ratelimited("overlayfs: d_ino too big (%.*s, ino=%llu, xinobits=%d)\n",
				    namelen, name, ino, xinobits);
		return ino;
	}

	return ino | ((u64)fsid) << (64 - xinobits);
}

/*
 * Set d_ino for upper entries. Non-upper entries should always report
 * the uppermost real inode ino and should not call this function.
 *
 * When not all layers are on the same fs, report real ino also for upper.
 *
 * When all layers are on the same fs, and upper has a reference to
 * copy up origin, call vfs_getattr() on the overlay entry to make
 * sure that d_ino will be consistent with st_ino from stat(2).
 */
static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
{
	struct dentry *dir = path->dentry;
	struct dentry *this = NULL;
	enum ovl_path_type type;
	u64 ino = p->real_ino;
	int xinobits = ovl_xino_bits(dir->d_sb);
	int err = 0;

	if (!ovl_same_sb(dir->d_sb) && !xinobits)
		goto out;

	if (p->name[0] == '.') {
		if (p->len == 1) {
			this = dget(dir);
			goto get;
		}
		if (p->len == 2 && p->name[1] == '.') {
			/* we shall not be moved */
			this = dget(dir->d_parent);
			goto get;
		}
	}
	this = lookup_one_len(p->name, dir, p->len);
	if (IS_ERR_OR_NULL(this) || !this->d_inode) {
		if (IS_ERR(this)) {
			err = PTR_ERR(this);
			this = NULL;
			goto fail;
		}
		goto out;
	}

get:
	type = ovl_path_type(this);
	if (OVL_TYPE_ORIGIN(type)) {
		struct kstat stat;
		struct path statpath = *path;

		statpath.dentry = this;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			goto fail;

		WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
		ino = stat.ino;
	} else if (xinobits && !OVL_TYPE_UPPER(type)) {
		ino = ovl_remap_lower_ino(ino, xinobits,
					  ovl_layer_lower(this)->fsid,
					  p->name, p->len);
	}

out:
	p->ino = ino;
	dput(this);
	return err;

fail:
	pr_warn_ratelimited("overlayfs: failed to look up (%s) for ino (%i)\n",
			    p->name, err);
	goto out;
}

static int ovl_fill_plain(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_cache_entry *p;
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return -ENOMEM;
	}
	list_add_tail(&p->l_node, rdd->list);

	return 0;
}

static int ovl_dir_read_impure(struct path *path, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_cache_entry *p, *n;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_plain,
		.list = list,
		.root = root,
	};

	INIT_LIST_HEAD(list);
	*root = RB_ROOT;
	ovl_path_upper(path->dentry, &realpath);

	err = ovl_dir_read(&realpath, &rdd);
	if (err)
		return err;

	list_for_each_entry_safe(p, n, list, l_node) {
		if (strcmp(p->name, ".") != 0 &&
		    strcmp(p->name, "..") != 0) {
			err = ovl_cache_update_ino(path, p);
			if (err)
				return err;
		}
		if (p->ino == p->real_ino) {
			list_del(&p->l_node);
			kfree(p);
		} else {
			struct rb_node **newp = &root->rb_node;
			struct rb_node *parent = NULL;

			if (WARN_ON(ovl_cache_entry_find_link(p->name, p->len,
							      &newp, &parent)))
				return -EIO;

			rb_link_node(&p->node, parent, newp);
			rb_insert_color(&p->node, root);
		}
	}
	return 0;
}

static struct ovl_dir_cache *ovl_cache_get_impure(struct path *path)
{
	int res;
	struct dentry *dentry = path->dentry;
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(d_inode(dentry));
	if (cache && ovl_dentry_version_get(dentry) == cache->version)
		return cache;

	/* Impure cache is not refcounted, free it here */
	ovl_dir_cache_free(d_inode(dentry));
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	res = ovl_dir_read_impure(path, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}
	if (list_empty(&cache->entries)) {
		/*
		 * A good opportunity to get rid of an unneeded "impure" flag.
		 * Removing the "impure" xattr is best effort.
		 */
		if (!ovl_want_write(dentry)) {
			ovl_do_removexattr(ovl_dentry_upper(dentry),
					   OVL_XATTR_IMPURE);
			ovl_drop_write(dentry);
		}
		ovl_clear_flag(OVL_IMPURE, d_inode(dentry));
		kfree(cache);
		return NULL;
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(d_inode(dentry), cache);

	return cache;
}

struct ovl_readdir_translate {
	struct dir_context *orig_ctx;
	struct ovl_dir_cache *cache;
	struct dir_context ctx;
	u64 parent_ino;
	int fsid;
	int xinobits;
};

static int ovl_fill_real(struct dir_context *ctx, const char *name,
			 int namelen, loff_t offset, u64 ino,
			 unsigned int d_type)
{
	struct ovl_readdir_translate *rdt =
		container_of(ctx, struct ovl_readdir_translate, ctx);
	struct dir_context *orig_ctx = rdt->orig_ctx;

	if (rdt->parent_ino && strcmp(name, "..") == 0) {
		ino = rdt->parent_ino;
	} else if (rdt->cache) {
		struct ovl_cache_entry *p;

		p = ovl_cache_entry_find(&rdt->cache->root, name, namelen);
		if (p)
			ino = p->ino;
	} else if (rdt->xinobits) {
		ino = ovl_remap_lower_ino(ino, rdt->xinobits, rdt->fsid,
					  name, namelen);
	}

	return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
}

static bool ovl_is_impure_dir(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct inode *dir = d_inode(file->f_path.dentry);

	/*
	 * Only upper dir can be impure, but if we are in the middle of
	 * iterating a lower real dir, dir could be copied up and marked
	 * impure. We only want the impure cache if we started iterating
	 * a real upper dir to begin with.
	 */
	return od->is_upper && ovl_test_flag(OVL_IMPURE, dir);
}
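/*
 * Iterate the underlying real directory, translating d_ino on the fly via
 * ovl_fill_real(): ".." is replaced with the parent overlay ino when the
 * parent is a merge dir, entries found in the impure cache use the ino
 * recorded there, and otherwise lower inode numbers are remapped into the
 * xino range when xino is enabled.
 */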
static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
{
	int err;
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dir = file->f_path.dentry;
	struct ovl_layer *lower_layer = ovl_layer_lower(dir);
	struct ovl_readdir_translate rdt = {
		.ctx.actor = ovl_fill_real,
		.orig_ctx = ctx,
		.xinobits = ovl_xino_bits(dir->d_sb),
	};

	if (rdt.xinobits && lower_layer)
		rdt.fsid = lower_layer->fsid;

	if (OVL_TYPE_MERGE(ovl_path_type(dir->d_parent))) {
		struct kstat stat;
		struct path statpath = file->f_path;

		statpath.dentry = dir->d_parent;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			return err;

		WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
		rdt.parent_ino = stat.ino;
	}

	if (ovl_is_impure_dir(file)) {
		rdt.cache = ovl_cache_get_impure(&file->f_path);
		if (IS_ERR(rdt.cache))
			return PTR_ERR(rdt.cache);
	}

	err = iterate_dir(od->realfile, &rdt.ctx);
	ctx->pos = rdt.ctx.pos;

	return err;
}

static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct ovl_cache_entry *p;
	int err;

	if (!ctx->pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		/*
		 * If parent is merge, then need to adjust d_ino for '..', if
		 * dir is impure then need to adjust d_ino for copied up
		 * entries.
		 */
		if (ovl_xino_bits(dentry->d_sb) ||
		    (ovl_same_sb(dentry->d_sb) &&
		     (ovl_is_impure_dir(file) ||
		      OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
			return ovl_iterate_real(file, ctx);
		}
		return iterate_dir(od->realfile, ctx);
	}

	if (!od->cache) {
		struct ovl_dir_cache *cache;

		cache = ovl_cache_get(dentry);
		if (IS_ERR(cache))
			return PTR_ERR(cache);

		od->cache = cache;
		ovl_seek_cursor(od, ctx->pos);
	}

	while (od->cursor != &od->cache->entries) {
		p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
		if (!p->is_whiteout) {
			if (!p->ino) {
				err = ovl_cache_update_ino(&file->f_path, p);
				if (err)
					return err;
			}
			if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
				break;
		}
		od->cursor = p->l_node.next;
		ctx->pos++;
	}
	return 0;
}

static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t res;
	struct ovl_dir_file *od = file->private_data;

	inode_lock(file_inode(file));
	if (!file->f_pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		res = vfs_llseek(od->realfile, offset, origin);
		file->f_pos = od->realfile->f_pos;
	} else {
		res = -EINVAL;

		switch (origin) {
		case SEEK_CUR:
			offset += file->f_pos;
			break;
		case SEEK_SET:
			break;
		default:
			goto out_unlock;
		}
		if (offset < 0)
			goto out_unlock;

		if (offset != file->f_pos) {
			file->f_pos = offset;
			if (od->cache)
				ovl_seek_cursor(od, offset);
		}
		res = offset;
	}
out_unlock:
	inode_unlock(file_inode(file));

	return res;
}
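/*
 * Only the upper directory needs syncing.  If this file was opened on a lower
 * dir that has since been copied up, lazily open the upper real file and
 * cache it in od->upperfile (READ_ONCE() paired with smp_store_release(),
 * with races between openers resolved under the inode lock).
 */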
static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct file *realfile = od->realfile;

	/* Nothing to sync for lower */
	if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
		return 0;

	/*
	 * Need to check if we started out being a lower dir, but got copied up
	 */
	if (!od->is_upper) {
		struct inode *inode = file_inode(file);

		realfile = READ_ONCE(od->upperfile);
		if (!realfile) {
			struct path upperpath;

			ovl_path_upper(dentry, &upperpath);
			realfile = ovl_path_open(&upperpath, O_RDONLY);

			inode_lock(inode);
			if (!od->upperfile) {
				if (IS_ERR(realfile)) {
					inode_unlock(inode);
					return PTR_ERR(realfile);
				}
				smp_store_release(&od->upperfile, realfile);
			} else {
				/* somebody has beaten us to it */
				if (!IS_ERR(realfile))
					fput(realfile);
				realfile = od->upperfile;
			}
			inode_unlock(inode);
		}
	}

	return vfs_fsync_range(realfile, start, end, datasync);
}

static int ovl_dir_release(struct inode *inode, struct file *file)
{
	struct ovl_dir_file *od = file->private_data;

	if (od->cache) {
		inode_lock(inode);
		ovl_cache_put(od, file->f_path.dentry);
		inode_unlock(inode);
	}
	fput(od->realfile);
	if (od->upperfile)
		fput(od->upperfile);
	kfree(od);

	return 0;
}

static int ovl_dir_open(struct inode *inode, struct file *file)
{
	struct path realpath;
	struct file *realfile;
	struct ovl_dir_file *od;
	enum ovl_path_type type;

	od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	type = ovl_path_real(file->f_path.dentry, &realpath);
	realfile = ovl_path_open(&realpath, file->f_flags);
	if (IS_ERR(realfile)) {
		kfree(od);
		return PTR_ERR(realfile);
	}
	od->realfile = realfile;
	od->is_real = ovl_dir_is_real(file->f_path.dentry);
	od->is_upper = OVL_TYPE_UPPER(type);
	file->private_data = od;

	return 0;
}

const struct file_operations ovl_dir_operations = {
	.read		= generic_read_dir,
	.open		= ovl_dir_open,
	.iterate	= ovl_iterate,
	.llseek		= ovl_dir_llseek,
	.fsync		= ovl_dir_fsync,
	.release	= ovl_dir_release,
};
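/*
 * Check whether the merged directory is empty apart from "." and "..".
 * Whiteouts found on the upper layer are kept on @list so the caller can
 * remove them with ovl_cleanup_whiteouts(); any other entry makes the
 * directory non-empty (-ENOTEMPTY).
 */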
int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
{
	int err;
	struct ovl_cache_entry *p, *n;
	struct rb_root root = RB_ROOT;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(dentry->d_sb);
	err = ovl_dir_read_merged(dentry, list, &root);
	revert_creds(old_cred);
	if (err)
		return err;

	err = 0;

	list_for_each_entry_safe(p, n, list, l_node) {
		/*
		 * Select whiteouts in upperdir, they should
		 * be cleared when deleting this directory.
		 */
		if (p->is_whiteout) {
			if (p->is_upper)
				continue;
			goto del_entry;
		}

		if (p->name[0] == '.') {
			if (p->len == 1)
				goto del_entry;
			if (p->len == 2 && p->name[1] == '.')
				goto del_entry;
		}
		err = -ENOTEMPTY;
		break;

del_entry:
		list_del(&p->l_node);
		kfree(p);
	}

	return err;
}

void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
{
	struct ovl_cache_entry *p;

	inode_lock_nested(upper->d_inode, I_MUTEX_CHILD);
	list_for_each_entry(p, list, l_node) {
		struct dentry *dentry;

		if (WARN_ON(!p->is_whiteout || !p->is_upper))
			continue;

		dentry = lookup_one_len(p->name, upper, p->len);
		if (IS_ERR(dentry)) {
			pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
			       upper->d_name.name, p->len, p->name,
			       (int) PTR_ERR(dentry));
			continue;
		}
		if (dentry->d_inode)
			ovl_cleanup(upper->d_inode, dentry);
		dput(dentry);
	}
	inode_unlock(upper->d_inode);
}

static int ovl_check_d_type(struct dir_context *ctx, const char *name,
			    int namelen, loff_t offset, u64 ino,
			    unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	/* Even if d_type is not supported, DT_DIR is returned for . and .. */
	if (!strncmp(name, ".", namelen) || !strncmp(name, "..", namelen))
		return 0;

	if (d_type != DT_UNKNOWN)
		rdd->d_type_supported = true;

	return 0;
}

/*
 * Returns 1 if d_type is supported, 0 if not supported/unknown. Negative
 * values if an error is encountered.
 */
int ovl_check_d_type_supported(struct path *realpath)
{
	int err;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_check_d_type,
		.d_type_supported = false,
	};

	err = ovl_dir_read(realpath, &rdd);
	if (err)
		return err;

	return rdd.d_type_supported;
}

static void ovl_workdir_cleanup_recurse(struct path *path, int level)
{
	int err;
	struct inode *dir = path->dentry->d_inode;
	LIST_HEAD(list);
	struct rb_root root = RB_ROOT;
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = NULL,
		.list = &list,
		.root = &root,
		.is_lowest = false,
	};

	err = ovl_dir_read(path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		struct dentry *dentry;

		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		dentry = lookup_one_len(p->name, path->dentry, p->len);
		if (IS_ERR(dentry))
			continue;
		if (dentry->d_inode)
			ovl_workdir_cleanup(dir, path->mnt, dentry, level);
		dput(dentry);
	}
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
}

void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
			 struct dentry *dentry, int level)
{
	int err;

	if (!d_is_dir(dentry) || level > 1) {
		ovl_cleanup(dir, dentry);
		return;
	}

	err = ovl_do_rmdir(dir, dentry);
	if (err) {
		struct path path = { .mnt = mnt, .dentry = dentry };

		inode_unlock(dir);
		ovl_workdir_cleanup_recurse(&path, level + 1);
		inode_lock_nested(dir, I_MUTEX_PARENT);
		ovl_cleanup(dir, dentry);
	}
}
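/*
 * Iterate the index directory at mount time and verify each index entry with
 * ovl_verify_index().  Stale entries are removed, orphan entries are removed
 * or whiteouted (with nfs_export enabled), and any other failure aborts the
 * mount to avoid corrupting the index.
 */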
int ovl_indexdir_cleanup(struct ovl_fs *ofs)
{
	int err;
	struct dentry *indexdir = ofs->indexdir;
	struct dentry *index = NULL;
	struct inode *dir = indexdir->d_inode;
	struct path path = { .mnt = ofs->upper_mnt, .dentry = indexdir };
	LIST_HEAD(list);
	struct rb_root root = RB_ROOT;
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = NULL,
		.list = &list,
		.root = &root,
		.is_lowest = false,
	};

	err = ovl_dir_read(&path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		index = lookup_one_len(p->name, indexdir, p->len);
		if (IS_ERR(index)) {
			err = PTR_ERR(index);
			index = NULL;
			break;
		}
		err = ovl_verify_index(ofs, index);
		if (!err) {
			goto next;
		} else if (err == -ESTALE) {
			/* Cleanup stale index entries */
			err = ovl_cleanup(dir, index);
		} else if (err != -ENOENT) {
			/*
			 * Abort mount to avoid corrupting the index if
			 * an incompatible index entry was found or on out
			 * of memory.
			 */
			break;
		} else if (ofs->config.nfs_export) {
			/*
			 * Whiteout orphan index to block future open by
			 * handle after overlay nlink dropped to zero.
			 */
			err = ovl_cleanup_and_whiteout(indexdir, dir, index);
		} else {
			/* Cleanup orphan index entries */
			err = ovl_cleanup(dir, index);
		}

		if (err)
			break;

next:
		dput(index);
		index = NULL;
	}
	dput(index);
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
	if (err)
		pr_err("overlayfs: failed index dir cleanup (%i)\n", err);
	return err;
}