// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2011 Novell Inc.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/xattr.h>
#include <linux/rbtree.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/ratelimit.h>
#include "overlayfs.h"

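/*
 * A single directory entry held in an overlay dir cache.  Entries are
 * linked on the cache's ordered list via l_node and indexed by name in
 * an rb-tree via node (for duplicate detection while merging layers and
 * for lookups in the impure dir cache).
 */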
struct ovl_cache_entry {
	unsigned int len;
	unsigned int type;
	u64 real_ino;
	u64 ino;
	struct list_head l_node;
	struct rb_node node;
	struct ovl_cache_entry *next_maybe_whiteout;
	bool is_upper;
	bool is_whiteout;
	char name[];
};

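/*
 * Cached contents of an overlay directory, attached to the inode and
 * invalidated by bumping the inode version.  Merge dir caches are
 * refcounted; impure dir caches are not.
 */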
struct ovl_dir_cache {
	long refcount;
	u64 version;
	struct list_head entries;
	struct rb_root root;
};

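/* State shared between ovl_dir_read() and its dir_context fill actors */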
struct ovl_readdir_data {
	struct dir_context ctx;
	struct dentry *dentry;
	bool is_lowest;
	struct rb_root *root;
	struct list_head *list;
	struct list_head middle;
	struct ovl_cache_entry *first_maybe_whiteout;
	int count;
	int err;
	bool is_upper;
	bool d_type_supported;
};

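/* Per-open-file state, hung off file->private_data of an overlay dir */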
struct ovl_dir_file {
	bool is_real;
	bool is_upper;
	struct ovl_dir_cache *cache;
	struct list_head *cursor;
	struct file *realfile;
	struct file *upperfile;
};

static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
{
	return rb_entry(n, struct ovl_cache_entry, node);
}

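/*
 * Find the rb-tree slot for @name.  Returns true if an entry with that
 * name already exists; otherwise *link and *parent are set up so the
 * caller can insert a new node with rb_link_node().
 */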
static bool ovl_cache_entry_find_link(const char *name, int len,
				      struct rb_node ***link,
				      struct rb_node **parent)
{
	bool found = false;
	struct rb_node **newp = *link;

	while (!found && *newp) {
		int cmp;
		struct ovl_cache_entry *tmp;

		*parent = *newp;
		tmp = ovl_cache_entry_from_node(*newp);
		cmp = strncmp(name, tmp->name, len);
		if (cmp > 0)
			newp = &tmp->node.rb_right;
		else if (cmp < 0 || len < tmp->len)
			newp = &tmp->node.rb_left;
		else
			found = true;
	}
	*link = newp;

	return found;
}

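/* Look up a cache entry by name, or return NULL if not cached */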
static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
						    const char *name, int len)
{
	struct rb_node *node = root->rb_node;
	int cmp;

	while (node) {
		struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);

		cmp = strncmp(name, p->name, len);
		if (cmp > 0)
			node = p->node.rb_right;
		else if (cmp < 0 || len < p->len)
			node = p->node.rb_left;
		else
			return p;
	}

	return NULL;
}

static bool ovl_calc_d_ino(struct ovl_readdir_data *rdd,
			   struct ovl_cache_entry *p)
{
	/* Don't care if not doing ovl_iterate() */
	if (!rdd->dentry)
		return false;

	/* Always recalc d_ino when remapping lower inode numbers */
	if (ovl_xino_bits(OVL_FS(rdd->dentry->d_sb)))
		return true;

	/* Always recalc d_ino for parent */
	if (strcmp(p->name, "..") == 0)
		return true;

	/* If this is lower, then native d_ino will do */
	if (!rdd->is_upper)
		return false;

	/*
	 * Recalc d_ino for '.' and for all entries if dir is impure (contains
	 * copied up entries)
	 */
	if ((p->name[0] == '.' && p->len == 1) ||
	    ovl_test_flag(OVL_IMPURE, d_inode(rdd->dentry)))
		return true;

	return false;
}

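/*
 * Allocate a new cache entry.  A DT_CHR entry may turn out to be a
 * whiteout, so it is chained on first_maybe_whiteout for a deferred
 * lookup_one() check under the directory lock.
 */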
static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
						   const char *name, int len,
						   u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;
	size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);

	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		return NULL;

	memcpy(p->name, name, len);
	p->name[len] = '\0';
	p->len = len;
	p->type = d_type;
	p->real_ino = ino;
	p->ino = ino;
	/* Defer setting d_ino for upper entry to ovl_iterate() */
	if (ovl_calc_d_ino(rdd, p))
		p->ino = 0;
	p->is_upper = rdd->is_upper;
	p->is_whiteout = false;

	if (d_type == DT_CHR) {
		p->next_maybe_whiteout = rdd->first_maybe_whiteout;
		rdd->first_maybe_whiteout = p;
	}
	return p;
}

static bool ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
				   const char *name, int len, u64 ino,
				   unsigned int d_type)
{
	struct rb_node **newp = &rdd->root->rb_node;
	struct rb_node *parent = NULL;
	struct ovl_cache_entry *p;

	if (ovl_cache_entry_find_link(name, len, &newp, &parent))
		return true;

	p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return false;
	}

	list_add_tail(&p->l_node, rdd->list);
	rb_link_node(&p->node, parent, newp);
	rb_insert_color(&p->node, rdd->root);

	return true;
}

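/*
 * Fill callback used for the lowest layer: entries already present from
 * an upper layer are moved onto the temporary "middle" list, new ones
 * are created there, so lowest layer entries end up grouped together at
 * the front of the merged list.
 */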
static bool ovl_fill_lowest(struct ovl_readdir_data *rdd,
			    const char *name, int namelen,
			    loff_t offset, u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;

	p = ovl_cache_entry_find(rdd->root, name, namelen);
	if (p) {
		list_move_tail(&p->l_node, &rdd->middle);
	} else {
		p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
		if (p == NULL)
			rdd->err = -ENOMEM;
		else
			list_add_tail(&p->l_node, &rdd->middle);
	}

	return rdd->err == 0;
}

void ovl_cache_free(struct list_head *list)
{
	struct ovl_cache_entry *p;
	struct ovl_cache_entry *n;

	list_for_each_entry_safe(p, n, list, l_node)
		kfree(p);

	INIT_LIST_HEAD(list);
}

void ovl_dir_cache_free(struct inode *inode)
{
	struct ovl_dir_cache *cache = ovl_dir_cache(inode);

	if (cache) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

static void ovl_cache_put(struct ovl_dir_file *od, struct inode *inode)
{
	struct ovl_dir_cache *cache = od->cache;

	WARN_ON(cache->refcount <= 0);
	cache->refcount--;
	if (!cache->refcount) {
		if (ovl_dir_cache(inode) == cache)
			ovl_set_dir_cache(inode, NULL);

		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

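/* dir_context actor for building the merged cache, one layer at a time */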
static bool ovl_fill_merge(struct dir_context *ctx, const char *name,
			   int namelen, loff_t offset, u64 ino,
			   unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	if (!rdd->is_lowest)
		return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
	else
		return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type);
}

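/*
 * Resolve the deferred DT_CHR checks: look up each candidate under the
 * directory lock and record whether it really is a whiteout.
 */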
static int ovl_check_whiteouts(const struct path *path, struct ovl_readdir_data *rdd)
{
	int err;
	struct ovl_cache_entry *p;
	struct dentry *dentry, *dir = path->dentry;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(rdd->dentry->d_sb);

	err = down_write_killable(&dir->d_inode->i_rwsem);
	if (!err) {
		while (rdd->first_maybe_whiteout) {
			p = rdd->first_maybe_whiteout;
			rdd->first_maybe_whiteout = p->next_maybe_whiteout;
			dentry = lookup_one(mnt_idmap(path->mnt), p->name, dir, p->len);
			if (!IS_ERR(dentry)) {
				p->is_whiteout = ovl_is_whiteout(dentry);
				dput(dentry);
			}
		}
		inode_unlock(dir->d_inode);
	}
	revert_creds(old_cred);

	return err;
}

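/*
 * Read one real directory into @rdd, calling iterate_dir() repeatedly
 * until no more entries are returned.
 */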
static inline int ovl_dir_read(const struct path *realpath,
			       struct ovl_readdir_data *rdd)
{
	struct file *realfile;
	int err;

	realfile = ovl_path_open(realpath, O_RDONLY | O_LARGEFILE);
	if (IS_ERR(realfile))
		return PTR_ERR(realfile);

	rdd->first_maybe_whiteout = NULL;
	rdd->ctx.pos = 0;
	do {
		rdd->count = 0;
		rdd->err = 0;
		err = iterate_dir(realfile, &rdd->ctx);
		if (err >= 0)
			err = rdd->err;
	} while (!err && rdd->count);

	if (!err && rdd->first_maybe_whiteout && rdd->dentry)
		err = ovl_check_whiteouts(realpath, rdd);

	fput(realfile);

	return err;
}

static void ovl_dir_reset(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct ovl_dir_cache *cache = od->cache;
	struct inode *inode = file_inode(file);
	bool is_real;

	if (cache && ovl_inode_version_get(inode) != cache->version) {
		ovl_cache_put(od, inode);
		od->cache = NULL;
		od->cursor = NULL;
	}
	is_real = ovl_dir_is_real(inode);
	if (od->is_real != is_real) {
		/* is_real can only become false when dir is copied up */
		if (WARN_ON(is_real))
			return;
		od->is_real = false;
	}
}

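/*
 * Read all layers of @dentry into a single merged list, with lowest
 * layer entries inserted before upper ones via the "middle" list.
 */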
static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = dentry,
		.list = list,
		.root = root,
		.is_lowest = false,
	};
	int idx, next;

	for (idx = 0; idx != -1; idx = next) {
		next = ovl_path_next(idx, dentry, &realpath);
		rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry;

		if (next != -1) {
			err = ovl_dir_read(&realpath, &rdd);
			if (err)
				break;
		} else {
			/*
			 * Insert lowest layer entries before upper ones, this
			 * allows offsets to be reasonably constant
			 */
			list_add(&rdd.middle, rdd.list);
			rdd.is_lowest = true;
			err = ovl_dir_read(&realpath, &rdd);
			list_del(&rdd.middle);
		}
	}
	return err;
}

static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
{
	struct list_head *p;
	loff_t off = 0;

	list_for_each(p, &od->cache->entries) {
		if (off >= pos)
			break;
		off++;
	}
	/* Cursor is safe since the cache is stable */
	od->cursor = p;
}

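/* Get the merged dir cache, reusing a still-valid one or rebuilding it */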
static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
{
	int res;
	struct ovl_dir_cache *cache;
	struct inode *inode = d_inode(dentry);

	cache = ovl_dir_cache(inode);
	if (cache && ovl_inode_version_get(inode) == cache->version) {
		WARN_ON(!cache->refcount);
		cache->refcount++;
		return cache;
	}
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	cache->refcount = 1;
	INIT_LIST_HEAD(&cache->entries);
	cache->root = RB_ROOT;

	res = ovl_dir_read_merged(dentry, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}

	cache->version = ovl_inode_version_get(inode);
	ovl_set_dir_cache(inode, cache);

	return cache;
}

/* Map inode number to lower fs unique range */
static u64 ovl_remap_lower_ino(u64 ino, int xinobits, int fsid,
			       const char *name, int namelen, bool warn)
{
	unsigned int xinoshift = 64 - xinobits;

	if (unlikely(ino >> xinoshift)) {
		if (warn) {
			pr_warn_ratelimited("d_ino too big (%.*s, ino=%llu, xinobits=%d)\n",
					    namelen, name, ino, xinobits);
		}
		return ino;
	}

	/*
	 * The lowest xinobit is reserved for mapping the non-persistent inode
	 * numbers range, but this range is only exposed via st_ino, not here.
	 */
	return ino | ((u64)fsid) << (xinoshift + 1);
}

/*
 * Set d_ino for upper entries. Non-upper entries should always report
 * the uppermost real inode ino and should not call this function.
 *
 * When not all layers are on the same fs, report the real ino also for
 * upper entries.
 *
 * When all layers are on the same fs, and upper has a reference to
 * copy up origin, call vfs_getattr() on the overlay entry to make
 * sure that d_ino will be consistent with st_ino from stat(2).
 */
static int ovl_cache_update_ino(const struct path *path, struct ovl_cache_entry *p)
{
	struct dentry *dir = path->dentry;
	struct ovl_fs *ofs = OVL_FS(dir->d_sb);
	struct dentry *this = NULL;
	enum ovl_path_type type;
	u64 ino = p->real_ino;
	int xinobits = ovl_xino_bits(ofs);
	int err = 0;

	if (!ovl_same_dev(ofs))
		goto out;

	if (p->name[0] == '.') {
		if (p->len == 1) {
			this = dget(dir);
			goto get;
		}
		if (p->len == 2 && p->name[1] == '.') {
			/* we shall not be moved */
			this = dget(dir->d_parent);
			goto get;
		}
	}
	this = lookup_one(mnt_idmap(path->mnt), p->name, dir, p->len);
	if (IS_ERR_OR_NULL(this) || !this->d_inode) {
		/* Mark a stale entry */
		p->is_whiteout = true;
		if (IS_ERR(this)) {
			err = PTR_ERR(this);
			this = NULL;
			goto fail;
		}
		goto out;
	}

get:
	type = ovl_path_type(this);
	if (OVL_TYPE_ORIGIN(type)) {
		struct kstat stat;
		struct path statpath = *path;

		statpath.dentry = this;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			goto fail;

		/*
		 * Directory inode is always on overlay st_dev.
		 * Non-dir with ovl_same_dev() could be on pseudo st_dev in case
		 * of xino bits overflow.
		 */
		WARN_ON_ONCE(S_ISDIR(stat.mode) &&
			     dir->d_sb->s_dev != stat.dev);
		ino = stat.ino;
	} else if (xinobits && !OVL_TYPE_UPPER(type)) {
		ino = ovl_remap_lower_ino(ino, xinobits,
					  ovl_layer_lower(this)->fsid,
					  p->name, p->len,
					  ovl_xino_warn(ofs));
	}

out:
	p->ino = ino;
	dput(this);
	return err;

fail:
	pr_warn_ratelimited("failed to look up (%s) for ino (%i)\n",
			    p->name, err);
	goto out;
}

static bool ovl_fill_plain(struct dir_context *ctx, const char *name,
			   int namelen, loff_t offset, u64 ino,
			   unsigned int d_type)
{
	struct ovl_cache_entry *p;
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return false;
	}
	list_add_tail(&p->l_node, rdd->list);

	return true;
}

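/*
 * Build the cache of an impure directory: only entries whose d_ino
 * differs from the real inode number are kept and indexed by name.
 */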
static int ovl_dir_read_impure(const struct path *path, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_cache_entry *p, *n;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_plain,
		.list = list,
		.root = root,
	};

	INIT_LIST_HEAD(list);
	*root = RB_ROOT;
	ovl_path_upper(path->dentry, &realpath);

	err = ovl_dir_read(&realpath, &rdd);
	if (err)
		return err;

	list_for_each_entry_safe(p, n, list, l_node) {
		if (strcmp(p->name, ".") != 0 &&
		    strcmp(p->name, "..") != 0) {
			err = ovl_cache_update_ino(path, p);
			if (err)
				return err;
		}
		if (p->ino == p->real_ino) {
			list_del(&p->l_node);
			kfree(p);
		} else {
			struct rb_node **newp = &root->rb_node;
			struct rb_node *parent = NULL;

			if (WARN_ON(ovl_cache_entry_find_link(p->name, p->len,
							      &newp, &parent)))
				return -EIO;

			rb_link_node(&p->node, parent, newp);
			rb_insert_color(&p->node, root);
		}
	}
	return 0;
}

static struct ovl_dir_cache *ovl_cache_get_impure(const struct path *path)
{
	int res;
	struct dentry *dentry = path->dentry;
	struct inode *inode = d_inode(dentry);
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(inode);
	if (cache && ovl_inode_version_get(inode) == cache->version)
		return cache;

	/* Impure cache is not refcounted, free it here */
	ovl_dir_cache_free(inode);
	ovl_set_dir_cache(inode, NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	res = ovl_dir_read_impure(path, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}
	if (list_empty(&cache->entries)) {
		/*
		 * A good opportunity to get rid of an unneeded "impure" flag.
		 * Removing the "impure" xattr is best effort.
		 */
		if (!ovl_want_write(dentry)) {
			ovl_removexattr(ofs, ovl_dentry_upper(dentry),
					OVL_XATTR_IMPURE);
			ovl_drop_write(dentry);
		}
		ovl_clear_flag(OVL_IMPURE, inode);
		kfree(cache);
		return NULL;
	}

	cache->version = ovl_inode_version_get(inode);
	ovl_set_dir_cache(inode, cache);

	return cache;
}

struct ovl_readdir_translate {
	struct dir_context *orig_ctx;
	struct ovl_dir_cache *cache;
	struct dir_context ctx;
	u64 parent_ino;
	int fsid;
	int xinobits;
	bool xinowarn;
};

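/*
 * Translating actor wrapped around the original dir_context: fixes up
 * d_ino for "..", for cached impure entries and for xino remapping.
 */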
static bool ovl_fill_real(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_readdir_translate *rdt =
		container_of(ctx, struct ovl_readdir_translate, ctx);
	struct dir_context *orig_ctx = rdt->orig_ctx;

	if (rdt->parent_ino && strcmp(name, "..") == 0) {
		ino = rdt->parent_ino;
	} else if (rdt->cache) {
		struct ovl_cache_entry *p;

		p = ovl_cache_entry_find(&rdt->cache->root, name, namelen);
		if (p)
			ino = p->ino;
	} else if (rdt->xinobits) {
		ino = ovl_remap_lower_ino(ino, rdt->xinobits, rdt->fsid,
					  name, namelen, rdt->xinowarn);
	}

	return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
}

static bool ovl_is_impure_dir(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct inode *dir = file_inode(file);

	/*
	 * Only upper dir can be impure, but if we are in the middle of
	 * iterating a lower real dir, dir could be copied up and marked
	 * impure. We only want the impure cache if we started iterating
	 * a real upper dir to begin with.
	 */
	return od->is_upper && ovl_test_flag(OVL_IMPURE, dir);
}

static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
{
	int err;
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dir = file->f_path.dentry;
	struct ovl_fs *ofs = OVL_FS(dir->d_sb);
	const struct ovl_layer *lower_layer = ovl_layer_lower(dir);
	struct ovl_readdir_translate rdt = {
		.ctx.actor = ovl_fill_real,
		.orig_ctx = ctx,
		.xinobits = ovl_xino_bits(ofs),
		.xinowarn = ovl_xino_warn(ofs),
	};

	if (rdt.xinobits && lower_layer)
		rdt.fsid = lower_layer->fsid;

	if (OVL_TYPE_MERGE(ovl_path_type(dir->d_parent))) {
		struct kstat stat;
		struct path statpath = file->f_path;

		statpath.dentry = dir->d_parent;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			return err;

		WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
		rdt.parent_ino = stat.ino;
	}

	if (ovl_is_impure_dir(file)) {
		rdt.cache = ovl_cache_get_impure(&file->f_path);
		if (IS_ERR(rdt.cache))
			return PTR_ERR(rdt.cache);
	}

	err = iterate_dir(od->realfile, &rdt.ctx);
	ctx->pos = rdt.ctx.pos;

	return err;
}

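/*
 * For a real (non-merged) dir, iterate the underlying directory
 * directly, translating inode numbers only when needed; otherwise emit
 * entries from the merged cache, updating d_ino lazily.
 */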
static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	struct ovl_cache_entry *p;
	const struct cred *old_cred;
	int err;

	old_cred = ovl_override_creds(dentry->d_sb);
	if (!ctx->pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		/*
		 * If parent is merge, then need to adjust d_ino for '..', if
		 * dir is impure then need to adjust d_ino for copied up
		 * entries.
		 */
		if (ovl_xino_bits(ofs) ||
		    (ovl_same_fs(ofs) &&
		     (ovl_is_impure_dir(file) ||
		      OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
			err = ovl_iterate_real(file, ctx);
		} else {
			err = iterate_dir(od->realfile, ctx);
		}
		goto out;
	}

	if (!od->cache) {
		struct ovl_dir_cache *cache;

		cache = ovl_cache_get(dentry);
		err = PTR_ERR(cache);
		if (IS_ERR(cache))
			goto out;

		od->cache = cache;
		ovl_seek_cursor(od, ctx->pos);
	}

	while (od->cursor != &od->cache->entries) {
		p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
		if (!p->is_whiteout) {
			if (!p->ino) {
				err = ovl_cache_update_ino(&file->f_path, p);
				if (err)
					goto out;
			}
		}
		/* ovl_cache_update_ino() sets is_whiteout on stale entry */
		if (!p->is_whiteout) {
			if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
				break;
		}
		od->cursor = p->l_node.next;
		ctx->pos++;
	}
	err = 0;
out:
	revert_creds(old_cred);
	return err;
}

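/* Seek the real file for real dirs; move the cache cursor otherwise */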
static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t res;
	struct ovl_dir_file *od = file->private_data;

	inode_lock(file_inode(file));
	if (!file->f_pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		res = vfs_llseek(od->realfile, offset, origin);
		file->f_pos = od->realfile->f_pos;
	} else {
		res = -EINVAL;

		switch (origin) {
		case SEEK_CUR:
			offset += file->f_pos;
			break;
		case SEEK_SET:
			break;
		default:
			goto out_unlock;
		}
		if (offset < 0)
			goto out_unlock;

		if (offset != file->f_pos) {
			file->f_pos = offset;
			if (od->cache)
				ovl_seek_cursor(od, offset);
		}
		res = offset;
	}
out_unlock:
	inode_unlock(file_inode(file));

	return res;
}

static struct file *ovl_dir_open_realfile(const struct file *file,
					  const struct path *realpath)
{
	struct file *res;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(file_inode(file)->i_sb);
	res = ovl_path_open(realpath, O_RDONLY | (file->f_flags & O_LARGEFILE));
	revert_creds(old_cred);

	return res;
}

/*
 * Like ovl_real_fdget(), returns upperfile if dir was copied up since open.
 * Unlike ovl_real_fdget(), this caches upperfile in file->private_data.
 *
 * TODO: use the same abstract type for file->private_data of dir and file so
 * that upperfile could be cached for files as well.
 */
struct file *ovl_dir_real_file(const struct file *file, bool want_upper)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct file *old, *realfile = od->realfile;

	if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
		return want_upper ? NULL : realfile;

	/*
	 * Need to check if we started out being a lower dir, but got copied up
	 */
	if (!od->is_upper) {
		realfile = READ_ONCE(od->upperfile);
		if (!realfile) {
			struct path upperpath;

			ovl_path_upper(dentry, &upperpath);
			realfile = ovl_dir_open_realfile(file, &upperpath);
			if (IS_ERR(realfile))
				return realfile;

			old = cmpxchg_release(&od->upperfile, NULL, realfile);
			if (old) {
				fput(realfile);
				realfile = old;
			}
		}
	}

	return realfile;
}

static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct file *realfile;
	int err;

	err = ovl_sync_status(OVL_FS(file_inode(file)->i_sb));
	if (err <= 0)
		return err;

	realfile = ovl_dir_real_file(file, true);
	err = PTR_ERR_OR_ZERO(realfile);

	/* Nothing to sync for lower */
	if (!realfile || err)
		return err;

	return vfs_fsync_range(realfile, start, end, datasync);
}

static int ovl_dir_release(struct inode *inode, struct file *file)
{
	struct ovl_dir_file *od = file->private_data;

	if (od->cache) {
		inode_lock(inode);
		ovl_cache_put(od, inode);
		inode_unlock(inode);
	}
	fput(od->realfile);
	if (od->upperfile)
		fput(od->upperfile);
	kfree(od);

	return 0;
}

static int ovl_dir_open(struct inode *inode, struct file *file)
{
	struct path realpath;
	struct file *realfile;
	struct ovl_dir_file *od;
	enum ovl_path_type type;

	od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	type = ovl_path_real(file->f_path.dentry, &realpath);
	realfile = ovl_dir_open_realfile(file, &realpath);
	if (IS_ERR(realfile)) {
		kfree(od);
		return PTR_ERR(realfile);
	}
	od->realfile = realfile;
	od->is_real = ovl_dir_is_real(inode);
	od->is_upper = OVL_TYPE_UPPER(type);
	file->private_data = od;

	return 0;
}

WRAP_DIR_ITER(ovl_iterate) // FIXME!
const struct file_operations ovl_dir_operations = {
	.read		= generic_read_dir,
	.open		= ovl_dir_open,
	.iterate_shared	= shared_ovl_iterate,
	.llseek		= ovl_dir_llseek,
	.fsync		= ovl_dir_fsync,
	.release	= ovl_dir_release,
};

int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
{
	int err;
	struct ovl_cache_entry *p, *n;
	struct rb_root root = RB_ROOT;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(dentry->d_sb);
	err = ovl_dir_read_merged(dentry, list, &root);
	revert_creds(old_cred);
	if (err)
		return err;

	err = 0;

	list_for_each_entry_safe(p, n, list, l_node) {
		/*
		 * Select whiteouts in upperdir, they should
		 * be cleared when deleting this directory.
		 */
		if (p->is_whiteout) {
			if (p->is_upper)
				continue;
			goto del_entry;
		}

		if (p->name[0] == '.') {
			if (p->len == 1)
				goto del_entry;
			if (p->len == 2 && p->name[1] == '.')
				goto del_entry;
		}
		err = -ENOTEMPTY;
		break;

del_entry:
		list_del(&p->l_node);
		kfree(p);
	}

	return err;
}

void ovl_cleanup_whiteouts(struct ovl_fs *ofs, struct dentry *upper,
			   struct list_head *list)
{
	struct ovl_cache_entry *p;

	inode_lock_nested(upper->d_inode, I_MUTEX_CHILD);
	list_for_each_entry(p, list, l_node) {
		struct dentry *dentry;

		if (WARN_ON(!p->is_whiteout || !p->is_upper))
			continue;

		dentry = ovl_lookup_upper(ofs, p->name, upper, p->len);
		if (IS_ERR(dentry)) {
			pr_err("lookup '%s/%.*s' failed (%i)\n",
			       upper->d_name.name, p->len, p->name,
			       (int) PTR_ERR(dentry));
			continue;
		}
		if (dentry->d_inode)
			ovl_cleanup(ofs, upper->d_inode, dentry);
		dput(dentry);
	}
	inode_unlock(upper->d_inode);
}

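/*
 * Actor that probes whether the underlying fs fills in d_type; "." and
 * ".." are skipped because they report DT_DIR even without support.
 */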
static bool ovl_check_d_type(struct dir_context *ctx, const char *name,
			     int namelen, loff_t offset, u64 ino,
			     unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	/* Even if d_type is not supported, DT_DIR is returned for . and .. */
	if (!strncmp(name, ".", namelen) || !strncmp(name, "..", namelen))
		return true;

	if (d_type != DT_UNKNOWN)
		rdd->d_type_supported = true;

	return true;
}

/*
 * Returns 1 if d_type is supported, 0 if not supported or unknown, and a
 * negative value if an error is encountered.
 */
int ovl_check_d_type_supported(const struct path *realpath)
{
	int err;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_check_d_type,
		.d_type_supported = false,
	};

	err = ovl_dir_read(realpath, &rdd);
	if (err)
		return err;

	return rdd.d_type_supported;
}

#define OVL_INCOMPATDIR_NAME "incompat"

static int ovl_workdir_cleanup_recurse(struct ovl_fs *ofs, const struct path *path,
				       int level)
{
	int err;
	struct inode *dir = path->dentry->d_inode;
	LIST_HEAD(list);
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_plain,
		.list = &list,
	};
	bool incompat = false;

	/*
	 * The "work/incompat" directory is treated specially - if it is not
	 * empty, instead of printing a generic error and mounting read-only,
	 * we will error about incompat features and fail the mount.
	 *
	 * When called from ovl_indexdir_cleanup(), path->dentry->d_name.name
	 * starts with '#'.
	 */
	if (level == 2 &&
	    !strcmp(path->dentry->d_name.name, OVL_INCOMPATDIR_NAME))
		incompat = true;

	err = ovl_dir_read(path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		struct dentry *dentry;

		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		} else if (incompat) {
			pr_err("overlay with incompat feature '%s' cannot be mounted\n",
			       p->name);
			err = -EINVAL;
			break;
		}
		dentry = ovl_lookup_upper(ofs, p->name, path->dentry, p->len);
		if (IS_ERR(dentry))
			continue;
		if (dentry->d_inode)
			err = ovl_workdir_cleanup(ofs, dir, path->mnt, dentry, level);
		dput(dentry);
		if (err)
			break;
	}
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
	return err;
}

int ovl_workdir_cleanup(struct ovl_fs *ofs, struct inode *dir,
			struct vfsmount *mnt, struct dentry *dentry, int level)
{
	int err;

	if (!d_is_dir(dentry) || level > 1)
		return ovl_cleanup(ofs, dir, dentry);

	err = ovl_do_rmdir(ofs, dir, dentry);
	if (err) {
		struct path path = { .mnt = mnt, .dentry = dentry };

		inode_unlock(dir);
		err = ovl_workdir_cleanup_recurse(ofs, &path, level + 1);
		inode_lock_nested(dir, I_MUTEX_PARENT);
		if (!err)
			err = ovl_cleanup(ofs, dir, dentry);
	}

	return err;
}

int ovl_indexdir_cleanup(struct ovl_fs *ofs)
{
	int err;
	struct dentry *indexdir = ofs->indexdir;
	struct dentry *index = NULL;
	struct inode *dir = indexdir->d_inode;
	struct path path = { .mnt = ovl_upper_mnt(ofs), .dentry = indexdir };
	LIST_HEAD(list);
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_plain,
		.list = &list,
	};

	err = ovl_dir_read(&path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		index = ovl_lookup_upper(ofs, p->name, indexdir, p->len);
		if (IS_ERR(index)) {
			err = PTR_ERR(index);
			index = NULL;
			break;
		}
		/* Cleanup leftover from index create/cleanup attempt */
		if (index->d_name.name[0] == '#') {
			err = ovl_workdir_cleanup(ofs, dir, path.mnt, index, 1);
			if (err)
				break;
			goto next;
		}
		err = ovl_verify_index(ofs, index);
		if (!err) {
			goto next;
		} else if (err == -ESTALE) {
			/* Cleanup stale index entries */
			err = ovl_cleanup(ofs, dir, index);
		} else if (err != -ENOENT) {
			/*
			 * Abort mount to avoid corrupting the index if
			 * an incompatible index entry was found or on out
			 * of memory.
			 */
			break;
		} else if (ofs->config.nfs_export) {
			/*
			 * Whiteout orphan index to block future open by
			 * handle after overlay nlink dropped to zero.
			 */
			err = ovl_cleanup_and_whiteout(ofs, dir, index);
		} else {
			/* Cleanup orphan index entries */
			err = ovl_cleanup(ofs, dir, index);
		}

		if (err)
			break;

next:
		dput(index);
		index = NULL;
	}
	dput(index);
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
	if (err)
		pr_err("failed index dir cleanup (%i)\n", err);
	return err;
}