/* xref: /openbmc/linux/fs/overlayfs/readdir.c (revision d894fc60) */
/*
 *
 * Copyright (C) 2011 Novell Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/xattr.h>
#include <linux/rbtree.h>
#include <linux/security.h>
#include <linux/cred.h>
#include "overlayfs.h"

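/*
 * A single cached directory entry.  Each entry sits on the cache's list
 * (via l_node, which also defines the readdir offset order) and, while
 * the merged listing is being built, in an rb-tree (via node) used to
 * detect names duplicated across layers.  is_whiteout marks entries
 * that must not be reported to userspace.
 */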
struct ovl_cache_entry {
	unsigned int len;
	unsigned int type;
	u64 ino;
	struct list_head l_node;
	struct rb_node node;
	bool is_whiteout;
	char name[];
};

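/*
 * Cache of merged entries for one overlay directory, attached to the
 * dentry, shared via a refcount and invalidated when the dentry's
 * version number changes.
 */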
struct ovl_dir_cache {
	long refcount;
	u64 version;
	struct list_head entries;
};

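/*
 * State shared by the dir_context actor across ovl_dir_read() passes.
 * is_merge is false while the non-lowest layers fill the rb-tree and
 * entry list, and true for the final pass, which collects the lowest
 * layer's entries on the "middle" list.
 */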
struct ovl_readdir_data {
	struct dir_context ctx;
	bool is_merge;
	struct rb_root root;
	struct list_head *list;
	struct list_head middle;
	struct dentry *dir;
	int count;
	int err;
};

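/* Per-open state of an overlay directory (file->private_data) */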
struct ovl_dir_file {
	bool is_real;
	bool is_upper;
	struct ovl_dir_cache *cache;
	struct list_head *cursor;
	struct file *realfile;
	struct file *upperfile;
};

static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
{
	return container_of(n, struct ovl_cache_entry, node);
}

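/* Find a name in the rb-tree of entries collected so far */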
static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
						    const char *name, int len)
{
	struct rb_node *node = root->rb_node;
	int cmp;

	while (node) {
		struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);

		cmp = strncmp(name, p->name, len);
		if (cmp > 0)
			node = p->node.rb_right;
		else if (cmp < 0 || len < p->len)
			node = p->node.rb_left;
		else
			return p;
	}

	return NULL;
}

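/*
 * Allocate a cache entry for one directory entry.  Character devices
 * may be overlayfs whiteouts, so look the name up (with
 * CAP_DAC_OVERRIDE raised) and record whether it really is one.
 */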
static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir,
						   const char *name, int len,
						   u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;
	size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);

	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		return NULL;

	memcpy(p->name, name, len);
	p->name[len] = '\0';
	p->len = len;
	p->type = d_type;
	p->ino = ino;
	p->is_whiteout = false;

	if (d_type == DT_CHR) {
		struct dentry *dentry;
		const struct cred *old_cred;
		struct cred *override_cred;

		override_cred = prepare_creds();
		if (!override_cred) {
			kfree(p);
			return NULL;
		}

		/*
		 * CAP_DAC_OVERRIDE for lookup
		 */
		cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
		old_cred = override_creds(override_cred);

		dentry = lookup_one_len(name, dir, len);
		if (!IS_ERR(dentry)) {
			p->is_whiteout = ovl_is_whiteout(dentry);
			dput(dentry);
		}
		revert_creds(old_cred);
		put_cred(override_cred);
	}
	return p;
}

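/*
 * Add an entry to the rb-tree and to the tail of the entry list, unless
 * the name was already seen in an earlier (higher) layer.
 */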
static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
				  const char *name, int len, u64 ino,
				  unsigned int d_type)
{
	struct rb_node **newp = &rdd->root.rb_node;
	struct rb_node *parent = NULL;
	struct ovl_cache_entry *p;

	while (*newp) {
		int cmp;
		struct ovl_cache_entry *tmp;

		parent = *newp;
		tmp = ovl_cache_entry_from_node(*newp);
		cmp = strncmp(name, tmp->name, len);
		if (cmp > 0)
			newp = &tmp->node.rb_right;
		else if (cmp < 0 || len < tmp->len)
			newp = &tmp->node.rb_left;
		else
			return 0;
	}

	p = ovl_cache_entry_new(rdd->dir, name, len, ino, d_type);
	if (p == NULL)
		return -ENOMEM;

	list_add_tail(&p->l_node, rdd->list);
	rb_link_node(&p->node, parent, newp);
	rb_insert_color(&p->node, &rdd->root);

	return 0;
}

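/*
 * Actor helper for the lowest layer: entries whose names were already
 * seen in a higher layer are moved onto the "middle" list, new names
 * get a fresh entry there.  The middle list is later spliced in front
 * of the upper entries by ovl_dir_read_merged().
 */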
static int ovl_fill_lower(struct ovl_readdir_data *rdd,
			  const char *name, int namelen,
			  loff_t offset, u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;

	p = ovl_cache_entry_find(&rdd->root, name, namelen);
	if (p) {
		list_move_tail(&p->l_node, &rdd->middle);
	} else {
		p = ovl_cache_entry_new(rdd->dir, name, namelen, ino, d_type);
		if (p == NULL)
			rdd->err = -ENOMEM;
		else
			list_add_tail(&p->l_node, &rdd->middle);
	}

	return rdd->err;
}

void ovl_cache_free(struct list_head *list)
{
	struct ovl_cache_entry *p;
	struct ovl_cache_entry *n;

	list_for_each_entry_safe(p, n, list, l_node)
		kfree(p);

	INIT_LIST_HEAD(list);
}

static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
{
	struct ovl_dir_cache *cache = od->cache;

	WARN_ON(cache->refcount <= 0);
	cache->refcount--;
	if (!cache->refcount) {
		if (ovl_dir_cache(dentry) == cache)
			ovl_set_dir_cache(dentry, NULL);

		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

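/* dir_context actor: dispatch to the rb-tree fill or to the merge pass */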
static int ovl_fill_merge(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	if (!rdd->is_merge)
		return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
	else
		return ovl_fill_lower(rdd, name, namelen, offset, ino, d_type);
}

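/*
 * Read one underlying directory in full, calling iterate_dir()
 * repeatedly until it produces no more entries or an error occurs.
 */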
static inline int ovl_dir_read(struct path *realpath,
			       struct ovl_readdir_data *rdd)
{
	struct file *realfile;
	int err;

	realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
	if (IS_ERR(realfile))
		return PTR_ERR(realfile);

	rdd->dir = realpath->dentry;
	rdd->ctx.pos = 0;
	do {
		rdd->count = 0;
		rdd->err = 0;
		err = iterate_dir(realfile, &rdd->ctx);
		if (err >= 0)
			err = rdd->err;
	} while (!err && rdd->count);
	fput(realfile);

	return err;
}

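/*
 * Called when the file position is rewound to zero: drop a cache whose
 * version no longer matches the dentry, and stop treating the directory
 * as purely "real" if it has become a merge directory in the meantime.
 */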
static void ovl_dir_reset(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct ovl_dir_cache *cache = od->cache;
	struct dentry *dentry = file->f_path.dentry;
	enum ovl_path_type type = ovl_path_type(dentry);

	if (cache && ovl_dentry_version_get(dentry) != cache->version) {
		ovl_cache_put(od, dentry);
		od->cache = NULL;
		od->cursor = NULL;
	}
	WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type));
	if (od->is_real && OVL_TYPE_MERGE(type))
		od->is_real = false;
}

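/*
 * Build the merged entry list for a directory: every layer except the
 * lowest is read into the rb-tree and entry list, then the lowest layer
 * is read in merge mode so that its entries end up in front of the
 * upper ones.
 */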
static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
{
	int err;
	struct path realpath;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.list = list,
		.root = RB_ROOT,
		.is_merge = false,
	};
	int idx, next;

	for (idx = 0; idx != -1; idx = next) {
		next = ovl_path_next(idx, dentry, &realpath);

		if (next != -1) {
			err = ovl_dir_read(&realpath, &rdd);
			if (err)
				break;
		} else {
			/*
			 * Insert lowest layer entries before upper ones, this
			 * allows offsets to be reasonably constant
			 */
			list_add(&rdd.middle, rdd.list);
			rdd.is_merge = true;
			err = ovl_dir_read(&realpath, &rdd);
			list_del(&rdd.middle);
		}
	}
	return err;
}

static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
{
	struct list_head *p;
	loff_t off = 0;

	list_for_each(p, &od->cache->entries) {
		if (off >= pos)
			break;
		off++;
	}
	/* Cursor is safe since the cache is stable */
	od->cursor = p;
}

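/*
 * Get the directory's entry cache, reusing the existing one if its
 * version still matches, otherwise building a fresh merged listing.
 */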
static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
{
	int res;
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(dentry);
	if (cache && ovl_dentry_version_get(dentry) == cache->version) {
		cache->refcount++;
		return cache;
	}
	ovl_set_dir_cache(dentry, NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	cache->refcount = 1;
	INIT_LIST_HEAD(&cache->entries);

	res = ovl_dir_read_merged(dentry, &cache->entries);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(dentry, cache);

	return cache;
}

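/*
 * ->iterate(): non-merged directories are passed straight through to
 * the underlying directory; merged ones are emitted from the cache,
 * skipping whiteouts.
 */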
static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct ovl_cache_entry *p;

	if (!ctx->pos)
		ovl_dir_reset(file);

	if (od->is_real)
		return iterate_dir(od->realfile, ctx);

	if (!od->cache) {
		struct ovl_dir_cache *cache;

		cache = ovl_cache_get(dentry);
		if (IS_ERR(cache))
			return PTR_ERR(cache);

		od->cache = cache;
		ovl_seek_cursor(od, ctx->pos);
	}

	while (od->cursor != &od->cache->entries) {
		p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
		if (!p->is_whiteout)
			if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
				break;
		od->cursor = p->l_node.next;
		ctx->pos++;
	}
	return 0;
}

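/*
 * ->llseek(): forwarded to the real file for non-merged directories.
 * For merged directories the offset is simply an index into the cached
 * entry list, so just reposition the cursor.
 */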
static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t res;
	struct ovl_dir_file *od = file->private_data;

	mutex_lock(&file_inode(file)->i_mutex);
	if (!file->f_pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		res = vfs_llseek(od->realfile, offset, origin);
		file->f_pos = od->realfile->f_pos;
	} else {
		res = -EINVAL;

		switch (origin) {
		case SEEK_CUR:
			offset += file->f_pos;
			break;
		case SEEK_SET:
			break;
		default:
			goto out_unlock;
		}
		if (offset < 0)
			goto out_unlock;

		if (offset != file->f_pos) {
			file->f_pos = offset;
			if (od->cache)
				ovl_seek_cursor(od, offset);
		}
		res = offset;
	}
out_unlock:
	mutex_unlock(&file_inode(file)->i_mutex);

	return res;
}

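/*
 * ->fsync(): if the directory has been copied up since it was opened,
 * lazily open (and cache) the upper directory and sync that instead of
 * the originally opened lower one.
 */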
static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct file *realfile = od->realfile;

	/*
	 * Need to check if we started out being a lower dir, but got copied up
	 */
	if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
		struct inode *inode = file_inode(file);

		realfile = lockless_dereference(od->upperfile);
		if (!realfile) {
			struct path upperpath;

			ovl_path_upper(dentry, &upperpath);
			realfile = ovl_path_open(&upperpath, O_RDONLY);
			smp_mb__before_spinlock();
			mutex_lock(&inode->i_mutex);
			if (!od->upperfile) {
				if (IS_ERR(realfile)) {
					mutex_unlock(&inode->i_mutex);
					return PTR_ERR(realfile);
				}
				od->upperfile = realfile;
			} else {
				/* somebody has beaten us to it */
				if (!IS_ERR(realfile))
					fput(realfile);
				realfile = od->upperfile;
			}
			mutex_unlock(&inode->i_mutex);
		}
	}

	return vfs_fsync_range(realfile, start, end, datasync);
}

static int ovl_dir_release(struct inode *inode, struct file *file)
{
	struct ovl_dir_file *od = file->private_data;

	if (od->cache) {
		mutex_lock(&inode->i_mutex);
		ovl_cache_put(od, file->f_path.dentry);
		mutex_unlock(&inode->i_mutex);
	}
	fput(od->realfile);
	if (od->upperfile)
		fput(od->upperfile);
	kfree(od);

	return 0;
}

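/*
 * ->open(): open the underlying "real" directory and note whether the
 * overlay directory can be served directly from it (is_real) and
 * whether that real directory is the upper one (is_upper).
 */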
static int ovl_dir_open(struct inode *inode, struct file *file)
{
	struct path realpath;
	struct file *realfile;
	struct ovl_dir_file *od;
	enum ovl_path_type type;

	od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	type = ovl_path_real(file->f_path.dentry, &realpath);
	realfile = ovl_path_open(&realpath, file->f_flags);
	if (IS_ERR(realfile)) {
		kfree(od);
		return PTR_ERR(realfile);
	}
	od->realfile = realfile;
	od->is_real = !OVL_TYPE_MERGE(type);
	od->is_upper = OVL_TYPE_UPPER(type);
	file->private_data = od;

	return 0;
}

const struct file_operations ovl_dir_operations = {
	.read		= generic_read_dir,
	.open		= ovl_dir_open,
	.iterate	= ovl_iterate,
	.llseek		= ovl_dir_llseek,
	.fsync		= ovl_dir_fsync,
	.release	= ovl_dir_release,
};

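/*
 * Check that a merged directory contains nothing but whiteouts, "." and
 * "..".  The merged list is returned to the caller, typically so the
 * whiteouts can later be removed with ovl_cleanup_whiteouts().
 */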
int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
{
	int err;
	struct ovl_cache_entry *p;

	err = ovl_dir_read_merged(dentry, list);
	if (err)
		return err;

	err = 0;

	list_for_each_entry(p, list, l_node) {
		if (p->is_whiteout)
			continue;

		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		err = -ENOTEMPTY;
		break;
	}

	return err;
}

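/* Remove every whiteout entry on the list from the upper directory */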
void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
{
	struct ovl_cache_entry *p;

	mutex_lock_nested(&upper->d_inode->i_mutex, I_MUTEX_CHILD);
	list_for_each_entry(p, list, l_node) {
		struct dentry *dentry;

		if (!p->is_whiteout)
			continue;

		dentry = lookup_one_len(p->name, upper, p->len);
		if (IS_ERR(dentry)) {
			pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
			       upper->d_name.name, p->len, p->name,
			       (int) PTR_ERR(dentry));
			continue;
		}
		ovl_cleanup(upper->d_inode, dentry);
		dput(dentry);
	}
	mutex_unlock(&upper->d_inode->i_mutex);
}
558