xref: /openbmc/linux/fs/cachefiles/namei.c (revision 07a90e97)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* CacheFiles path walking and related routines
3  *
4  * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 
8 #include <linux/fs.h>
9 #include <linux/namei.h>
10 #include "internal.h"
11 
12 /*
13  * Mark the backing file as being a cache file if it's not already in use.  The
14  * mark tells the culling request command that it's not allowed to cull the
15  * file or directory.  The caller must hold the inode lock.
16  */
17 static bool __cachefiles_mark_inode_in_use(struct cachefiles_object *object,
18 					   struct dentry *dentry)
19 {
20 	struct inode *inode = d_backing_inode(dentry);
21 	bool can_use = false;
22 
23 	if (!(inode->i_flags & S_KERNEL_FILE)) {
24 		inode->i_flags |= S_KERNEL_FILE;
25 		trace_cachefiles_mark_active(object, inode);
26 		can_use = true;
27 	} else {
28 		pr_notice("cachefiles: Inode already in use: %pd\n", dentry);
29 	}
30 
31 	return can_use;
32 }
33 
34 static bool cachefiles_mark_inode_in_use(struct cachefiles_object *object,
35 					 struct dentry *dentry)
36 {
37 	struct inode *inode = d_backing_inode(dentry);
38 	bool can_use;
39 
40 	inode_lock(inode);
41 	can_use = __cachefiles_mark_inode_in_use(object, dentry);
42 	inode_unlock(inode);
43 	return can_use;
44 }
45 
46 /*
47  * Unmark a backing inode.  The caller must hold the inode lock.
48  */
49 static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
50 					     struct dentry *dentry)
51 {
52 	struct inode *inode = d_backing_inode(dentry);
53 
54 	inode->i_flags &= ~S_KERNEL_FILE;
55 	trace_cachefiles_mark_inactive(object, inode);
56 }
57 
58 /*
59  * Unmark a backing inode and tell cachefilesd that there's something that can
60  * be culled.
61  */
62 void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
63 				    struct file *file)
64 {
65 	struct cachefiles_cache *cache = object->volume->cache;
66 	struct inode *inode = file_inode(file);
67 
68 	if (inode) {
69 		inode_lock(inode);
70 		__cachefiles_unmark_inode_in_use(object, file->f_path.dentry);
71 		inode_unlock(inode);
72 
73 		if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
74 			atomic_long_add(inode->i_blocks, &cache->b_released);
75 			if (atomic_inc_return(&cache->f_released))
76 				cachefiles_state_changed(cache);
77 		}
78 	}
79 }
80 
/*
 * Get (creating if necessary) a subdirectory of @dir named @dirname and mark
 * its inode S_KERNEL_FILE so that cachefilesd's cull requests leave it alone.
 *
 * @dir's inode lock is taken here and dropped again before returning.  On
 * success, the returned dentry carries a reference and the in-use mark;
 * release both with cachefiles_put_directory().  *_is_new, if supplied, is
 * set to true when the directory had to be created.  On failure an ERR_PTR
 * is returned.
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname,
					bool *_is_new)
{
	struct dentry *subdir;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */
	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

retry:
	/* Error injection may stand in for a failed lookup under test. */
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		subdir = lookup_one_len(dirname, dir, strlen(dirname));
	else
		subdir = ERR_PTR(ret);
	if (IS_ERR(subdir)) {
		trace_cachefiles_vfs_error(NULL, d_backing_inode(dir),
					   PTR_ERR(subdir),
					   cachefiles_trace_lookup_error);
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %pd %s",
	       subdir, d_backing_inode(subdir) ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (d_is_negative(subdir)) {
		/* Check cache capacity before consuming another file. */
		ret = cachefiles_has_space(cache, 1, 0);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_mkdir(&path, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;
		ret = cachefiles_inject_write_error();
		if (ret == 0)
			ret = vfs_mkdir(&init_user_ns, d_inode(dir), subdir, 0700);
		if (ret < 0) {
			trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
						   cachefiles_trace_mkdir_error);
			goto mkdir_error;
		}

		/* If the dentry got unhashed (presumably the filesystem
		 * instantiated a different one during mkdir), drop it and
		 * look the name up afresh. */
		if (unlikely(d_unhashed(subdir))) {
			cachefiles_put_directory(subdir);
			goto retry;
		}
		ASSERT(d_backing_inode(subdir));

		_debug("mkdir -> %pd{ino=%lu}",
		       subdir, d_backing_inode(subdir)->i_ino);
		if (_is_new)
			*_is_new = true;
	}

	/* Tell rmdir() it's not allowed to delete the subdir.  Note the lock
	 * ordering: the child is locked before the parent is released so no
	 * window exists in which the subdir could be removed. */
	inode_lock(d_inode(subdir));
	inode_unlock(d_inode(dir));

	if (!__cachefiles_mark_inode_in_use(NULL, subdir))
		goto mark_error;

	inode_unlock(d_inode(subdir));

	/* we need to make sure the subdir is a directory */
	ASSERT(d_backing_inode(subdir));

	if (!d_can_lookup(subdir)) {
		pr_err("%s is not a directory\n", dirname);
		ret = -EIO;
		goto check_error;
	}

	/* The backing filesystem must support the directory operations and
	 * xattrs that the cache relies on. */
	ret = -EPERM;
	if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
	    !d_backing_inode(subdir)->i_op->lookup ||
	    !d_backing_inode(subdir)->i_op->mkdir ||
	    !d_backing_inode(subdir)->i_op->rename ||
	    !d_backing_inode(subdir)->i_op->rmdir ||
	    !d_backing_inode(subdir)->i_op->unlink)
		goto check_error;

	_leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
	return subdir;

check_error:
	/* The in-use mark is already set; put_directory clears it and drops
	 * the reference. */
	cachefiles_put_directory(subdir);
	_leave(" = %d [check]", ret);
	return ERR_PTR(ret);

mark_error:
	/* __cachefiles_mark_inode_in_use() already logged the collision. */
	inode_unlock(d_inode(subdir));
	dput(subdir);
	return ERR_PTR(-EBUSY);

mkdir_error:
	inode_unlock(d_inode(dir));
	dput(subdir);
	pr_err("mkdir %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(subdir);
	pr_err("Lookup %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

nomem_d_alloc:
	inode_unlock(d_inode(dir));
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}
207 
208 /*
209  * Put a subdirectory.
210  */
211 void cachefiles_put_directory(struct dentry *dir)
212 {
213 	if (dir) {
214 		inode_lock(dir->d_inode);
215 		__cachefiles_unmark_inode_in_use(NULL, dir);
216 		inode_unlock(dir->d_inode);
217 		dput(dir);
218 	}
219 }
220 
221 /*
222  * Remove a regular file from the cache.
223  */
224 static int cachefiles_unlink(struct cachefiles_cache *cache,
225 			     struct cachefiles_object *object,
226 			     struct dentry *dir, struct dentry *dentry,
227 			     enum fscache_why_object_killed why)
228 {
229 	struct path path = {
230 		.mnt	= cache->mnt,
231 		.dentry	= dir,
232 	};
233 	int ret;
234 
235 	trace_cachefiles_unlink(object, dentry, why);
236 	ret = security_path_unlink(&path, dentry);
237 	if (ret < 0) {
238 		cachefiles_io_error(cache, "Unlink security error");
239 		return ret;
240 	}
241 
242 	ret = cachefiles_inject_remove_error();
243 	if (ret == 0) {
244 		ret = vfs_unlink(&init_user_ns, d_backing_inode(dir), dentry, NULL);
245 		if (ret == -EIO)
246 			cachefiles_io_error(cache, "Unlink failed");
247 	}
248 	if (ret != 0)
249 		trace_cachefiles_vfs_error(object, d_backing_inode(dir), ret,
250 					   cachefiles_trace_unlink_error);
251 	return ret;
252 }
253 
/*
 * Delete an object representation from the cache
 * - File backed objects are unlinked
 * - Directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 *
 * The caller must hold @dir's inode lock; it is dropped on every return
 * path.  @rep is the victim dentry (a child of @dir at the time of lookup).
 */
int cachefiles_bury_object(struct cachefiles_cache *cache,
			   struct cachefiles_object *object,
			   struct dentry *dir,
			   struct dentry *rep,
			   enum fscache_why_object_killed why)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];	/* two 8-hex-digit words plus NUL */
	int ret;

	_enter(",'%pd','%pd'", dir, rep);

	/* The victim moved away since it was looked up - no longer ours to
	 * bury. */
	if (rep->d_parent != dir) {
		inode_unlock(d_inode(dir));
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* non-directories can just be unlinked */
	if (!d_is_dir(rep)) {
		dget(rep); /* Stop the dentry being negated if it's only pinned
			    * by a file struct.
			    */
		ret = cachefiles_unlink(cache, object, dir, rep, why);
		dput(rep);

		inode_unlock(d_inode(dir));
		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	inode_unlock(d_inode(dir));

try_again:
	/* first step is to make up a grave dentry in the graveyard: a
	 * timestamp plus a counter keeps the name unique */
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) ktime_get_real_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* do the multiway lock magic: lock both parents in the right order
	 * and get back the common ancestor ("trap"), if either is it */
	trap = lock_rename(cache->graveyard, dir);

	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!d_can_lookup(cache->graveyard)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	/* Renaming an ancestor into its own subtree would create a loop. */
	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);
		trace_cachefiles_vfs_error(object, d_inode(cache->graveyard),
					   PTR_ERR(grave),
					   cachefiles_trace_lookup_error);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld", PTR_ERR(grave));
		return -EIO;
	}

	/* A grave of that name already exists (counter wrapped or clock
	 * stepped) - drop everything and pick a new name. */
	if (d_is_positive(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		struct renamedata rd = {
			.old_mnt_userns	= &init_user_ns,
			.old_dir	= d_inode(dir),
			.old_dentry	= rep,
			.new_mnt_userns	= &init_user_ns,
			.new_dir	= d_inode(cache->graveyard),
			.new_dentry	= grave,
		};
		trace_cachefiles_rename(object, rep, grave, why);
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			ret = vfs_rename(&rd);
		if (ret != 0)
			trace_cachefiles_vfs_error(object, d_inode(dir), ret,
						   cachefiles_trace_rename_error);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);
	}

	/* The object is leaving the cache, so let cull have it.  NOTE(review):
	 * this unmark happens even when the rename failed - presumably
	 * deliberate so the daemon can still collect it; confirm. */
	__cachefiles_unmark_inode_in_use(object, rep);
	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}
406 
/*
 * Look up an inode to be checked or culled.  Return -EBUSY if the inode is
 * marked in use.
 *
 * Takes @dir's inode lock.  On SUCCESS the lock is left HELD and a
 * referenced victim dentry is returned - the caller must drop both.  On
 * failure the lock is dropped here and an ERR_PTR is returned.
 */
static struct dentry *cachefiles_lookup_for_cull(struct cachefiles_cache *cache,
						 struct dentry *dir,
						 char *filename)
{
	struct dentry *victim;
	int ret = -ENOENT;

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	victim = lookup_one_len(filename, dir, strlen(filename));
	if (IS_ERR(victim))
		goto lookup_error;
	/* A negative dentry means the file vanished since the daemon saw it. */
	if (d_is_negative(victim))
		goto lookup_put;
	/* S_KERNEL_FILE means the cache still has the file open. */
	if (d_inode(victim)->i_flags & S_KERNEL_FILE)
		goto lookup_busy;
	return victim;

lookup_busy:
	ret = -EBUSY;
lookup_put:
	inode_unlock(d_inode(dir));
	dput(victim);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(victim);
	if (ret == -ENOENT)
		return ERR_PTR(-ESTALE); /* Probably got retired by the netfs */

	if (ret == -EIO) {
		cachefiles_io_error(cache, "Lookup failed");
	} else if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	return ERR_PTR(ret);
}
451 
/*
 * Cull an object if it's not in use
 * - called only by cache manager daemon
 *
 * Returns 0 on success, -EBUSY if the object is in use, -ESTALE if it has
 * already gone, or another negative error code.
 */
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
		    char *filename)
{
	struct dentry *victim;
	struct inode *inode;
	int ret;

	_enter(",%pd/,%s", dir, filename);

	/* On success this returns with @dir's inode lock held and a
	 * referenced victim dentry. */
	victim = cachefiles_lookup_for_cull(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	/* check to see if someone is using this object */
	inode = d_inode(victim);
	inode_lock(inode);
	if (inode->i_flags & S_KERNEL_FILE) {
		ret = -EBUSY;
	} else {
		/* Stop the cache from picking it back up */
		inode->i_flags |= S_KERNEL_FILE;
		ret = 0;
	}
	inode_unlock(inode);
	if (ret < 0)
		goto error_unlock;

	/* cachefiles_bury_object() drops @dir's inode lock on all paths. */
	ret = cachefiles_bury_object(cache, NULL, dir, victim,
				     FSCACHE_OBJECT_WAS_CULLED);
	if (ret < 0)
		goto error;

	dput(victim);
	_leave(" = 0");
	return 0;

error_unlock:
	/* Still holding the lock that lookup_for_cull left for us. */
	inode_unlock(d_inode(dir));
error:
	dput(victim);
	if (ret == -ENOENT)
		return -ESTALE; /* Probably got retired by the netfs */

	if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ret;
}
507 
/*
 * Find out if an object is in use or not
 * - called only by cache manager daemon
 * - returns -EBUSY or 0 to indicate whether an object is in use or not
 */
int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
			    char *filename)
{
	struct dentry *victim = cachefiles_lookup_for_cull(cache, dir, filename);

	if (IS_ERR(victim))
		return PTR_ERR(victim);

	/* Not busy: release the parent lock and reference that the lookup
	 * left held for us. */
	inode_unlock(d_inode(dir));
	dput(victim);
	return 0;
}
527