xref: /openbmc/linux/fs/cachefiles/namei.c (revision ea5dc046)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* CacheFiles path walking and related routines
3  *
4  * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 
8 #include <linux/fs.h>
9 #include <linux/namei.h>
10 #include "internal.h"
11 
12 /*
13  * Mark the backing file as being a cache file if it's not already in use.  The
14  * mark tells the culling request command that it's not allowed to cull the
15  * file or directory.  The caller must hold the inode lock.
16  */
17 static bool __cachefiles_mark_inode_in_use(struct cachefiles_object *object,
18 					   struct dentry *dentry)
19 {
20 	struct inode *inode = d_backing_inode(dentry);
21 	bool can_use = false;
22 
23 	if (!(inode->i_flags & S_KERNEL_FILE)) {
24 		inode->i_flags |= S_KERNEL_FILE;
25 		trace_cachefiles_mark_active(object, inode);
26 		can_use = true;
27 	} else {
28 		trace_cachefiles_mark_failed(object, inode);
29 		pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
30 			  dentry, inode->i_ino);
31 	}
32 
33 	return can_use;
34 }
35 
36 static bool cachefiles_mark_inode_in_use(struct cachefiles_object *object,
37 					 struct dentry *dentry)
38 {
39 	struct inode *inode = d_backing_inode(dentry);
40 	bool can_use;
41 
42 	inode_lock(inode);
43 	can_use = __cachefiles_mark_inode_in_use(object, dentry);
44 	inode_unlock(inode);
45 	return can_use;
46 }
47 
48 /*
49  * Unmark a backing inode.  The caller must hold the inode lock.
50  */
51 static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
52 					     struct dentry *dentry)
53 {
54 	struct inode *inode = d_backing_inode(dentry);
55 
56 	inode->i_flags &= ~S_KERNEL_FILE;
57 	trace_cachefiles_mark_inactive(object, inode);
58 }
59 
/*
 * Lock-taking wrapper around __cachefiles_unmark_inode_in_use().
 */
static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object,
					      struct dentry *dentry)
{
	struct inode *backer = d_backing_inode(dentry);

	inode_lock(backer);
	__cachefiles_unmark_inode_in_use(object, dentry);
	inode_unlock(backer);
}
69 
70 /*
71  * Unmark a backing inode and tell cachefilesd that there's something that can
72  * be culled.
73  */
74 void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
75 				    struct file *file)
76 {
77 	struct cachefiles_cache *cache = object->volume->cache;
78 	struct inode *inode = file_inode(file);
79 
80 	if (inode) {
81 		cachefiles_do_unmark_inode_in_use(object, file->f_path.dentry);
82 
83 		if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
84 			atomic_long_add(inode->i_blocks, &cache->b_released);
85 			if (atomic_inc_return(&cache->f_released))
86 				cachefiles_state_changed(cache);
87 		}
88 	}
89 }
90 
/*
 * Get (and, if necessary, create) a subdirectory of @dir, marking its inode
 * with S_KERNEL_FILE so that the cull command can't remove it.
 *
 * @cache:   The cache being set up (supplies mnt and space accounting).
 * @dir:     Parent directory; its inode lock is taken here and is released
 *           on every return path.
 * @dirname: Name of the subdirectory to look up/create (mode 0700 if created).
 * @_is_new: If non-NULL, set to true when the directory had to be created.
 *
 * Returns the subdir dentry (with the in-use mark set) or an ERR_PTR:
 * -EBUSY if the inode is already marked in use, -ENOMEM, or the lookup/mkdir
 * error.  Callers release the result with cachefiles_put_directory().
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname,
					bool *_is_new)
{
	struct dentry *subdir;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */
	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

retry:
	/* Fault injection lets the test rig exercise the error paths. */
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		subdir = lookup_one_len(dirname, dir, strlen(dirname));
	else
		subdir = ERR_PTR(ret);
	trace_cachefiles_lookup(NULL, dir, subdir);
	if (IS_ERR(subdir)) {
		trace_cachefiles_vfs_error(NULL, d_backing_inode(dir),
					   PTR_ERR(subdir),
					   cachefiles_trace_lookup_error);
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %pd %s",
	       subdir, d_backing_inode(subdir) ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (d_is_negative(subdir)) {
		ret = cachefiles_has_space(cache, 1, 0,
					   cachefiles_has_space_for_create);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_mkdir(&path, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;
		ret = cachefiles_inject_write_error();
		if (ret == 0)
			ret = vfs_mkdir(&init_user_ns, d_inode(dir), subdir, 0700);
		if (ret < 0) {
			trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
						   cachefiles_trace_mkdir_error);
			goto mkdir_error;
		}
		trace_cachefiles_mkdir(dir, subdir);

		/* NOTE(review): presumably some filesystems can leave this
		 * dentry unhashed after mkdir, so redo the lookup to get a
		 * usable dentry - confirm which backing fs triggers this.
		 */
		if (unlikely(d_unhashed(subdir))) {
			cachefiles_put_directory(subdir);
			goto retry;
		}
		ASSERT(d_backing_inode(subdir));

		_debug("mkdir -> %pd{ino=%lu}",
		       subdir, d_backing_inode(subdir)->i_ino);
		if (_is_new)
			*_is_new = true;
	}

	/* Tell rmdir() it's not allowed to delete the subdir.  Note the lock
	 * handoff: subdir's inode is locked before dir's is released.
	 */
	inode_lock(d_inode(subdir));
	inode_unlock(d_inode(dir));

	if (!__cachefiles_mark_inode_in_use(NULL, subdir))
		goto mark_error;

	inode_unlock(d_inode(subdir));

	/* we need to make sure the subdir is a directory */
	ASSERT(d_backing_inode(subdir));

	if (!d_can_lookup(subdir)) {
		pr_err("%s is not a directory\n", dirname);
		ret = -EIO;
		goto check_error;
	}

	/* The backing filesystem must supply xattrs and the full set of
	 * directory operations we rely on.
	 */
	ret = -EPERM;
	if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
	    !d_backing_inode(subdir)->i_op->lookup ||
	    !d_backing_inode(subdir)->i_op->mkdir ||
	    !d_backing_inode(subdir)->i_op->rename ||
	    !d_backing_inode(subdir)->i_op->rmdir ||
	    !d_backing_inode(subdir)->i_op->unlink)
		goto check_error;

	_leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
	return subdir;

check_error:
	/* Drops the in-use mark we placed above. */
	cachefiles_put_directory(subdir);
	_leave(" = %d [check]", ret);
	return ERR_PTR(ret);

mark_error:
	/* Mark not placed, so plain dput rather than put_directory. */
	inode_unlock(d_inode(subdir));
	dput(subdir);
	return ERR_PTR(-EBUSY);

mkdir_error:
	inode_unlock(d_inode(dir));
	dput(subdir);
	pr_err("mkdir %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(subdir);
	pr_err("Lookup %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

nomem_d_alloc:
	inode_unlock(d_inode(dir));
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}
220 
221 /*
222  * Put a subdirectory.
223  */
224 void cachefiles_put_directory(struct dentry *dir)
225 {
226 	if (dir) {
227 		inode_lock(dir->d_inode);
228 		__cachefiles_unmark_inode_in_use(NULL, dir);
229 		inode_unlock(dir->d_inode);
230 		dput(dir);
231 	}
232 }
233 
234 /*
235  * Remove a regular file from the cache.
236  */
237 static int cachefiles_unlink(struct cachefiles_cache *cache,
238 			     struct cachefiles_object *object,
239 			     struct dentry *dir, struct dentry *dentry,
240 			     enum fscache_why_object_killed why)
241 {
242 	struct path path = {
243 		.mnt	= cache->mnt,
244 		.dentry	= dir,
245 	};
246 	int ret;
247 
248 	trace_cachefiles_unlink(object, d_inode(dentry)->i_ino, why);
249 	ret = security_path_unlink(&path, dentry);
250 	if (ret < 0) {
251 		cachefiles_io_error(cache, "Unlink security error");
252 		return ret;
253 	}
254 
255 	ret = cachefiles_inject_remove_error();
256 	if (ret == 0) {
257 		ret = vfs_unlink(&init_user_ns, d_backing_inode(dir), dentry, NULL);
258 		if (ret == -EIO)
259 			cachefiles_io_error(cache, "Unlink failed");
260 	}
261 	if (ret != 0)
262 		trace_cachefiles_vfs_error(object, d_backing_inode(dir), ret,
263 					   cachefiles_trace_unlink_error);
264 	return ret;
265 }
266 
/*
 * Delete an object representation from the cache
 * - File backed objects are unlinked
 * - Directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 *
 * The caller must hold the lock on @dir's inode; it is released on every
 * return path (either directly or via unlock_rename()).
 */
int cachefiles_bury_object(struct cachefiles_cache *cache,
			   struct cachefiles_object *object,
			   struct dentry *dir,
			   struct dentry *rep,
			   enum fscache_why_object_killed why)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];	/* two 8-hex-digit fields + NUL */
	int ret;

	_enter(",'%pd','%pd'", dir, rep);

	/* The victim may have been moved out from under us. */
	if (rep->d_parent != dir) {
		inode_unlock(d_inode(dir));
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* non-directories can just be unlinked */
	if (!d_is_dir(rep)) {
		dget(rep); /* Stop the dentry being negated if it's only pinned
			    * by a file struct.
			    */
		ret = cachefiles_unlink(cache, object, dir, rep, why);
		dput(rep);

		inode_unlock(d_inode(dir));
		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	inode_unlock(d_inode(dir));

try_again:
	/* first step is to make up a grave dentry in the graveyard.  The name
	 * is seconds-since-epoch plus a counter, both as 8 hex digits, to
	 * make collisions unlikely.
	 */
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) ktime_get_real_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* do the multiway lock magic */
	trap = lock_rename(cache->graveyard, dir);

	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!d_can_lookup(cache->graveyard)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	/* lock_rename() returned the common ancestor; renaming over it would
	 * create a loop.
	 */
	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);
		trace_cachefiles_vfs_error(object, d_inode(cache->graveyard),
					   PTR_ERR(grave),
					   cachefiles_trace_lookup_error);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld", PTR_ERR(grave));
		return -EIO;
	}

	/* Grave name already taken - generate a new one and retry. */
	if (d_is_positive(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		struct renamedata rd = {
			.old_mnt_userns	= &init_user_ns,
			.old_dir	= d_inode(dir),
			.old_dentry	= rep,
			.new_mnt_userns	= &init_user_ns,
			.new_dir	= d_inode(cache->graveyard),
			.new_dentry	= grave,
		};
		trace_cachefiles_rename(object, d_inode(rep)->i_ino, why);
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			ret = vfs_rename(&rd);
		if (ret != 0)
			trace_cachefiles_vfs_error(object, d_inode(dir), ret,
						   cachefiles_trace_rename_error);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);
	}

	/* NOTE(review): the in-use mark on rep is cleared even when the
	 * rename failed, and the function still returns 0 - presumably so the
	 * daemon can retry culling later; confirm this is intentional.
	 */
	__cachefiles_unmark_inode_in_use(object, rep);
	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}
419 
420 /*
421  * Delete a cache file.
422  */
423 int cachefiles_delete_object(struct cachefiles_object *object,
424 			     enum fscache_why_object_killed why)
425 {
426 	struct cachefiles_volume *volume = object->volume;
427 	struct dentry *dentry = object->file->f_path.dentry;
428 	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
429 	int ret;
430 
431 	_enter(",OBJ%x{%pD}", object->debug_id, object->file);
432 
433 	/* Stop the dentry being negated if it's only pinned by a file struct. */
434 	dget(dentry);
435 
436 	inode_lock_nested(d_backing_inode(fan), I_MUTEX_PARENT);
437 	ret = cachefiles_unlink(volume->cache, object, fan, dentry, why);
438 	inode_unlock(d_backing_inode(fan));
439 	dput(dentry);
440 	return ret;
441 }
442 
/*
 * Create a temporary file and leave it unattached and un-xattr'd until the
 * time comes to discard the object from memory.
 *
 * The file is created in the object's fanout directory, marked in use,
 * truncated up to the rounded cookie size and opened O_DIRECT.  Returns the
 * open file or an ERR_PTR.  On success the backing inode carries the
 * S_KERNEL_FILE mark; on failure the mark has been removed again.
 */
struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct cachefiles_cache *cache = volume->cache;
	const struct cred *saved_cred;
	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
	struct file *file;
	struct path path;
	uint64_t ni_size = object->cookie->object_size;
	long ret;

	/* Round the size up to the cache's DIO block size for direct I/O. */
	ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE);

	/* Act with the cache's credentials for all the fs operations below. */
	cachefiles_begin_secure(cache, &saved_cred);

	path.mnt = cache->mnt;
	ret = cachefiles_inject_write_error();
	if (ret == 0)
		path.dentry = vfs_tmpfile(&init_user_ns, fan, S_IFREG, O_RDWR);
	else
		path.dentry = ERR_PTR(ret);
	if (IS_ERR(path.dentry)) {
		trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(path.dentry),
					   cachefiles_trace_tmpfile_error);
		if (PTR_ERR(path.dentry) == -EIO)
			cachefiles_io_error_obj(object, "Failed to create tmpfile");
		file = ERR_CAST(path.dentry);
		goto out;
	}

	trace_cachefiles_tmpfile(object, d_backing_inode(path.dentry));

	if (!cachefiles_mark_inode_in_use(object, path.dentry)) {
		file = ERR_PTR(-EBUSY);
		goto out_dput;
	}

	/* Pre-size the file to the (rounded) netfs object size. */
	if (ni_size > 0) {
		trace_cachefiles_trunc(object, d_backing_inode(path.dentry), 0, ni_size,
				       cachefiles_trunc_expand_tmpfile);
		ret = cachefiles_inject_write_error();
		if (ret == 0)
			ret = vfs_truncate(&path, ni_size);
		if (ret < 0) {
			trace_cachefiles_vfs_error(
				object, d_backing_inode(path.dentry), ret,
				cachefiles_trace_trunc_error);
			file = ERR_PTR(ret);
			goto out_unuse;
		}
	}

	file = open_with_fake_path(&path, O_RDWR | O_LARGEFILE | O_DIRECT,
				   d_backing_inode(path.dentry), cache->cache_cred);
	if (IS_ERR(file)) {
		trace_cachefiles_vfs_error(object, d_backing_inode(path.dentry),
					   PTR_ERR(file),
					   cachefiles_trace_open_error);
		goto out_unuse;
	}
	/* The I/O paths need iter-based file ops on the backing fs. */
	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		fput(file);
		pr_notice("Cache does not support read_iter and write_iter\n");
		file = ERR_PTR(-EINVAL);
		goto out_unuse;
	}

	goto out_dput;

out_unuse:
	cachefiles_do_unmark_inode_in_use(object, path.dentry);
out_dput:
	dput(path.dentry);
out:
	cachefiles_end_secure(cache, saved_cred);
	return file;
}
525 
526 /*
527  * Create a new file.
528  */
529 static bool cachefiles_create_file(struct cachefiles_object *object)
530 {
531 	struct file *file;
532 	int ret;
533 
534 	ret = cachefiles_has_space(object->volume->cache, 1, 0,
535 				   cachefiles_has_space_for_create);
536 	if (ret < 0)
537 		return false;
538 
539 	file = cachefiles_create_tmpfile(object);
540 	if (IS_ERR(file))
541 		return false;
542 
543 	set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags);
544 	set_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
545 	_debug("create -> %pD{ino=%lu}", file, file_inode(file)->i_ino);
546 	object->file = file;
547 	return true;
548 }
549 
/*
 * Open an existing file, checking its attributes and replacing it if it is
 * stale.
 *
 * Consumes the caller's reference on @dentry on all paths.  On success,
 * object->file is set and true is returned.  If the coherency check reports
 * -ESTALE, the stale file is replaced by a fresh tmpfile via
 * cachefiles_create_file().
 */
static bool cachefiles_open_file(struct cachefiles_object *object,
				 struct dentry *dentry)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file;
	struct path path;
	int ret;

	_enter("%pd", dentry);

	/* Claim the backing inode; fail if another user already has it. */
	if (!cachefiles_mark_inode_in_use(object, dentry))
		return false;

	/* We need to open a file interface onto a data file now as we can't do
	 * it on demand because writeback called from do_exit() sees
	 * current->fs == NULL - which breaks d_path() called from ext4 open.
	 */
	path.mnt = cache->mnt;
	path.dentry = dentry;
	file = open_with_fake_path(&path, O_RDWR | O_LARGEFILE | O_DIRECT,
				   d_backing_inode(dentry), cache->cache_cred);
	if (IS_ERR(file)) {
		trace_cachefiles_vfs_error(object, d_backing_inode(dentry),
					   PTR_ERR(file),
					   cachefiles_trace_open_error);
		goto error;
	}

	/* The I/O paths need iter-based file ops on the backing fs. */
	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		pr_notice("Cache does not support read_iter and write_iter\n");
		goto error_fput;
	}
	_debug("file -> %pd positive", dentry);

	/* Verify the coherency data stored in the xattrs. */
	ret = cachefiles_check_auxdata(object, file);
	if (ret < 0)
		goto check_failed;

	object->file = file;

	/* Always update the atime on an object we've just looked up (this is
	 * used to keep track of culling, and atimes are only updated by read,
	 * write and readdir but not lookup or open).
	 */
	touch_atime(&file->f_path);
	dput(dentry);
	return true;

check_failed:
	fscache_cookie_lookup_negative(object->cookie);
	cachefiles_unmark_inode_in_use(object, file);
	fput(file);
	dput(dentry);
	/* A stale file is simply replaced; other errors fail the lookup. */
	if (ret == -ESTALE)
		return cachefiles_create_file(object);
	return false;

error_fput:
	fput(file);
error:
	cachefiles_do_unmark_inode_in_use(object, dentry);
	dput(dentry);
	return false;
}
619 
/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 *
 * Looks up "cache/vol/fanout/file" for the object; a missing file (or a
 * non-regular one, once buried) leads to creation of a fresh tmpfile.
 * Returns true if object->file ends up set.
 */
bool cachefiles_look_up_object(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter("OBJ%x,%s,", object->debug_id, object->d_name);

	/* Look up path "cache/vol/fanout/file". */
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = lookup_positive_unlocked(object->d_name, fan,
						  object->d_name_len);
	else
		dentry = ERR_PTR(ret);
	trace_cachefiles_lookup(object, fan, dentry);
	if (IS_ERR(dentry)) {
		if (dentry == ERR_PTR(-ENOENT))
			goto new_file;
		if (dentry == ERR_PTR(-EIO))
			cachefiles_io_error_obj(object, "Lookup failed");
		return false;
	}

	/* Something other than a regular file is in the way: get rid of it
	 * and start afresh.  Note that cachefiles_bury_object() releases the
	 * fan lock taken here on all of its return paths.
	 */
	if (!d_is_reg(dentry)) {
		pr_err("%pd is not a file\n", dentry);
		inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
		ret = cachefiles_bury_object(volume->cache, object, fan, dentry,
					     FSCACHE_OBJECT_IS_WEIRD);
		dput(dentry);
		if (ret < 0)
			return false;
		goto new_file;
	}

	/* cachefiles_open_file() consumes our ref on dentry. */
	if (!cachefiles_open_file(object, dentry))
		return false;

	_leave(" = t [%lu]", file_inode(object->file)->i_ino);
	return true;

new_file:
	fscache_cookie_lookup_negative(object->cookie);
	return cachefiles_create_file(object);
}
669 
/*
 * Attempt to link a temporary file into its rightful place in the cache.
 *
 * Under the fanout directory lock, look up the object's name; if something
 * is already there (and isn't already our file), unlink it and look up
 * again, then hard-link the tmpfile into place and clear the
 * USING_TMPFILE flag.  Returns true on success.
 */
bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
			       struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	bool success = false;
	int ret;

	_enter(",%pD", object->file);

	inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
	else
		dentry = ERR_PTR(ret);
	if (IS_ERR(dentry)) {
		trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
					   cachefiles_trace_lookup_error);
		_debug("lookup fail %ld", PTR_ERR(dentry));
		goto out_unlock;
	}

	if (!d_is_negative(dentry)) {
		/* If it's already our own backing file, nothing to do. */
		if (d_backing_inode(dentry) == file_inode(object->file)) {
			success = true;
			goto out_dput;
		}

		/* Displace the stale occupant and redo the lookup to get a
		 * negative dentry to link over.
		 */
		ret = cachefiles_unlink(volume->cache, object, fan, dentry,
					FSCACHE_OBJECT_IS_STALE);
		if (ret < 0)
			goto out_dput;

		dput(dentry);
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
		else
			dentry = ERR_PTR(ret);
		if (IS_ERR(dentry)) {
			trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
						   cachefiles_trace_lookup_error);
			_debug("lookup fail %ld", PTR_ERR(dentry));
			goto out_unlock;
		}
	}

	ret = cachefiles_inject_read_error();
	if (ret == 0)
		ret = vfs_link(object->file->f_path.dentry, &init_user_ns,
			       d_inode(fan), dentry, NULL);
	if (ret < 0) {
		trace_cachefiles_vfs_error(object, d_inode(fan), ret,
					   cachefiles_trace_link_error);
		_debug("link fail %d", ret);
	} else {
		trace_cachefiles_link(object, file_inode(object->file));
		spin_lock(&object->lock);
		/* TODO: Do we want to switch the file pointer to the new dentry? */
		clear_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
		spin_unlock(&object->lock);
		success = true;
	}

out_dput:
	dput(dentry);
out_unlock:
	inode_unlock(d_inode(fan));
	_leave(" = %u", success);
	return success;
}
745 
746 /*
747  * Look up an inode to be checked or culled.  Return -EBUSY if the inode is
748  * marked in use.
749  */
750 static struct dentry *cachefiles_lookup_for_cull(struct cachefiles_cache *cache,
751 						 struct dentry *dir,
752 						 char *filename)
753 {
754 	struct dentry *victim;
755 	int ret = -ENOENT;
756 
757 	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
758 
759 	victim = lookup_one_len(filename, dir, strlen(filename));
760 	if (IS_ERR(victim))
761 		goto lookup_error;
762 	if (d_is_negative(victim))
763 		goto lookup_put;
764 	if (d_inode(victim)->i_flags & S_KERNEL_FILE)
765 		goto lookup_busy;
766 	return victim;
767 
768 lookup_busy:
769 	ret = -EBUSY;
770 lookup_put:
771 	inode_unlock(d_inode(dir));
772 	dput(victim);
773 	return ERR_PTR(ret);
774 
775 lookup_error:
776 	inode_unlock(d_inode(dir));
777 	ret = PTR_ERR(victim);
778 	if (ret == -ENOENT)
779 		return ERR_PTR(-ESTALE); /* Probably got retired by the netfs */
780 
781 	if (ret == -EIO) {
782 		cachefiles_io_error(cache, "Lookup failed");
783 	} else if (ret != -ENOMEM) {
784 		pr_err("Internal error: %d\n", ret);
785 		ret = -EIO;
786 	}
787 
788 	return ERR_PTR(ret);
789 }
790 
/*
 * Cull an object if it's not in use
 * - called only by cache manager daemon
 *
 * On entry the dir inode lock is not held; cachefiles_lookup_for_cull()
 * takes it and, on success, leaves it held.  cachefiles_bury_object()
 * releases it on all of its return paths, hence the two error labels below.
 */
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
		    char *filename)
{
	struct dentry *victim;
	struct inode *inode;
	int ret;

	_enter(",%pd/,%s", dir, filename);

	victim = cachefiles_lookup_for_cull(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	/* check to see if someone is using this object */
	inode = d_inode(victim);
	inode_lock(inode);
	if (inode->i_flags & S_KERNEL_FILE) {
		ret = -EBUSY;
	} else {
		/* Stop the cache from picking it back up */
		inode->i_flags |= S_KERNEL_FILE;
		ret = 0;
	}
	inode_unlock(inode);
	if (ret < 0)
		goto error_unlock;

	ret = cachefiles_bury_object(cache, NULL, dir, victim,
				     FSCACHE_OBJECT_WAS_CULLED);
	if (ret < 0)
		goto error;	/* dir lock already dropped by bury */

	fscache_count_culled();
	dput(victim);
	_leave(" = 0");
	return 0;

error_unlock:
	inode_unlock(d_inode(dir));
error:
	dput(victim);
	if (ret == -ENOENT)
		return -ESTALE; /* Probably got retired by the netfs */

	if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ret;
}
847 
/*
 * Find out if an object is in use or not
 * - called only by cache manager daemon
 * - returns -EBUSY or 0 to indicate whether an object is in use or not
 */
int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
			    char *filename)
{
	struct dentry *victim = cachefiles_lookup_for_cull(cache, dir, filename);

	if (IS_ERR(victim))
		return PTR_ERR(victim);

	/* Not in use: drop the dir lock the lookup left held, and the ref. */
	inode_unlock(d_inode(dir));
	dput(victim);
	return 0;
}
867