xref: /openbmc/linux/fs/cachefiles/daemon.c (revision 6bb6bd3d)
// SPDX-License-Identifier: GPL-2.0-or-later
/* Daemon interface
 *
 * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/mount.h>
#include <linux/statfs.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/fs_struct.h>
#include "internal.h"

static int cachefiles_daemon_open(struct inode *, struct file *);
static int cachefiles_daemon_release(struct inode *, struct file *);
static ssize_t cachefiles_daemon_read(struct file *, char __user *, size_t,
				      loff_t *);
static ssize_t cachefiles_daemon_write(struct file *, const char __user *,
				       size_t, loff_t *);
static __poll_t cachefiles_daemon_poll(struct file *,
					   struct poll_table_struct *);
static int cachefiles_daemon_frun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fcull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fstop(struct cachefiles_cache *, char *);
static int cachefiles_daemon_brun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bcull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bstop(struct cachefiles_cache *, char *);
static int cachefiles_daemon_cull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_debug(struct cachefiles_cache *, char *);
static int cachefiles_daemon_dir(struct cachefiles_cache *, char *);
static int cachefiles_daemon_inuse(struct cachefiles_cache *, char *);
static int cachefiles_daemon_secctx(struct cachefiles_cache *, char *);
static int cachefiles_daemon_tag(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bind(struct cachefiles_cache *, char *);
static void cachefiles_daemon_unbind(struct cachefiles_cache *);

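/*
 * Nonzero while the daemon control device is open.  It is checked with
 * xchg() in cachefiles_daemon_open() so that only one instance of
 * cachefilesd can drive the cache at a time (the device is normally
 * exposed as /dev/cachefiles).
 */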
static unsigned long cachefiles_open;

const struct file_operations cachefiles_daemon_fops = {
	.owner		= THIS_MODULE,
	.open		= cachefiles_daemon_open,
	.release	= cachefiles_daemon_release,
	.read		= cachefiles_daemon_read,
	.write		= cachefiles_daemon_write,
	.poll		= cachefiles_daemon_poll,
	.llseek		= noop_llseek,
};

struct cachefiles_daemon_cmd {
	char name[8];
	int (*handler)(struct cachefiles_cache *cache, char *args);
};

static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
	{ "bind",	cachefiles_daemon_bind		},
	{ "brun",	cachefiles_daemon_brun		},
	{ "bcull",	cachefiles_daemon_bcull		},
	{ "bstop",	cachefiles_daemon_bstop		},
	{ "cull",	cachefiles_daemon_cull		},
	{ "debug",	cachefiles_daemon_debug		},
	{ "dir",	cachefiles_daemon_dir		},
	{ "frun",	cachefiles_daemon_frun		},
	{ "fcull",	cachefiles_daemon_fcull		},
	{ "fstop",	cachefiles_daemon_fstop		},
	{ "inuse",	cachefiles_daemon_inuse		},
	{ "secctx",	cachefiles_daemon_secctx	},
	{ "tag",	cachefiles_daemon_tag		},
#ifdef CONFIG_CACHEFILES_ONDEMAND
	{ "copen",	cachefiles_ondemand_copen	},
	{ "restore",	cachefiles_ondemand_restore	},
#endif
	{ "",		NULL				}
};
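
/*
 * Illustrative example: each write() to the control device carries one
 * command from the table above, so a minimal setup sequence written by
 * cachefilesd might look like this (the path and percentages are only
 * example values):
 *
 *	dir /var/cache/fscache
 *	tag mycache
 *	brun 10%
 *	bcull 7%
 *	bstop 3%
 *	bind
 */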


/*
 * Prepare a cache for caching.
 */
static int cachefiles_daemon_open(struct inode *inode, struct file *file)
{
	struct cachefiles_cache *cache;

	_enter("");

	/* only the superuser may do this */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* the cachefiles device may only be open once at a time */
	if (xchg(&cachefiles_open, 1) == 1)
		return -EBUSY;

	/* allocate a cache record */
	cache = kzalloc(sizeof(struct cachefiles_cache), GFP_KERNEL);
	if (!cache) {
		cachefiles_open = 0;
		return -ENOMEM;
	}

	mutex_init(&cache->daemon_mutex);
	init_waitqueue_head(&cache->daemon_pollwq);
	INIT_LIST_HEAD(&cache->volumes);
	INIT_LIST_HEAD(&cache->object_list);
	spin_lock_init(&cache->object_list_lock);
	refcount_set(&cache->unbind_pincount, 1);
	xa_init_flags(&cache->reqs, XA_FLAGS_ALLOC);
	xa_init_flags(&cache->ondemand_ids, XA_FLAGS_ALLOC1);

	/* set default caching limits
	 * - limit at 1% free space and/or free files
	 * - cull below 5% free space and/or free files
	 * - cease culling above 7% free space and/or free files
	 */
	cache->frun_percent = 7;
	cache->fcull_percent = 5;
	cache->fstop_percent = 1;
	cache->brun_percent = 7;
	cache->bcull_percent = 5;
	cache->bstop_percent = 1;

	file->private_data = cache;
	cache->cachefilesd = file;
	return 0;
}

void cachefiles_flush_reqs(struct cachefiles_cache *cache)
{
	struct xarray *xa = &cache->reqs;
	struct cachefiles_req *req;
	unsigned long index;

	/*
	 * Make sure the following two operations won't be reordered.
	 *   1) set CACHEFILES_DEAD bit
	 *   2) flush requests in the xarray
	 * Otherwise a request may be enqueued after the xarray has been
	 * flushed, leaving an orphan request that is never completed.
	 *
	 * CPU 1			CPU 2
	 * =====			=====
	 * flush requests in the xarray
	 *				test CACHEFILES_DEAD bit
	 *				enqueue the request
	 * set CACHEFILES_DEAD bit
	 */
	smp_mb();

	xa_lock(xa);
	xa_for_each(xa, index, req) {
		req->error = -EIO;
		complete(&req->done);
		__xa_erase(xa, index);
	}
	xa_unlock(xa);

	xa_destroy(&cache->reqs);
	xa_destroy(&cache->ondemand_ids);
}

void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache)
{
	if (refcount_dec_and_test(&cache->unbind_pincount)) {
		cachefiles_daemon_unbind(cache);
		cachefiles_open = 0;
		kfree(cache);
	}
}

void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache)
{
	refcount_inc(&cache->unbind_pincount);
}

/*
 * Release a cache.
 */
static int cachefiles_daemon_release(struct inode *inode, struct file *file)
{
	struct cachefiles_cache *cache = file->private_data;

	_enter("");

	ASSERT(cache);

	set_bit(CACHEFILES_DEAD, &cache->flags);

	if (cachefiles_in_ondemand_mode(cache))
		cachefiles_flush_reqs(cache);

	/* clean up the control file interface */
	cache->cachefilesd = NULL;
	file->private_data = NULL;

	cachefiles_put_unbind_pincount(cache);

	_leave("");
	return 0;
}

static ssize_t cachefiles_do_daemon_read(struct cachefiles_cache *cache,
					 char __user *_buffer, size_t buflen)
{
	unsigned long long b_released;
	unsigned f_released;
	char buffer[256];
	int n;

	/* check how much space the cache has */
	cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);

	/* summarise */
	f_released = atomic_xchg(&cache->f_released, 0);
	b_released = atomic_long_xchg(&cache->b_released, 0);
	clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags);

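	/*
	 * Illustrative example of the status line built below, with the
	 * limit and "released" fields rendered in hex (values are examples
	 * only):
	 *
	 *	cull=0 frun=dc5 fcull=9d5 fstop=1f7 brun=dc5 bcull=9d5 bstop=1f7 freleased=0 breleased=0
	 */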
	n = snprintf(buffer, sizeof(buffer),
		     "cull=%c"
		     " frun=%llx"
		     " fcull=%llx"
		     " fstop=%llx"
		     " brun=%llx"
		     " bcull=%llx"
		     " bstop=%llx"
		     " freleased=%x"
		     " breleased=%llx",
		     test_bit(CACHEFILES_CULLING, &cache->flags) ? '1' : '0',
		     (unsigned long long) cache->frun,
		     (unsigned long long) cache->fcull,
		     (unsigned long long) cache->fstop,
		     (unsigned long long) cache->brun,
		     (unsigned long long) cache->bcull,
		     (unsigned long long) cache->bstop,
		     f_released,
		     b_released);

	if (n > buflen)
		return -EMSGSIZE;

	if (copy_to_user(_buffer, buffer, n) != 0)
		return -EFAULT;

	return n;
}

/*
 * Read the cache state.
 */
static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
				      size_t buflen, loff_t *pos)
{
	struct cachefiles_cache *cache = file->private_data;

	//_enter(",,%zu,", buflen);

	if (!test_bit(CACHEFILES_READY, &cache->flags))
		return 0;

	if (cachefiles_in_ondemand_mode(cache))
		return cachefiles_ondemand_daemon_read(cache, _buffer, buflen);
	else
		return cachefiles_do_daemon_read(cache, _buffer, buflen);
}

/*
 * Take a command from cachefilesd, parse it and act on it.
 */
static ssize_t cachefiles_daemon_write(struct file *file,
				       const char __user *_data,
				       size_t datalen,
				       loff_t *pos)
{
	const struct cachefiles_daemon_cmd *cmd;
	struct cachefiles_cache *cache = file->private_data;
	ssize_t ret;
	char *data, *args, *cp;

	//_enter(",,%zu,", datalen);

	ASSERT(cache);

	if (test_bit(CACHEFILES_DEAD, &cache->flags))
		return -EIO;

	if (datalen > PAGE_SIZE - 1)
		return -EOPNOTSUPP;

	/* drag the command string into the kernel so we can parse it */
	data = memdup_user_nul(_data, datalen);
	if (IS_ERR(data))
		return PTR_ERR(data);

	ret = -EINVAL;
	if (memchr(data, '\0', datalen))
		goto error;

	/* strip any newline */
	cp = memchr(data, '\n', datalen);
	if (cp) {
		if (cp == data)
			goto error;

		*cp = '\0';
	}

	/* parse the command */
	ret = -EOPNOTSUPP;

	for (args = data; *args; args++)
		if (isspace(*args))
			break;
	if (*args) {
		if (args == data)
			goto error;
		*args = '\0';
		args = skip_spaces(++args);
	}
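	/*
	 * At this point the buffer has been split into the command keyword
	 * and its argument string, e.g. writing "fcull 5%\n" leaves
	 * data = "fcull" and args = "5%" (illustrative values).
	 */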

	/* run the appropriate command handler */
	for (cmd = cachefiles_daemon_cmds; cmd->name[0]; cmd++)
		if (strcmp(cmd->name, data) == 0)
			goto found_command;

error:
	kfree(data);
	//_leave(" = %zd", ret);
	return ret;

found_command:
	mutex_lock(&cache->daemon_mutex);

	ret = -EIO;
	if (!test_bit(CACHEFILES_DEAD, &cache->flags))
		ret = cmd->handler(cache, args);

	mutex_unlock(&cache->daemon_mutex);

	if (ret == 0)
		ret = datalen;
	goto error;
}

/*
 * Poll for culling state
 * - use EPOLLOUT to indicate culling state
 */
static __poll_t cachefiles_daemon_poll(struct file *file,
					   struct poll_table_struct *poll)
{
	struct cachefiles_cache *cache = file->private_data;
	XA_STATE(xas, &cache->reqs, 0);
	struct cachefiles_req *req;
	__poll_t mask;

	poll_wait(file, &cache->daemon_pollwq, poll);
	mask = 0;

	if (cachefiles_in_ondemand_mode(cache)) {
		if (!xa_empty(&cache->reqs)) {
			xas_lock(&xas);
			xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
				if (!cachefiles_ondemand_is_reopening_read(req)) {
					mask |= EPOLLIN;
					break;
				}
			}
			xas_unlock(&xas);
		}
	} else {
		if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
			mask |= EPOLLIN;
	}

	if (test_bit(CACHEFILES_CULLING, &cache->flags))
		mask |= EPOLLOUT;

	return mask;
}

/*
 * Give a range error for cache space constraints
 * - can be tail-called
 */
static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
					 char *args)
{
	pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n");

	return -EINVAL;
}

/*
 * Set the percentage of files at which to stop culling
 * - command: "frun <N>%"
 */
static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args)
{
	unsigned long frun;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	frun = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (frun <= cache->fcull_percent || frun >= 100)
		return cachefiles_daemon_range_error(cache, args);

	cache->frun_percent = frun;
	return 0;
}

/*
 * Set the percentage of files at which to start culling
 * - command: "fcull <N>%"
 */
static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args)
{
	unsigned long fcull;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	fcull = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (fcull <= cache->fstop_percent || fcull >= cache->frun_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->fcull_percent = fcull;
	return 0;
}

/*
 * Set the percentage of files at which to stop allocating
 * - command: "fstop <N>%"
 */
static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
{
	unsigned long fstop;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	fstop = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (fstop >= cache->fcull_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->fstop_percent = fstop;
	return 0;
}

/*
 * Set the percentage of blocks at which to stop culling
 * - command: "brun <N>%"
 */
static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args)
{
	unsigned long brun;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	brun = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (brun <= cache->bcull_percent || brun >= 100)
		return cachefiles_daemon_range_error(cache, args);

	cache->brun_percent = brun;
	return 0;
}

/*
 * Set the percentage of blocks at which to start culling
 * - command: "bcull <N>%"
 */
static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args)
{
	unsigned long bcull;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	bcull = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (bcull <= cache->bstop_percent || bcull >= cache->brun_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->bcull_percent = bcull;
	return 0;
}

/*
 * Set the percentage of blocks at which to stop allocating
 * - command: "bstop <N>%"
 */
static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
{
	unsigned long bstop;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	bstop = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (bstop >= cache->bcull_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->bstop_percent = bstop;
	return 0;
}

/*
 * Set the cache directory
 * - command: "dir <name>"
 */
static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
{
	char *dir;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty directory specified\n");
		return -EINVAL;
	}

	if (cache->rootdirname) {
		pr_err("Second cache directory specified\n");
		return -EEXIST;
	}

	dir = kstrdup(args, GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	cache->rootdirname = dir;
	return 0;
}

/*
 * Set the cache security context
 * - command: "secctx <ctx>"
 */
static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
{
	char *secctx;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty security context specified\n");
		return -EINVAL;
	}

	if (cache->secctx) {
		pr_err("Second security context specified\n");
		return -EINVAL;
	}

	secctx = kstrdup(args, GFP_KERNEL);
	if (!secctx)
		return -ENOMEM;

	cache->secctx = secctx;
	return 0;
}

/*
 * Set the cache tag
 * - command: "tag <name>"
 */
static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
{
	char *tag;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty tag specified\n");
		return -EINVAL;
	}

	if (cache->tag)
		return -EEXIST;

	tag = kstrdup(args, GFP_KERNEL);
	if (!tag)
		return -ENOMEM;

	cache->tag = tag;
	return 0;
}

/*
 * Request a node in the cache be culled from the current working directory
 * - command: "cull <name>"
 */
static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
{
	struct path path;
	const struct cred *saved_cred;
	int ret;

	_enter(",%s", args);

	if (strchr(args, '/'))
		goto inval;

	if (!test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("cull applied to unready cache\n");
		return -EIO;
	}

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		pr_err("cull applied to dead cache\n");
		return -EIO;
	}

	get_fs_pwd(current->fs, &path);

	if (!d_can_lookup(path.dentry))
		goto notdir;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = cachefiles_cull(cache, path.dentry, args);
	cachefiles_end_secure(cache, saved_cred);

	path_put(&path);
	_leave(" = %d", ret);
	return ret;

notdir:
	path_put(&path);
	pr_err("cull command requires dirfd to be a directory\n");
	return -ENOTDIR;

inval:
	pr_err("cull command requires dirfd and filename\n");
	return -EINVAL;
}

/*
 * Set debugging mode
 * - command: "debug <mask>"
 */
static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args)
{
	unsigned long mask;

	_enter(",%s", args);

	mask = simple_strtoul(args, &args, 0);
	if (args[0] != '\0')
		goto inval;

	cachefiles_debug = mask;
	_leave(" = 0");
	return 0;

inval:
	pr_err("debug command requires mask\n");
	return -EINVAL;
}

/*
 * Find out whether an object in the current working directory is in use or not
 * - command: "inuse <name>"
 */
static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
{
	struct path path;
	const struct cred *saved_cred;
	int ret;

	//_enter(",%s", args);

	if (strchr(args, '/'))
		goto inval;

	if (!test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("inuse applied to unready cache\n");
		return -EIO;
	}

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		pr_err("inuse applied to dead cache\n");
		return -EIO;
	}

	get_fs_pwd(current->fs, &path);

	if (!d_can_lookup(path.dentry))
		goto notdir;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = cachefiles_check_in_use(cache, path.dentry, args);
	cachefiles_end_secure(cache, saved_cred);

	path_put(&path);
	//_leave(" = %d", ret);
	return ret;

notdir:
	path_put(&path);
	pr_err("inuse command requires dirfd to be a directory\n");
	return -ENOTDIR;

inval:
	pr_err("inuse command requires dirfd and filename\n");
	return -EINVAL;
}

/*
 * Bind a directory as a cache
 */
static int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
{
	_enter("{%u,%u,%u,%u,%u,%u},%s",
	       cache->frun_percent,
	       cache->fcull_percent,
	       cache->fstop_percent,
	       cache->brun_percent,
	       cache->bcull_percent,
	       cache->bstop_percent,
	       args);

	if (cache->fstop_percent >= cache->fcull_percent ||
	    cache->fcull_percent >= cache->frun_percent ||
	    cache->frun_percent  >= 100)
		return -ERANGE;

	if (cache->bstop_percent >= cache->bcull_percent ||
	    cache->bcull_percent >= cache->brun_percent ||
	    cache->brun_percent  >= 100)
		return -ERANGE;

	if (!cache->rootdirname) {
		pr_err("No cache directory specified\n");
		return -EINVAL;
	}

	/* Don't permit already bound caches to be re-bound */
	if (test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("Cache already bound\n");
		return -EBUSY;
	}

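	/*
	 * The only accepted forms are "bind" and, when ondemand support is
	 * compiled in, "bind ondemand"; any other argument is rejected
	 * below.
	 */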
	if (IS_ENABLED(CONFIG_CACHEFILES_ONDEMAND)) {
		if (!strcmp(args, "ondemand")) {
			set_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags);
		} else if (*args) {
			pr_err("Invalid argument to the 'bind' command\n");
			return -EINVAL;
		}
	} else if (*args) {
		pr_err("'bind' command doesn't take an argument\n");
		return -EINVAL;
	}

	/* Make sure we have copies of the tag string */
	if (!cache->tag) {
		/*
		 * The tag string is released by the fops->release()
		 * function, so we don't release it on error here
		 */
		cache->tag = kstrdup("CacheFiles", GFP_KERNEL);
		if (!cache->tag)
			return -ENOMEM;
	}

	return cachefiles_add_cache(cache);
}

/*
 * Unbind a cache.
 */
static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
{
	_enter("");

	if (test_bit(CACHEFILES_READY, &cache->flags))
		cachefiles_withdraw_cache(cache);

	cachefiles_put_directory(cache->graveyard);
	cachefiles_put_directory(cache->store);
	mntput(cache->mnt);
	put_cred(cache->cache_cred);

	kfree(cache->rootdirname);
	kfree(cache->secctx);
	kfree(cache->tag);

	_leave("");
}