// SPDX-License-Identifier: GPL-2.0-or-later
/* Daemon interface
 *
 * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/mount.h>
#include <linux/statfs.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/fs_struct.h>
#include "internal.h"

static int cachefiles_daemon_open(struct inode *, struct file *);
static int cachefiles_daemon_release(struct inode *, struct file *);
static ssize_t cachefiles_daemon_read(struct file *, char __user *, size_t,
				      loff_t *);
static ssize_t cachefiles_daemon_write(struct file *, const char __user *,
				       size_t, loff_t *);
static __poll_t cachefiles_daemon_poll(struct file *,
					   struct poll_table_struct *);
static int cachefiles_daemon_frun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fcull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fstop(struct cachefiles_cache *, char *);
static int cachefiles_daemon_brun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bcull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bstop(struct cachefiles_cache *, char *);
static int cachefiles_daemon_cull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_debug(struct cachefiles_cache *, char *);
static int cachefiles_daemon_dir(struct cachefiles_cache *, char *);
static int cachefiles_daemon_inuse(struct cachefiles_cache *, char *);
static int cachefiles_daemon_secctx(struct cachefiles_cache *, char *);
static int cachefiles_daemon_tag(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bind(struct cachefiles_cache *, char *);
static void cachefiles_daemon_unbind(struct cachefiles_cache *);

static unsigned long cachefiles_open;

const struct file_operations cachefiles_daemon_fops = {
	.owner		= THIS_MODULE,
	.open		= cachefiles_daemon_open,
	.release	= cachefiles_daemon_release,
	.read		= cachefiles_daemon_read,
	.write		= cachefiles_daemon_write,
	.poll		= cachefiles_daemon_poll,
	.llseek		= noop_llseek,
};

struct cachefiles_daemon_cmd {
	char name[8];
	int (*handler)(struct cachefiles_cache *cache, char *args);
};

static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
	{ "bind",	cachefiles_daemon_bind		},
	{ "brun",	cachefiles_daemon_brun		},
	{ "bcull",	cachefiles_daemon_bcull		},
	{ "bstop",	cachefiles_daemon_bstop		},
	{ "cull",	cachefiles_daemon_cull		},
	{ "debug",	cachefiles_daemon_debug		},
	{ "dir",	cachefiles_daemon_dir		},
	{ "frun",	cachefiles_daemon_frun		},
	{ "fcull",	cachefiles_daemon_fcull		},
	{ "fstop",	cachefiles_daemon_fstop		},
	{ "inuse",	cachefiles_daemon_inuse		},
	{ "secctx",	cachefiles_daemon_secctx	},
	{ "tag",	cachefiles_daemon_tag		},
#ifdef CONFIG_CACHEFILES_ONDEMAND
	{ "copen",	cachefiles_ondemand_copen	},
#endif
	{ "",		NULL				}
};
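
/*
 * Illustration only (comment added here, not in the original source): a
 * user-space cache manager such as cachefilesd drives the table above by
 * writing one command per write() to /dev/cachefiles.  A typical setup
 * sequence might look something like:
 *
 *	dir /var/cache/fscache
 *	tag mycache
 *	brun 10%
 *	bcull 7%
 *	bstop 3%
 *	frun 10%
 *	fcull 7%
 *	fstop 3%
 *	bind
 *
 * The directory path and percentages are illustrative values; "bind" must
 * come last, since it is what actually brings the cache online.
 */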


/*
 * Prepare a cache for caching.
 */
static int cachefiles_daemon_open(struct inode *inode, struct file *file)
{
	struct cachefiles_cache *cache;

	_enter("");

	/* only the superuser may do this */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* the cachefiles device may only be open once at a time */
	if (xchg(&cachefiles_open, 1) == 1)
		return -EBUSY;

	/* allocate a cache record */
	cache = kzalloc(sizeof(struct cachefiles_cache), GFP_KERNEL);
	if (!cache) {
		cachefiles_open = 0;
		return -ENOMEM;
	}

	mutex_init(&cache->daemon_mutex);
	init_waitqueue_head(&cache->daemon_pollwq);
	INIT_LIST_HEAD(&cache->volumes);
	INIT_LIST_HEAD(&cache->object_list);
	spin_lock_init(&cache->object_list_lock);
	refcount_set(&cache->unbind_pincount, 1);
	xa_init_flags(&cache->reqs, XA_FLAGS_ALLOC);
	xa_init_flags(&cache->ondemand_ids, XA_FLAGS_ALLOC1);

	/* set default caching limits
	 * - limit at 1% free space and/or free files
	 * - cull below 5% free space and/or free files
	 * - cease culling above 7% free space and/or free files
	 */
	cache->frun_percent = 7;
	cache->fcull_percent = 5;
	cache->fstop_percent = 1;
	cache->brun_percent = 7;
	cache->bcull_percent = 5;
	cache->bstop_percent = 1;

	file->private_data = cache;
	cache->cachefilesd = file;
	return 0;
}

static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
{
	struct xarray *xa = &cache->reqs;
	struct cachefiles_req *req;
	unsigned long index;

	/*
	 * Make sure the following two operations won't be reordered.
	 *   1) set CACHEFILES_DEAD bit
	 *   2) flush requests in the xarray
	 * Otherwise the request may be enqueued after xarray has been
	 * flushed, leaving the orphan request never being completed.
	 *
	 * CPU 1			CPU 2
	 * =====			=====
	 * flush requests in the xarray
	 *				test CACHEFILES_DEAD bit
	 *				enqueue the request
	 * set CACHEFILES_DEAD bit
	 */
	smp_mb();

	xa_lock(xa);
	xa_for_each(xa, index, req) {
		req->error = -EIO;
		complete(&req->done);
	}
	xa_unlock(xa);

	xa_destroy(&cache->reqs);
	xa_destroy(&cache->ondemand_ids);
}
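
/*
 * Added note: cachefiles_flush_reqs() completes every queued on-demand
 * request with -EIO and tears down the request xarrays, since no daemon
 * will ever answer them.  The smp_mb() above orders the caller's
 * set_bit(CACHEFILES_DEAD) before the flush, so that the enqueueing side,
 * which is expected to test CACHEFILES_DEAD before publishing a request,
 * cannot slip in a request that would then never be completed.
 */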

void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache)
{
	if (refcount_dec_and_test(&cache->unbind_pincount)) {
		cachefiles_daemon_unbind(cache);
		cachefiles_open = 0;
		kfree(cache);
	}
}

void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache)
{
	refcount_inc(&cache->unbind_pincount);
}

/*
 * Release a cache.
 */
static int cachefiles_daemon_release(struct inode *inode, struct file *file)
{
	struct cachefiles_cache *cache = file->private_data;

	_enter("");

	ASSERT(cache);

	set_bit(CACHEFILES_DEAD, &cache->flags);

	if (cachefiles_in_ondemand_mode(cache))
		cachefiles_flush_reqs(cache);

	/* clean up the control file interface */
	cache->cachefilesd = NULL;
	file->private_data = NULL;

	cachefiles_put_unbind_pincount(cache);

	_leave("");
	return 0;
}

static ssize_t cachefiles_do_daemon_read(struct cachefiles_cache *cache,
					 char __user *_buffer, size_t buflen)
{
	unsigned long long b_released;
	unsigned f_released;
	char buffer[256];
	int n;

	/* check how much space the cache has */
	cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);

	/* summarise */
	f_released = atomic_xchg(&cache->f_released, 0);
	b_released = atomic_long_xchg(&cache->b_released, 0);
	clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags);

	n = snprintf(buffer, sizeof(buffer),
		     "cull=%c"
		     " frun=%llx"
		     " fcull=%llx"
		     " fstop=%llx"
		     " brun=%llx"
		     " bcull=%llx"
		     " bstop=%llx"
		     " freleased=%x"
		     " breleased=%llx",
		     test_bit(CACHEFILES_CULLING, &cache->flags) ? '1' : '0',
		     (unsigned long long) cache->frun,
		     (unsigned long long) cache->fcull,
		     (unsigned long long) cache->fstop,
		     (unsigned long long) cache->brun,
		     (unsigned long long) cache->bcull,
		     (unsigned long long) cache->bstop,
		     f_released,
		     b_released);

	if (n > buflen)
		return -EMSGSIZE;

	if (copy_to_user(_buffer, buffer, n) != 0)
		return -EFAULT;

	return n;
}
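
/*
 * Illustration (hypothetical values, not from the original source): the
 * status record returned to the daemon is a single space-separated line,
 * e.g. something like
 *
 *	cull=0 frun=1388 fcull=dac bstop=2bc ... freleased=0 breleased=0
 *
 * where the f*/b* limits are absolute file/block counts in hexadecimal,
 * derived from the configured percentages and the backing filesystem's
 * totals when the cache was bound.
 */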

/*
 * Read the cache state.
 */
static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
				      size_t buflen, loff_t *pos)
{
	struct cachefiles_cache *cache = file->private_data;

	//_enter(",,%zu,", buflen);

	if (!test_bit(CACHEFILES_READY, &cache->flags))
		return 0;

	if (cachefiles_in_ondemand_mode(cache))
		return cachefiles_ondemand_daemon_read(cache, _buffer, buflen);
	else
		return cachefiles_do_daemon_read(cache, _buffer, buflen);
}

/*
 * Take a command from cachefilesd, parse it and act on it.
 */
static ssize_t cachefiles_daemon_write(struct file *file,
				       const char __user *_data,
				       size_t datalen,
				       loff_t *pos)
{
	const struct cachefiles_daemon_cmd *cmd;
	struct cachefiles_cache *cache = file->private_data;
	ssize_t ret;
	char *data, *args, *cp;

	//_enter(",,%zu,", datalen);

	ASSERT(cache);

	if (test_bit(CACHEFILES_DEAD, &cache->flags))
		return -EIO;

	if (datalen > PAGE_SIZE - 1)
		return -EOPNOTSUPP;

	/* drag the command string into the kernel so we can parse it */
	data = memdup_user_nul(_data, datalen);
	if (IS_ERR(data))
		return PTR_ERR(data);

	ret = -EINVAL;
	if (memchr(data, '\0', datalen))
		goto error;

	/* strip any newline */
	cp = memchr(data, '\n', datalen);
	if (cp) {
		if (cp == data)
			goto error;

		*cp = '\0';
	}

	/* parse the command */
	ret = -EOPNOTSUPP;

	for (args = data; *args; args++)
		if (isspace(*args))
			break;
	if (*args) {
		if (args == data)
			goto error;
		*args = '\0';
		args = skip_spaces(++args);
	}

	/* run the appropriate command handler */
	for (cmd = cachefiles_daemon_cmds; cmd->name[0]; cmd++)
		if (strcmp(cmd->name, data) == 0)
			goto found_command;

error:
	kfree(data);
	//_leave(" = %zd", ret);
	return ret;

found_command:
	mutex_lock(&cache->daemon_mutex);

	ret = -EIO;
	if (!test_bit(CACHEFILES_DEAD, &cache->flags))
		ret = cmd->handler(cache, args);

	mutex_unlock(&cache->daemon_mutex);

	if (ret == 0)
		ret = datalen;
	goto error;
}
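
/*
 * Added example: a write of "brun 10%\n" arrives here as datalen bytes of
 * user data; the trailing newline is stripped, the buffer is split at the
 * first whitespace into data = "brun" and args = "10%", and the matching
 * handler from cachefiles_daemon_cmds[] is run under daemon_mutex.  A
 * handler returning 0 makes the write() report the full datalen as written.
 */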

/*
 * Poll for culling state
 * - use EPOLLOUT to indicate culling state
 */
static __poll_t cachefiles_daemon_poll(struct file *file,
					   struct poll_table_struct *poll)
{
	struct cachefiles_cache *cache = file->private_data;
	__poll_t mask;

	poll_wait(file, &cache->daemon_pollwq, poll);
	mask = 0;

	if (cachefiles_in_ondemand_mode(cache)) {
		if (!xa_empty(&cache->reqs))
			mask |= EPOLLIN;
	} else {
		if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
			mask |= EPOLLIN;
	}

	if (test_bit(CACHEFILES_CULLING, &cache->flags))
		mask |= EPOLLOUT;

	return mask;
}
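
/*
 * Added note: the daemon polls this device; EPOLLIN means there is
 * something to read (a state change, or pending on-demand requests),
 * while EPOLLOUT signals that the cache has entered cull mode and the
 * daemon is expected to start discarding cache objects.
 */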

/*
 * Give a range error for cache space constraints
 * - can be tail-called
 */
static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
					 char *args)
{
	pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n");

	return -EINVAL;
}

/*
 * Set the percentage of files at which to stop culling
 * - command: "frun <N>%"
 */
static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args)
{
	unsigned long frun;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	frun = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (frun <= cache->fcull_percent || frun >= 100)
		return cachefiles_daemon_range_error(cache, args);

	cache->frun_percent = frun;
	return 0;
}

/*
 * Set the percentage of files at which to start culling
 * - command: "fcull <N>%"
 */
static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args)
{
	unsigned long fcull;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	fcull = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (fcull <= cache->fstop_percent || fcull >= cache->frun_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->fcull_percent = fcull;
	return 0;
}

/*
 * Set the percentage of files at which to stop allocating
 * - command: "fstop <N>%"
 */
static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
{
	unsigned long fstop;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	fstop = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (fstop >= cache->fcull_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->fstop_percent = fstop;
	return 0;
}

/*
 * Set the percentage of blocks at which to stop culling
 * - command: "brun <N>%"
 */
static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args)
{
	unsigned long brun;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	brun = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (brun <= cache->bcull_percent || brun >= 100)
		return cachefiles_daemon_range_error(cache, args);

	cache->brun_percent = brun;
	return 0;
}

/*
 * Set the percentage of blocks at which to start culling
 * - command: "bcull <N>%"
 */
static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args)
{
	unsigned long bcull;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	bcull = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (bcull <= cache->bstop_percent || bcull >= cache->brun_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->bcull_percent = bcull;
	return 0;
}

/*
 * Set the percentage of blocks at which to stop allocating
 * - command: "bstop <N>%"
 */
static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
{
	unsigned long bstop;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	bstop = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (bstop >= cache->bcull_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->bstop_percent = bstop;
	return 0;
}
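
/*
 * Added note: taken together, the six handlers above maintain the ordering
 * 0 <= stop < cull < run < 100 for both the file-count (f*) and block-count
 * (b*) limits, matching the message printed by
 * cachefiles_daemon_range_error(); cachefiles_daemon_bind() re-checks the
 * same relationship before the cache goes live.
 */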

/*
 * Set the cache directory
 * - command: "dir <name>"
 */
static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
{
	char *dir;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty directory specified\n");
		return -EINVAL;
	}

	if (cache->rootdirname) {
		pr_err("Second cache directory specified\n");
		return -EEXIST;
	}

	dir = kstrdup(args, GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	cache->rootdirname = dir;
	return 0;
}
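
/* Example (illustrative path): "dir /var/cache/fscache" */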

/*
 * Set the cache security context
 * - command: "secctx <ctx>"
 */
static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
{
	char *secctx;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty security context specified\n");
		return -EINVAL;
	}

	if (cache->secctx) {
		pr_err("Second security context specified\n");
		return -EINVAL;
	}

	secctx = kstrdup(args, GFP_KERNEL);
	if (!secctx)
		return -ENOMEM;

	cache->secctx = secctx;
	return 0;
}
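
/*
 * Example (illustrative LSM label): "secctx system_u:system_r:cachefiles_kernel_t:s0"
 * - the string is only stored here; it is applied to the cache's
 *   credentials when the cache is bound, so the security module sees the
 *   cache's backing I/O under that context.
 */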

/*
 * Set the cache tag
 * - command: "tag <name>"
 */
static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
{
	char *tag;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty tag specified\n");
		return -EINVAL;
	}

	if (cache->tag)
		return -EEXIST;

	tag = kstrdup(args, GFP_KERNEL);
	if (!tag)
		return -ENOMEM;

	cache->tag = tag;
	return 0;
}
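
/*
 * Example (illustrative name): "tag mycache" - the tag identifies this
 * cache so that multiple caches can be told apart; if it is never set,
 * cachefiles_daemon_bind() falls back to the default "CacheFiles".
 */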

/*
 * Request a node in the cache be culled from the current working directory
 * - command: "cull <name>"
 */
static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
{
	struct path path;
	const struct cred *saved_cred;
	int ret;

	_enter(",%s", args);

	if (strchr(args, '/'))
		goto inval;

	if (!test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("cull applied to unready cache\n");
		return -EIO;
	}

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		pr_err("cull applied to dead cache\n");
		return -EIO;
	}

	get_fs_pwd(current->fs, &path);

	if (!d_can_lookup(path.dentry))
		goto notdir;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = cachefiles_cull(cache, path.dentry, args);
	cachefiles_end_secure(cache, saved_cred);

	path_put(&path);
	_leave(" = %d", ret);
	return ret;

notdir:
	path_put(&path);
	pr_err("cull command requires dirfd to be a directory\n");
	return -ENOTDIR;

inval:
	pr_err("cull command requires dirfd and filename\n");
	return -EINVAL;
}
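
/*
 * Added note: the victim name given to "cull" is interpreted relative to
 * the writing process's current working directory (get_fs_pwd() above), so
 * the daemon is expected to chdir()/fchdir() into the directory holding
 * the object before issuing the command; the name itself must not contain
 * a '/'.
 */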

/*
 * Set debugging mode
 * - command: "debug <mask>"
 */
static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args)
{
	unsigned long mask;

	_enter(",%s", args);

	mask = simple_strtoul(args, &args, 0);
	if (args[0] != '\0')
		goto inval;

	cachefiles_debug = mask;
	_leave(" = 0");
	return 0;

inval:
	pr_err("debug command requires mask\n");
	return -EINVAL;
}
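
/*
 * Example (illustrative): "debug 7" - the mask is parsed with base
 * auto-detection, so "0x7" also works.  The individual bits select classes
 * of debugging output declared in internal.h (assumed here to be function
 * entry, function exit and general debug messages).
 */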

/*
 * Find out whether an object in the current working directory is in use or not
 * - command: "inuse <name>"
 */
static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
{
	struct path path;
	const struct cred *saved_cred;
	int ret;

	//_enter(",%s", args);

	if (strchr(args, '/'))
		goto inval;

	if (!test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("inuse applied to unready cache\n");
		return -EIO;
	}

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		pr_err("inuse applied to dead cache\n");
		return -EIO;
	}

	get_fs_pwd(current->fs, &path);

	if (!d_can_lookup(path.dentry))
		goto notdir;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = cachefiles_check_in_use(cache, path.dentry, args);
	cachefiles_end_secure(cache, saved_cred);

	path_put(&path);
	//_leave(" = %d", ret);
	return ret;

notdir:
	path_put(&path);
	pr_err("inuse command requires dirfd to be a directory\n");
	return -ENOTDIR;

inval:
	pr_err("inuse command requires dirfd and filename\n");
	return -EINVAL;
}

/*
 * Bind a directory as a cache
 */
static int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
{
	_enter("{%u,%u,%u,%u,%u,%u},%s",
	       cache->frun_percent,
	       cache->fcull_percent,
	       cache->fstop_percent,
	       cache->brun_percent,
	       cache->bcull_percent,
	       cache->bstop_percent,
	       args);

	if (cache->fstop_percent >= cache->fcull_percent ||
	    cache->fcull_percent >= cache->frun_percent ||
	    cache->frun_percent  >= 100)
		return -ERANGE;

	if (cache->bstop_percent >= cache->bcull_percent ||
	    cache->bcull_percent >= cache->brun_percent ||
	    cache->brun_percent  >= 100)
		return -ERANGE;

	if (!cache->rootdirname) {
		pr_err("No cache directory specified\n");
		return -EINVAL;
	}

	/* Don't permit already bound caches to be re-bound */
	if (test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("Cache already bound\n");
		return -EBUSY;
	}

	if (IS_ENABLED(CONFIG_CACHEFILES_ONDEMAND)) {
		if (!strcmp(args, "ondemand")) {
			set_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags);
		} else if (*args) {
			pr_err("Invalid argument to the 'bind' command\n");
			return -EINVAL;
		}
	} else if (*args) {
		pr_err("'bind' command doesn't take an argument\n");
		return -EINVAL;
	}

	/* Make sure we have copies of the tag string */
	if (!cache->tag) {
		/*
		 * The tag string is released by the fops->release()
		 * function, so we don't release it on error here
		 */
		cache->tag = kstrdup("CacheFiles", GFP_KERNEL);
		if (!cache->tag)
			return -ENOMEM;
	}

	return cachefiles_add_cache(cache);
}
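
/*
 * Added note: "bind" takes no argument in the normal mode.  With
 * CONFIG_CACHEFILES_ONDEMAND enabled, "bind ondemand" switches the cache
 * into on-demand mode, in which cache misses become requests that the
 * daemon reads from this device and answers (for example with the "copen"
 * command registered in cachefiles_daemon_cmds[]) rather than the kernel
 * setting up the backing files itself.
 */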

/*
 * Unbind a cache.
 */
static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
{
	_enter("");

	if (test_bit(CACHEFILES_READY, &cache->flags))
		cachefiles_withdraw_cache(cache);

	cachefiles_put_directory(cache->graveyard);
	cachefiles_put_directory(cache->store);
	mntput(cache->mnt);
	put_cred(cache->cache_cred);

	kfree(cache->rootdirname);
	kfree(cache->secctx);
	kfree(cache->tag);

	_leave("");
}