// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/fdtable.h>
#include <linux/anon_inodes.h>
#include <linux/uio.h>
#include "internal.h"

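/*
 * A pre-reserved fd and its anonymous file for an OPEN request. Keeping
 * them separate allows the fd to be installed only after the request has
 * been copied to userspace successfully.
 */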
struct ondemand_anon_file {
	struct file *file;
	int fd;
};

static inline void cachefiles_req_put(struct cachefiles_req *req)
{
	if (refcount_dec_and_test(&req->ref))
		kfree(req);
}

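/*
 * Release the anonymous fd that was passed to the user daemon. Mark the
 * object as closed and complete any pending CLOSE requests for it, so
 * that senders blocked in cachefiles_ondemand_send_req() do not hang.
 */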
static int cachefiles_ondemand_fd_release(struct inode *inode,
					  struct file *file)
{
	struct cachefiles_object *object = file->private_data;
	struct cachefiles_cache *cache;
	struct cachefiles_ondemand_info *info;
	int object_id;
	struct cachefiles_req *req;
	XA_STATE(xas, NULL, 0);

	if (!object)
		return 0;

	info = object->ondemand;
	cache = object->volume->cache;
	xas.xa = &cache->reqs;

	xa_lock(&cache->reqs);
	spin_lock(&info->lock);
	object_id = info->ondemand_id;
	info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
	cachefiles_ondemand_set_object_close(object);
	spin_unlock(&info->lock);

	/* Only flush requests marked CACHEFILES_REQ_NEW to avoid racing with daemon_read */
	xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
		if (req->msg.object_id == object_id &&
		    req->msg.opcode == CACHEFILES_OP_CLOSE) {
			complete(&req->done);
			xas_store(&xas, NULL);
		}
	}
	xa_unlock(&cache->reqs);

	xa_erase(&cache->ondemand_ids, object_id);
	trace_cachefiles_ondemand_fd_release(object, object_id);
	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
	cachefiles_put_unbind_pincount(cache);
	return 0;
}

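/*
 * The write path of the anonymous fd: the user daemon writes data into
 * the backing file at the given position.
 */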
static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
						 struct iov_iter *iter)
{
	struct cachefiles_object *object = kiocb->ki_filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file = object->file;
	size_t len = iter->count;
	loff_t pos = kiocb->ki_pos;
	const struct cred *saved_cred;
	int ret;

	if (!file)
		return -ENOBUFS;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = __cachefiles_prepare_write(object, file, &pos, &len, true);
	cachefiles_end_secure(cache, saved_cred);
	if (ret < 0)
		return ret;

	trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
	ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
	if (!ret)
		ret = len;

	return ret;
}

static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
					    int whence)
{
	struct cachefiles_object *object = filp->private_data;
	struct file *file = object->file;

	if (!file)
		return -ENOBUFS;

	return vfs_llseek(file, pos, whence);
}

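/*
 * CACHEFILES_IOC_READ_COMPLETE: the user daemon signals that a READ
 * request has been served, waking up the waiter blocked in
 * cachefiles_ondemand_send_req().
 */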
static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
					 unsigned long arg)
{
	struct cachefiles_object *object = filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req;
	unsigned long id;

	if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
		return -EINVAL;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	id = arg;
	req = xa_erase(&cache->reqs, id);
	if (!req)
		return -EINVAL;

	trace_cachefiles_ondemand_cread(object, id);
	complete(&req->done);
	return 0;
}

static const struct file_operations cachefiles_ondemand_fd_fops = {
	.owner		= THIS_MODULE,
	.release	= cachefiles_ondemand_fd_release,
	.write_iter	= cachefiles_ondemand_fd_write_iter,
	.llseek		= cachefiles_ondemand_fd_llseek,
	.unlocked_ioctl	= cachefiles_ondemand_fd_ioctl,
};

/*
 * OPEN request completion (copen)
 * - command: "copen <id>,<cache_size>"
 *   <cache_size> indicates the object size if >= 0, or an error code if
 *   negative
 */
int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
{
	struct cachefiles_req *req;
	struct fscache_cookie *cookie;
	struct cachefiles_ondemand_info *info;
	char *pid, *psize;
	unsigned long id;
	long size;
	int ret;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	if (!*args) {
		pr_err("Empty id specified\n");
		return -EINVAL;
	}

	pid = args;
	psize = strchr(args, ',');
	if (!psize) {
		pr_err("Cache size is not specified\n");
		return -EINVAL;
	}

	*psize = 0;
	psize++;

	ret = kstrtoul(pid, 0, &id);
	if (ret)
		return ret;

	req = xa_erase(&cache->reqs, id);
	if (!req)
		return -EINVAL;

	/* fail OPEN request if copen format is invalid */
	ret = kstrtol(psize, 0, &size);
	if (ret) {
		req->error = ret;
		goto out;
	}

	/* fail OPEN request if daemon reports an error */
	if (size < 0) {
		if (!IS_ERR_VALUE(size)) {
			req->error = -EINVAL;
			ret = -EINVAL;
		} else {
			req->error = size;
			ret = 0;
		}
		goto out;
	}

	info = req->object->ondemand;
	spin_lock(&info->lock);
	/*
	 * Was the anonymous fd closed before copen? Fail the request.
	 *
	 *             t1             |             t2
	 * ---------------------------------------------------------
	 *                             cachefiles_ondemand_copen
	 *                             req = xa_erase(&cache->reqs, id)
	 * // Anon fd is maliciously closed.
	 * cachefiles_ondemand_fd_release
	 * xa_lock(&cache->reqs)
	 * cachefiles_ondemand_set_object_close(object)
	 * xa_unlock(&cache->reqs)
	 *                             cachefiles_ondemand_set_object_open
	 *                             // No one will ever close it again.
	 * cachefiles_ondemand_daemon_read
	 * cachefiles_ondemand_select_req
	 *
	 * The daemon gets a READ request whose fd is already closed. It
	 * cannot issue a cread ioctl with a closed fd, and then hangs.
	 */
	if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) {
		spin_unlock(&info->lock);
		req->error = -EBADFD;
		goto out;
	}
	cookie = req->object->cookie;
	cookie->object_size = size;
	if (size)
		clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	else
		set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	trace_cachefiles_ondemand_copen(req->object, id, size);

	cachefiles_ondemand_set_object_open(req->object);
	spin_unlock(&info->lock);
	wake_up_all(&cache->daemon_pollwq);

out:
	complete(&req->done);
	return ret;
}

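/*
 * Restore handler: make all pending requests visible again (marked
 * CACHEFILES_REQ_NEW) so that a restarted user daemon can reprocess
 * them.
 */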
int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
{
	struct cachefiles_req *req;

	XA_STATE(xas, &cache->reqs, 0);

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	/*
	 * Reset the requests to CACHEFILES_REQ_NEW state, so that requests
	 * that were processed halfway before the user daemon crashed can be
	 * reprocessed after the recovery.
	 */
	xas_lock(&xas);
	xas_for_each(&xas, req, ULONG_MAX)
		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
	xas_unlock(&xas);

	wake_up_all(&cache->daemon_pollwq);
	return 0;
}

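/*
 * Allocate an object ID and an anonymous fd for an OPEN request. The fd
 * is not installed into the daemon's fd table here; that is deferred
 * until the request has been copied to userspace successfully.
 */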
static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
				      struct ondemand_anon_file *anon_file)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct cachefiles_open *load;
	u32 object_id;
	int ret;

	object = cachefiles_grab_object(req->object,
			cachefiles_obj_get_ondemand_fd);
	cache = object->volume->cache;

	ret = xa_alloc_cyclic(&cache->ondemand_ids, &object_id, NULL,
			      XA_LIMIT(1, INT_MAX),
			      &cache->ondemand_id_next, GFP_KERNEL);
	if (ret < 0)
		goto err;

	anon_file->fd = get_unused_fd_flags(O_WRONLY);
	if (anon_file->fd < 0) {
		ret = anon_file->fd;
		goto err_free_id;
	}

	anon_file->file = anon_inode_getfile("[cachefiles]",
				&cachefiles_ondemand_fd_fops, object, O_WRONLY);
	if (IS_ERR(anon_file->file)) {
		ret = PTR_ERR(anon_file->file);
		goto err_put_fd;
	}

	spin_lock(&object->ondemand->lock);
	if (object->ondemand->ondemand_id > 0) {
		spin_unlock(&object->ondemand->lock);
		/* Pair with check in cachefiles_ondemand_fd_release(). */
		anon_file->file->private_data = NULL;
		ret = -EEXIST;
		goto err_put_file;
	}

	anon_file->file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;

	load = (void *)req->msg.data;
	load->fd = anon_file->fd;
	object->ondemand->ondemand_id = object_id;
	spin_unlock(&object->ondemand->lock);

	cachefiles_get_unbind_pincount(cache);
	trace_cachefiles_ondemand_open(object, &req->msg, load);
	return 0;

err_put_file:
	fput(anon_file->file);
	anon_file->file = NULL;
err_put_fd:
	put_unused_fd(anon_file->fd);
	anon_file->fd = ret;
err_free_id:
	xa_erase(&cache->ondemand_ids, object_id);
err:
	spin_lock(&object->ondemand->lock);
	/* Avoid marking an opened object as closed. */
	if (object->ondemand->ondemand_id <= 0)
		cachefiles_ondemand_set_object_close(object);
	spin_unlock(&object->ondemand->lock);
	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
	return ret;
}

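/*
 * Worker that reopens a closed object by resending an OPEN request,
 * queued from cachefiles_ondemand_select_req().
 */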
static void ondemand_object_worker(struct work_struct *work)
{
	struct cachefiles_ondemand_info *info =
		container_of(work, struct cachefiles_ondemand_info, ondemand_work);

	cachefiles_ondemand_init_object(info->object);
}

/*
 * If there is an inflight or subsequent READ request on a closed
 * object, reopen it.
 * Skip READ requests whose object is still being reopened.
 */
static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas,
							     unsigned long xa_max)
{
	struct cachefiles_req *req;
	struct cachefiles_object *object;
	struct cachefiles_ondemand_info *info;

	xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) {
		if (req->msg.opcode != CACHEFILES_OP_READ)
			return req;
		object = req->object;
		info = object->ondemand;
		if (cachefiles_ondemand_object_is_close(object)) {
			cachefiles_ondemand_set_object_reopening(object);
			queue_work(fscache_wq, &info->ondemand_work);
			continue;
		}
		if (cachefiles_ondemand_object_is_reopening(object))
			continue;
		return req;
	}
	return NULL;
}

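/*
 * Called when the user daemon reads the device file: pick the next new
 * request, copy its message to the daemon's buffer and, for an OPEN
 * request, install the anonymous fd once the copy has succeeded.
 */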
ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
					char __user *_buffer, size_t buflen)
{
	struct cachefiles_req *req;
	struct cachefiles_msg *msg;
	size_t n;
	int ret = 0;
	struct ondemand_anon_file anon_file;
	XA_STATE(xas, &cache->reqs, cache->req_id_next);

	xa_lock(&cache->reqs);
	/*
	 * Cyclically search for a request that has not yet been processed,
	 * to prevent requests from being processed repeatedly and to make
	 * request distribution fair.
	 */
	req = cachefiles_ondemand_select_req(&xas, ULONG_MAX);
	if (!req && cache->req_id_next > 0) {
		xas_set(&xas, 0);
		req = cachefiles_ondemand_select_req(&xas, cache->req_id_next - 1);
	}
	if (!req) {
		xa_unlock(&cache->reqs);
		return 0;
	}

	msg = &req->msg;
	n = msg->len;

	if (n > buflen) {
		xa_unlock(&cache->reqs);
		return -EMSGSIZE;
	}

	xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
	cache->req_id_next = xas.xa_index + 1;
	refcount_inc(&req->ref);
	cachefiles_grab_object(req->object, cachefiles_obj_get_read_req);
	xa_unlock(&cache->reqs);

	if (msg->opcode == CACHEFILES_OP_OPEN) {
		ret = cachefiles_ondemand_get_fd(req, &anon_file);
		if (ret)
			goto out;
	}

	msg->msg_id = xas.xa_index;
	msg->object_id = req->object->ondemand->ondemand_id;

	if (copy_to_user(_buffer, msg, n) != 0)
		ret = -EFAULT;

	if (msg->opcode == CACHEFILES_OP_OPEN) {
		if (ret < 0) {
			fput(anon_file.file);
			put_unused_fd(anon_file.fd);
			goto out;
		}
		fd_install(anon_file.fd, anon_file.file);
	}
out:
	cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
	/* Remove the request on error; a CLOSE request gets no reply, so remove it too */
	if (ret || msg->opcode == CACHEFILES_OP_CLOSE) {
		xas_reset(&xas);
		xas_lock(&xas);
		if (xas_load(&xas) == req) {
			req->error = ret;
			complete(&req->done);
			xas_store(&xas, NULL);
		}
		xas_unlock(&xas);
	}
	cachefiles_req_put(req);
	return ret ? ret : n;
}

typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);

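/*
 * Allocate a request, let init_req() fill in the opcode-specific
 * payload, enqueue it into cache->reqs, wake up the daemon, and wait
 * for the daemon to complete it.
 */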
static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
					enum cachefiles_opcode opcode,
					size_t data_len,
					init_req_fn init_req,
					void *private)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req = NULL;
	XA_STATE(xas, &cache->reqs, 0);
	int ret;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return 0;

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		ret = -EIO;
		goto out;
	}

	req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out;
	}

	refcount_set(&req->ref, 1);
	req->object = object;
	init_completion(&req->done);
	req->msg.opcode = opcode;
	req->msg.len = sizeof(struct cachefiles_msg) + data_len;

	ret = init_req(req, private);
	if (ret)
		goto out;

	do {
		/*
		 * Stop enqueuing the request when the daemon is dying. The
		 * following two operations need to be atomic as a whole:
		 *   1) check the cache state, and
		 *   2) enqueue the request if the cache is alive.
		 * Otherwise the request may be enqueued after the xarray has
		 * been flushed, leaving an orphan request that is never
		 * completed.
		 *
		 * CPU 1			CPU 2
		 * =====			=====
		 *				test CACHEFILES_DEAD bit
		 * set CACHEFILES_DEAD bit
		 * flush requests in the xarray
		 *				enqueue the request
		 */
		xas_lock(&xas);

		if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
			xas_unlock(&xas);
			ret = -EIO;
			goto out;
		}

		/* coupled with the barrier in cachefiles_flush_reqs() */
		smp_mb();

		if (opcode == CACHEFILES_OP_CLOSE &&
		    !cachefiles_ondemand_object_is_open(object)) {
			WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
			xas_unlock(&xas);
			ret = -EIO;
			goto out;
		}

		xas.xa_index = 0;
		xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART)
			xas_set_err(&xas, -EBUSY);
		xas_store(&xas, req);
		xas_clear_mark(&xas, XA_FREE_MARK);
		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	ret = xas_error(&xas);
	if (ret)
		goto out;

	wake_up_all(&cache->daemon_pollwq);
	wait_for_completion(&req->done);
	ret = req->error;
	cachefiles_req_put(req);
	return ret;
out:
	/*
	 * Reset the object to the closed state in the error handling path.
	 * If an error occurs after the anonymous fd has been created,
	 * cachefiles_ondemand_fd_release() will set the object to closed.
	 */
	if (opcode == CACHEFILES_OP_OPEN)
		cachefiles_ondemand_set_object_close(object);
	kfree(req);
	return ret;
}

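/*
 * Fill in the payload of an OPEN request: the volume key and the cookie
 * key, which the user daemon uses to identify the cache file to be
 * created or opened.
 */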
static int cachefiles_ondemand_init_open_req(struct cachefiles_req *req,
					     void *private)
{
	struct cachefiles_object *object = req->object;
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_volume *volume = object->volume->vcookie;
	struct cachefiles_open *load = (void *)req->msg.data;
	size_t volume_key_size, cookie_key_size;
	void *volume_key, *cookie_key;

	/*
	 * Volume key is a NUL-terminated string. key[0] stores strlen() of the
	 * string, followed by the content of the string (excluding '\0').
	 */
	volume_key_size = volume->key[0] + 1;
	volume_key = volume->key + 1;

	/* Cookie key is binary data, which is netfs specific. */
	cookie_key_size = cookie->key_len;
	cookie_key = fscache_get_key(cookie);

	if (!(object->cookie->advice & FSCACHE_ADV_WANT_CACHE_SIZE)) {
		pr_err("WANT_CACHE_SIZE is needed for on-demand mode\n");
		return -EINVAL;
	}

	load->volume_key_size = volume_key_size;
	load->cookie_key_size = cookie_key_size;
	memcpy(load->data, volume_key, volume_key_size);
	memcpy(load->data + volume_key_size, cookie_key, cookie_key_size);

	return 0;
}

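/*
 * A CLOSE request carries no payload; it is only valid for an object
 * that has actually been opened.
 */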
static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
					      void *private)
{
	struct cachefiles_object *object = req->object;

	if (!cachefiles_ondemand_object_is_open(object))
		return -ENOENT;

	trace_cachefiles_ondemand_close(object, &req->msg);
	return 0;
}

struct cachefiles_read_ctx {
	loff_t off;
	size_t len;
};

static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
					     void *private)
{
	struct cachefiles_object *object = req->object;
	struct cachefiles_read *load = (void *)req->msg.data;
	struct cachefiles_read_ctx *read_ctx = private;

	load->off = read_ctx->off;
	load->len = read_ctx->len;
	trace_cachefiles_ondemand_read(object, &req->msg, load);
	return 0;
}

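/*
 * Send an OPEN request for this object unless it is already open; the
 * user daemon replies with the object size via the copen command.
 */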
int cachefiles_ondemand_init_object(struct cachefiles_object *object)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_volume *volume = object->volume->vcookie;
	size_t volume_key_size, cookie_key_size, data_len;

	if (!object->ondemand)
		return 0;

	/*
	 * CacheFiles will first check the cache file under the root cache
	 * directory. If the coherency check fails, it will fall back to
	 * creating a new tmpfile as the cache file. Reuse the previously
	 * allocated object ID if any.
	 */
	if (cachefiles_ondemand_object_is_open(object))
		return 0;

	volume_key_size = volume->key[0] + 1;
	cookie_key_size = cookie->key_len;
	data_len = sizeof(struct cachefiles_open) +
		   volume_key_size + cookie_key_size;

	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN,
			data_len, cachefiles_ondemand_init_open_req, NULL);
}

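/*
 * Send a CLOSE request to the user daemon when the object is being
 * cleaned up.
 */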
void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
{
	cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
			cachefiles_ondemand_init_close_req, NULL);
}

int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
				struct cachefiles_volume *volume)
{
	if (!cachefiles_in_ondemand_mode(volume->cache))
		return 0;

	object->ondemand = kzalloc(sizeof(struct cachefiles_ondemand_info),
					GFP_KERNEL);
	if (!object->ondemand)
		return -ENOMEM;

	object->ondemand->object = object;
	spin_lock_init(&object->ondemand->lock);
	INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
	return 0;
}

void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *object)
{
	kfree(object->ondemand);
	object->ondemand = NULL;
}

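/*
 * Ask the user daemon to prepare the data in [pos, pos + len) by
 * sending a READ request; the caller is blocked until the daemon
 * acknowledges it with the CACHEFILES_IOC_READ_COMPLETE ioctl.
 */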
int cachefiles_ondemand_read(struct cachefiles_object *object,
			     loff_t pos, size_t len)
{
	struct cachefiles_read_ctx read_ctx = {pos, len};

	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_READ,
			sizeof(struct cachefiles_read),
			cachefiles_ondemand_init_read_req, &read_ctx);
}