xref: /openbmc/linux/fs/cachefiles/ondemand.c (revision 93696d8f)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/fdtable.h>
#include <linux/anon_inodes.h>
#include <linux/uio.h>
#include "internal.h"

struct ondemand_anon_file {
	struct file *file;
	int fd;
};

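/* Drop a reference on a request, freeing it once the last reference is gone. */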
static inline void cachefiles_req_put(struct cachefiles_req *req)
{
	if (refcount_dec_and_test(&req->ref))
		kfree(req);
}

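/*
 * Release of the anonymous fd: tear down the object <-> fd association,
 * complete any pending CLOSE requests for this object and free its object ID.
 */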
static int cachefiles_ondemand_fd_release(struct inode *inode,
					  struct file *file)
{
	struct cachefiles_object *object = file->private_data;
	struct cachefiles_cache *cache;
	struct cachefiles_ondemand_info *info;
	int object_id;
	struct cachefiles_req *req;
	XA_STATE(xas, NULL, 0);

	if (!object)
		return 0;

	info = object->ondemand;
	cache = object->volume->cache;
	xas.xa = &cache->reqs;

	xa_lock(&cache->reqs);
	spin_lock(&info->lock);
	object_id = info->ondemand_id;
	info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
	cachefiles_ondemand_set_object_close(object);
	spin_unlock(&info->lock);

	/*
	 * Only flush requests marked CACHEFILES_REQ_NEW, to avoid racing
	 * with daemon_read.
	 */
	xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
		if (req->msg.object_id == object_id &&
		    req->msg.opcode == CACHEFILES_OP_CLOSE) {
			complete(&req->done);
			xas_store(&xas, NULL);
		}
	}
	xa_unlock(&cache->reqs);

	xa_erase(&cache->ondemand_ids, object_id);
	trace_cachefiles_ondemand_fd_release(object, object_id);
	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
	cachefiles_put_unbind_pincount(cache);
	return 0;
}

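/*
 * Write data from the daemon into the backing cache file at the given
 * position, to fulfil an on-demand request.
 */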
static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
						 struct iov_iter *iter)
{
	struct cachefiles_object *object = kiocb->ki_filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file = object->file;
	size_t len = iter->count;
	loff_t pos = kiocb->ki_pos;
	const struct cred *saved_cred;
	int ret;

	if (!file)
		return -ENOBUFS;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = __cachefiles_prepare_write(object, file, &pos, &len, true);
	cachefiles_end_secure(cache, saved_cred);
	if (ret < 0)
		return ret;

	trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
	ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
	if (!ret)
		ret = len;

	return ret;
}

static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
					    int whence)
{
	struct cachefiles_object *object = filp->private_data;
	struct file *file = object->file;

	if (!file)
		return -ENOBUFS;

	return vfs_llseek(file, pos, whence);
}

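/*
 * CACHEFILES_IOC_READ_COMPLETE: the daemon reports that the READ request
 * with the given id has been fulfilled, so complete it.
 */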
static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
					 unsigned long id)
{
	struct cachefiles_object *object = filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req;
	XA_STATE(xas, &cache->reqs, id);

	if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
		return -EINVAL;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	xa_lock(&cache->reqs);
	req = xas_load(&xas);
	if (!req || req->msg.opcode != CACHEFILES_OP_READ ||
	    req->object != object) {
		xa_unlock(&cache->reqs);
		return -EINVAL;
	}
	xas_store(&xas, NULL);
	xa_unlock(&cache->reqs);

	trace_cachefiles_ondemand_cread(object, id);
	complete(&req->done);
	return 0;
}

static const struct file_operations cachefiles_ondemand_fd_fops = {
	.owner		= THIS_MODULE,
	.release	= cachefiles_ondemand_fd_release,
	.write_iter	= cachefiles_ondemand_fd_write_iter,
	.llseek		= cachefiles_ondemand_fd_llseek,
	.unlocked_ioctl	= cachefiles_ondemand_fd_ioctl,
};

/*
 * OPEN request completion (copen)
 * - command: "copen <id>,<cache_size>"
 *   <cache_size> indicates the object size if >= 0, or an error code if
 *   negative
 */
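/*
 * For example (illustrative values only): after reading an OPEN request
 * whose msg_id is 3, the daemon may write "copen 3,1048576" to complete it
 * with a 1 MiB object, or "copen 3,-5" to fail it with -EIO.
 */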
int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
{
	struct cachefiles_req *req;
	struct fscache_cookie *cookie;
	struct cachefiles_ondemand_info *info;
	char *pid, *psize;
	unsigned long id;
	long size;
	int ret;
	XA_STATE(xas, &cache->reqs, 0);

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	if (!*args) {
		pr_err("Empty id specified\n");
		return -EINVAL;
	}

	pid = args;
	psize = strchr(args, ',');
	if (!psize) {
		pr_err("Cache size is not specified\n");
		return -EINVAL;
	}

	*psize = 0;
	psize++;

	ret = kstrtoul(pid, 0, &id);
	if (ret)
		return ret;

	xa_lock(&cache->reqs);
	xas.xa_index = id;
	req = xas_load(&xas);
	if (!req || req->msg.opcode != CACHEFILES_OP_OPEN ||
	    !req->object->ondemand->ondemand_id) {
		xa_unlock(&cache->reqs);
		return -EINVAL;
	}
	xas_store(&xas, NULL);
	xa_unlock(&cache->reqs);

	info = req->object->ondemand;
	/* fail OPEN request if copen format is invalid */
	ret = kstrtol(psize, 0, &size);
	if (ret) {
		req->error = ret;
		goto out;
	}

	/* fail OPEN request if daemon reports an error */
	if (size < 0) {
		if (!IS_ERR_VALUE(size)) {
			req->error = -EINVAL;
			ret = -EINVAL;
		} else {
			req->error = size;
			ret = 0;
		}
		goto out;
	}

	spin_lock(&info->lock);
	/*
	 * If the anonymous fd was closed before copen, fail the request.
	 *
	 *             t1             |             t2
	 * ---------------------------------------------------------
	 *                             cachefiles_ondemand_copen
	 *                             req = xa_erase(&cache->reqs, id)
	 * // Anon fd is maliciously closed.
	 * cachefiles_ondemand_fd_release
	 * xa_lock(&cache->reqs)
	 * cachefiles_ondemand_set_object_close(object)
	 * xa_unlock(&cache->reqs)
	 *                             cachefiles_ondemand_set_object_open
	 *                             // No one will ever close it again.
	 * cachefiles_ondemand_daemon_read
	 * cachefiles_ondemand_select_req
	 *
	 * The daemon then gets a read req whose fd is already closed; it
	 * cannot issue a cread ioctl on a closed fd, so it hangs.
	 */
	if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) {
		spin_unlock(&info->lock);
		req->error = -EBADFD;
		goto out;
	}
	cookie = req->object->cookie;
	cookie->object_size = size;
	if (size)
		clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	else
		set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	trace_cachefiles_ondemand_copen(req->object, id, size);

	cachefiles_ondemand_set_object_open(req->object);
	spin_unlock(&info->lock);
	wake_up_all(&cache->daemon_pollwq);

out:
	spin_lock(&info->lock);
	/* Set the object to close state so it does not stay in reopening state. */
	if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED)
		cachefiles_ondemand_set_object_close(req->object);
	spin_unlock(&info->lock);
	complete(&req->done);
	return ret;
}

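/*
 * "restore" command: after the user daemon has crashed and restarted,
 * re-mark all pending requests as new so that they get handed out again.
 */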
int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
{
	struct cachefiles_req *req;

	XA_STATE(xas, &cache->reqs, 0);

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	/*
	 * Reset the requests to CACHEFILES_REQ_NEW state, so that requests
	 * that were processed halfway before the crash of the user daemon
	 * can be reprocessed after the recovery.
	 */
	xas_lock(&xas);
	xas_for_each(&xas, req, ULONG_MAX)
		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
	xas_unlock(&xas);

	wake_up_all(&cache->daemon_pollwq);
	return 0;
}

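/*
 * Allocate an object ID and an anonymous fd for an OPEN request, through
 * which the daemon will write data into the cache file.
 */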
static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
				      struct ondemand_anon_file *anon_file)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct cachefiles_open *load;
	u32 object_id;
	int ret;

	object = cachefiles_grab_object(req->object,
			cachefiles_obj_get_ondemand_fd);
	cache = object->volume->cache;

	ret = xa_alloc_cyclic(&cache->ondemand_ids, &object_id, NULL,
			      XA_LIMIT(1, INT_MAX),
			      &cache->ondemand_id_next, GFP_KERNEL);
	if (ret < 0)
		goto err;

	anon_file->fd = get_unused_fd_flags(O_WRONLY);
	if (anon_file->fd < 0) {
		ret = anon_file->fd;
		goto err_free_id;
	}

	anon_file->file = anon_inode_getfile("[cachefiles]",
				&cachefiles_ondemand_fd_fops, object, O_WRONLY);
	if (IS_ERR(anon_file->file)) {
		ret = PTR_ERR(anon_file->file);
		goto err_put_fd;
	}

	spin_lock(&object->ondemand->lock);
	if (object->ondemand->ondemand_id > 0) {
		spin_unlock(&object->ondemand->lock);
		/* Pair with check in cachefiles_ondemand_fd_release(). */
		anon_file->file->private_data = NULL;
		ret = -EEXIST;
		goto err_put_file;
	}

	anon_file->file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;

	load = (void *)req->msg.data;
	load->fd = anon_file->fd;
	object->ondemand->ondemand_id = object_id;
	spin_unlock(&object->ondemand->lock);

	cachefiles_get_unbind_pincount(cache);
	trace_cachefiles_ondemand_open(object, &req->msg, load);
	return 0;

err_put_file:
	fput(anon_file->file);
	anon_file->file = NULL;
err_put_fd:
	put_unused_fd(anon_file->fd);
	anon_file->fd = ret;
err_free_id:
	xa_erase(&cache->ondemand_ids, object_id);
err:
	spin_lock(&object->ondemand->lock);
	/* Avoid marking an opened object as closed. */
	if (object->ondemand->ondemand_id <= 0)
		cachefiles_ondemand_set_object_close(object);
	spin_unlock(&object->ondemand->lock);
	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
	return ret;
}

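/*
 * Worker to reopen a closed object by re-sending an OPEN request for it
 * (see cachefiles_ondemand_select_req()).
 */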
static void ondemand_object_worker(struct work_struct *work)
{
	struct cachefiles_ondemand_info *info =
		container_of(work, struct cachefiles_ondemand_info, ondemand_work);

	cachefiles_ondemand_init_object(info->object);
}

/*
 * If there are any in-flight or subsequent READ requests on the
 * closed object, reopen it.
 * Skip READ requests whose related object is still reopening.
 */
static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas,
							      unsigned long xa_max)
{
	struct cachefiles_req *req;
	struct cachefiles_object *object;
	struct cachefiles_ondemand_info *info;

	xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) {
		if (req->msg.opcode != CACHEFILES_OP_READ)
			return req;
		object = req->object;
		info = object->ondemand;
		if (cachefiles_ondemand_object_is_close(object)) {
			cachefiles_ondemand_set_object_reopening(object);
			queue_work(fscache_wq, &info->ondemand_work);
			continue;
		}
		if (cachefiles_ondemand_object_is_reopening(object))
			continue;
		return req;
	}
	return NULL;
}

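/*
 * Complete a request with the given error, but only if this caller wins the
 * race to remove it from the xarray.
 */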
static inline bool cachefiles_ondemand_finish_req(struct cachefiles_req *req,
						  struct xa_state *xas, int err)
{
	if (unlikely(!xas || !req))
		return false;

	if (xa_cmpxchg(xas->xa, xas->xa_index, req, NULL, 0) != req)
		return false;

	req->error = err;
	complete(&req->done);
	return true;
}

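/*
 * Hand the next pending request out to the daemon, copying the message into
 * the daemon's read buffer; for an OPEN request, also install the anonymous
 * fd in the daemon's file table.
 */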
ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
					char __user *_buffer, size_t buflen)
{
	struct cachefiles_req *req;
	struct cachefiles_msg *msg;
	size_t n;
	int ret = 0;
	struct ondemand_anon_file anon_file;
	XA_STATE(xas, &cache->reqs, cache->req_id_next);

	xa_lock(&cache->reqs);
	/*
	 * Cyclically search for a request that has not yet been processed,
	 * to prevent requests from being processed repeatedly and to make
	 * request distribution fair.
	 */
	req = cachefiles_ondemand_select_req(&xas, ULONG_MAX);
	if (!req && cache->req_id_next > 0) {
		xas_set(&xas, 0);
		req = cachefiles_ondemand_select_req(&xas, cache->req_id_next - 1);
	}
	if (!req) {
		xa_unlock(&cache->reqs);
		return 0;
	}

	msg = &req->msg;
	n = msg->len;

	if (n > buflen) {
		xa_unlock(&cache->reqs);
		return -EMSGSIZE;
	}

	xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
	cache->req_id_next = xas.xa_index + 1;
	refcount_inc(&req->ref);
	cachefiles_grab_object(req->object, cachefiles_obj_get_read_req);
	xa_unlock(&cache->reqs);

	if (msg->opcode == CACHEFILES_OP_OPEN) {
		ret = cachefiles_ondemand_get_fd(req, &anon_file);
		if (ret)
			goto out;
	}

	msg->msg_id = xas.xa_index;
	msg->object_id = req->object->ondemand->ondemand_id;

	if (copy_to_user(_buffer, msg, n) != 0)
		ret = -EFAULT;

	if (msg->opcode == CACHEFILES_OP_OPEN) {
		if (ret < 0) {
			fput(anon_file.file);
			put_unused_fd(anon_file.fd);
			goto out;
		}
		fd_install(anon_file.fd, anon_file.file);
	}
out:
	cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
	/* Remove the request on error; a CLOSE request expects no reply. */
	if (ret || msg->opcode == CACHEFILES_OP_CLOSE)
		cachefiles_ondemand_finish_req(req, &xas, ret);
	cachefiles_req_put(req);
	return ret ? ret : n;
}

typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);

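/*
 * Allocate a request of the given opcode, assign it a free slot (msg_id) in
 * cache->reqs, wake the daemon and wait for the reply. Returns the error, if
 * any, recorded by the reply, or 0 when not in on-demand mode.
 */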
static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
					enum cachefiles_opcode opcode,
					size_t data_len,
					init_req_fn init_req,
					void *private)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req = NULL;
	XA_STATE(xas, &cache->reqs, 0);
	int ret;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return 0;

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		ret = -EIO;
		goto out;
	}

	req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out;
	}

	refcount_set(&req->ref, 1);
	req->object = object;
	init_completion(&req->done);
	req->msg.opcode = opcode;
	req->msg.len = sizeof(struct cachefiles_msg) + data_len;

	ret = init_req(req, private);
	if (ret)
		goto out;

	do {
		/*
		 * Stop enqueuing the request when the daemon is dying. The
		 * following two operations need to be atomic as a whole.
		 *   1) check cache state, and
		 *   2) enqueue request if cache is alive.
		 * Otherwise the request may be enqueued after the xarray has
		 * been flushed, leaving the orphan request never completed.
		 *
		 * CPU 1			CPU 2
		 * =====			=====
		 *				test CACHEFILES_DEAD bit
		 * set CACHEFILES_DEAD bit
		 * flush requests in the xarray
		 *				enqueue the request
		 */
		xas_lock(&xas);

		if (test_bit(CACHEFILES_DEAD, &cache->flags) ||
		    cachefiles_ondemand_object_is_dropping(object)) {
			xas_unlock(&xas);
			ret = -EIO;
			goto out;
		}

		/* coupled with the barrier in cachefiles_flush_reqs() */
		smp_mb();

		if (opcode == CACHEFILES_OP_CLOSE &&
		    !cachefiles_ondemand_object_is_open(object)) {
			WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
			xas_unlock(&xas);
			ret = -EIO;
			goto out;
		}

		/*
		 * Cyclically find a free slot to avoid msg_id reuse that would
		 * let the daemon successfully copen a stale msg_id.
		 */
		xas.xa_index = cache->msg_id_next;
		xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART) {
			xas.xa_index = 0;
			xas_find_marked(&xas, cache->msg_id_next - 1, XA_FREE_MARK);
		}
		if (xas.xa_node == XAS_RESTART)
			xas_set_err(&xas, -EBUSY);

		xas_store(&xas, req);
		if (xas_valid(&xas)) {
			cache->msg_id_next = xas.xa_index + 1;
			xas_clear_mark(&xas, XA_FREE_MARK);
			xas_set_mark(&xas, CACHEFILES_REQ_NEW);
		}
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	ret = xas_error(&xas);
	if (ret)
		goto out;

	wake_up_all(&cache->daemon_pollwq);
wait:
	ret = wait_for_completion_killable(&req->done);
	if (!ret) {
		ret = req->error;
	} else {
		ret = -EINTR;
		if (!cachefiles_ondemand_finish_req(req, &xas, ret)) {
			/* Someone will complete it soon. */
			cpu_relax();
			goto wait;
		}
	}
	cachefiles_req_put(req);
	return ret;
out:
	/*
	 * Reset the object to close state on the error path. If an error
	 * occurs after the anonymous fd has been created,
	 * cachefiles_ondemand_fd_release() will set the object to close.
	 */
	if (opcode == CACHEFILES_OP_OPEN &&
	    !cachefiles_ondemand_object_is_dropping(object))
		cachefiles_ondemand_set_object_close(object);
	kfree(req);
	return ret;
}

static int cachefiles_ondemand_init_open_req(struct cachefiles_req *req,
					     void *private)
{
	struct cachefiles_object *object = req->object;
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_volume *volume = object->volume->vcookie;
	struct cachefiles_open *load = (void *)req->msg.data;
	size_t volume_key_size, cookie_key_size;
	void *volume_key, *cookie_key;

	/*
	 * Volume key is a NUL-terminated string. key[0] stores strlen() of the
	 * string, followed by the content of the string (excluding '\0').
	 */
	volume_key_size = volume->key[0] + 1;
	volume_key = volume->key + 1;

	/* Cookie key is binary data, which is netfs specific. */
	cookie_key_size = cookie->key_len;
	cookie_key = fscache_get_key(cookie);

	if (!(object->cookie->advice & FSCACHE_ADV_WANT_CACHE_SIZE)) {
		pr_err("WANT_CACHE_SIZE is needed for on-demand mode\n");
		return -EINVAL;
	}

	load->volume_key_size = volume_key_size;
	load->cookie_key_size = cookie_key_size;
	memcpy(load->data, volume_key, volume_key_size);
	memcpy(load->data + volume_key_size, cookie_key, cookie_key_size);

	return 0;
}

static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
					      void *private)
{
	struct cachefiles_object *object = req->object;

	if (!cachefiles_ondemand_object_is_open(object))
		return -ENOENT;

	trace_cachefiles_ondemand_close(object, &req->msg);
	return 0;
}

struct cachefiles_read_ctx {
	loff_t off;
	size_t len;
};

static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
					     void *private)
{
	struct cachefiles_object *object = req->object;
	struct cachefiles_read *load = (void *)req->msg.data;
	struct cachefiles_read_ctx *read_ctx = private;

	load->off = read_ctx->off;
	load->len = read_ctx->len;
	trace_cachefiles_ondemand_read(object, &req->msg, load);
	return 0;
}

int cachefiles_ondemand_init_object(struct cachefiles_object *object)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_volume *volume = object->volume->vcookie;
	size_t volume_key_size, cookie_key_size, data_len;

	if (!object->ondemand)
		return 0;

	/*
	 * CacheFiles will first check the cache file under the root cache
	 * directory. If the coherency check fails, it will fall back to
	 * creating a new tmpfile as the cache file. Reuse the previously
	 * allocated object ID, if any.
	 */
	if (cachefiles_ondemand_object_is_open(object))
		return 0;

	volume_key_size = volume->key[0] + 1;
	cookie_key_size = cookie->key_len;
	data_len = sizeof(struct cachefiles_open) +
		   volume_key_size + cookie_key_size;

	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN,
			data_len, cachefiles_ondemand_init_open_req, NULL);
}

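/*
 * Called when an object is withdrawn: send a CLOSE request and cancel all
 * outstanding requests for the object.
 */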
void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
{
	unsigned long index;
	struct cachefiles_req *req;
	struct cachefiles_cache *cache;

	if (!object->ondemand)
		return;

	cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
			cachefiles_ondemand_init_close_req, NULL);

	if (!object->ondemand->ondemand_id)
		return;

	/* Cancel all requests for the object that is being dropped. */
	cache = object->volume->cache;
	xa_lock(&cache->reqs);
	cachefiles_ondemand_set_object_dropping(object);
	xa_for_each(&cache->reqs, index, req) {
		if (req->object == object) {
			req->error = -EIO;
			complete(&req->done);
			__xa_erase(&cache->reqs, index);
		}
	}
	xa_unlock(&cache->reqs);

	/* Wait for ondemand_object_worker() to finish to avoid UAF. */
	cancel_work_sync(&object->ondemand->ondemand_work);
}

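/*
 * Allocate the per-object on-demand state; a no-op unless the cache is in
 * on-demand mode.
 */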
int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
				struct cachefiles_volume *volume)
{
	if (!cachefiles_in_ondemand_mode(volume->cache))
		return 0;

	object->ondemand = kzalloc(sizeof(struct cachefiles_ondemand_info),
					GFP_KERNEL);
	if (!object->ondemand)
		return -ENOMEM;

	object->ondemand->object = object;
	spin_lock_init(&object->ondemand->lock);
	INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
	return 0;
}

void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *object)
{
	kfree(object->ondemand);
	object->ondemand = NULL;
}

int cachefiles_ondemand_read(struct cachefiles_object *object,
			     loff_t pos, size_t len)
{
	struct cachefiles_read_ctx read_ctx = {pos, len};

	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_READ,
			sizeof(struct cachefiles_read),
			cachefiles_ondemand_init_read_req, &read_ctx);
}